| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|

| scipy | scipy-main/scipy/_lib/tests/test_import_cycles.py |
import sys
import subprocess
from .test_public_api import PUBLIC_MODULES
# Regression tests for gh-6793.
# Check that all modules are importable in a new Python process.
# This is not necessarily true if there are import cycles present.
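# Each import below runs in a fresh interpreter; roughly equivalent to the
# illustrative shell command:
#     python -c "import scipy.cluster"    # exit status 0 iff the import succeeds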
def test_public_modules_importable():
pids = [subprocess.Popen([sys.executable, '-c', f'import {module}'])
for module in PUBLIC_MODULES]
for i, pid in enumerate(pids):
assert pid.wait() == 0, f'Failed to import {PUBLIC_MODULES[i]}'
| 500 | 32.4 | 72 | py |

| scipy | scipy-main/scipy/_lib/tests/test_ccallback.py |
from numpy.testing import assert_equal, assert_
from pytest import raises as assert_raises
import time
import pytest
import ctypes
import threading
from scipy._lib import _ccallback_c as _test_ccallback_cython
from scipy._lib import _test_ccallback
from scipy._lib._ccallback import LowLevelCallable
try:
import cffi
HAVE_CFFI = True
except ImportError:
HAVE_CFFI = False
ERROR_VALUE = 2.0
def callback_python(a, user_data=None):
if a == ERROR_VALUE:
raise ValueError("bad value")
if user_data is None:
return a + 1
else:
return a + user_data
def _get_cffi_func(base, signature):
if not HAVE_CFFI:
pytest.skip("cffi not installed")
# Get function address
voidp = ctypes.cast(base, ctypes.c_void_p)
address = voidp.value
# Create corresponding cffi handle
ffi = cffi.FFI()
func = ffi.cast(signature, address)
return func
def _get_ctypes_data():
value = ctypes.c_double(2.0)
return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)
def _get_cffi_data():
if not HAVE_CFFI:
pytest.skip("cffi not installed")
ffi = cffi.FFI()
return ffi.new('double *', 2.0)
CALLERS = {
'simple': _test_ccallback.test_call_simple,
'nodata': _test_ccallback.test_call_nodata,
'nonlocal': _test_ccallback.test_call_nonlocal,
'cython': _test_ccallback_cython.test_call_cython,
}
# These functions have signatures known to the callers
FUNCS = {
'python': lambda: callback_python,
'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1_cython"),
'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
'double (*)(double, int *, void *)'),
'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1b_cython"),
'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
'double (*)(double, double, int *, void *)'),
}
# These functions have signatures the callers don't know
BAD_FUNCS = {
'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1bc_cython"),
'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
'cffi_bc': lambda: _get_cffi_func(_test_ccallback_cython.plus1bc_ctypes,
'double (*)(double, double, double, int *, void *)'),
}
USER_DATAS = {
'ctypes': _get_ctypes_data,
'cffi': _get_cffi_data,
'capsule': _test_ccallback.test_get_data_capsule,
}
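# Illustrative sketch (not part of the original tests): a LowLevelCallable
# bundles a C function pointer with optional `user_data`; the callers dispatch
# on its signature string. Mirroring `callback_python` above, the C `plus1`
# returns `a + 1` without user_data and `a + *user_data` with it:
#
#     func = LowLevelCallable(_test_ccallback_cython.plus1_ctypes)
#     _test_ccallback.test_call_simple(func, 1.0)    # -> 2.0
#
#     data = _get_ctypes_data()   # void* to a C double holding 2.0
#     func2 = LowLevelCallable(_test_ccallback_cython.plus1_ctypes, data)
#     _test_ccallback.test_call_simple(func2, 1.0)   # -> 3.0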
def test_callbacks():
def check(caller, func, user_data):
caller = CALLERS[caller]
func = FUNCS[func]()
user_data = USER_DATAS[user_data]()
if func is callback_python:
def func2(x):
return func(x, 2.0)
else:
func2 = LowLevelCallable(func, user_data)
func = LowLevelCallable(func)
# Test basic call
assert_equal(caller(func, 1.0), 2.0)
# Test that the 'bad' value results in an error
assert_raises(ValueError, caller, func, ERROR_VALUE)
# Test passing in user_data
assert_equal(caller(func2, 1.0), 3.0)
for caller in sorted(CALLERS.keys()):
for func in sorted(FUNCS.keys()):
for user_data in sorted(USER_DATAS.keys()):
check(caller, func, user_data)
def test_bad_callbacks():
def check(caller, func, user_data):
caller = CALLERS[caller]
user_data = USER_DATAS[user_data]()
func = BAD_FUNCS[func]()
if func is callback_python:
def func2(x):
return func(x, 2.0)
else:
func2 = LowLevelCallable(func, user_data)
func = LowLevelCallable(func)
# Test that basic call fails
assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)
# Test that passing in user_data also fails
assert_raises(ValueError, caller, func2, 1.0)
# Test error message
llfunc = LowLevelCallable(func)
try:
caller(llfunc, 1.0)
except ValueError as err:
msg = str(err)
assert_(llfunc.signature in msg, msg)
assert_('double (double, double, int *, void *)' in msg, msg)
for caller in sorted(CALLERS.keys()):
for func in sorted(BAD_FUNCS.keys()):
for user_data in sorted(USER_DATAS.keys()):
check(caller, func, user_data)
def test_signature_override():
caller = _test_ccallback.test_call_simple
func = _test_ccallback.test_get_plus1_capsule()
llcallable = LowLevelCallable(func, signature="bad signature")
assert_equal(llcallable.signature, "bad signature")
assert_raises(ValueError, caller, llcallable, 3)
llcallable = LowLevelCallable(func, signature="double (double, int *, void *)")
assert_equal(llcallable.signature, "double (double, int *, void *)")
assert_equal(caller(llcallable, 3), 4)
def test_threadsafety():
def callback(a, caller):
if a <= 0:
return 1
else:
res = caller(lambda x: callback(x, caller), a - 1)
return 2*res
def check(caller):
caller = CALLERS[caller]
results = []
count = 10
def run():
time.sleep(0.01)
r = caller(lambda x: callback(x, caller), count)
results.append(r)
threads = [threading.Thread(target=run) for j in range(20)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert_equal(results, [2.0**count]*len(threads))
for caller in CALLERS.keys():
check(caller)
| 6,033 | 29.17 | 96 | py |

| scipy | scipy-main/scipy/_lib/tests/test_deprecation.py |
import pytest
def test_cython_api_deprecation():
match = ("`scipy._lib._test_deprecation_def.foo_deprecated` "
"is deprecated, use `foo` instead!\n"
"Deprecated in Scipy 42.0.0")
with pytest.warns(DeprecationWarning, match=match):
from .. import _test_deprecation_call
assert _test_deprecation_call.call() == (1, 1)
| 364 | 32.181818 | 65 | py |

| scipy | scipy-main/scipy/_lib/tests/test__threadsafety.py |
import threading
import time
import traceback
from numpy.testing import assert_
from pytest import raises as assert_raises
from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError
def test_parallel_threads():
# Check that ReentrancyLock serializes work in parallel threads.
#
# The test is not fully deterministic, and may succeed falsely if
# the timings go wrong.
lock = ReentrancyLock("failure")
failflag = [False]
exceptions_raised = []
def worker(k):
try:
with lock:
assert_(not failflag[0])
failflag[0] = True
time.sleep(0.1 * k)
assert_(failflag[0])
failflag[0] = False
except Exception:
exceptions_raised.append(traceback.format_exc(2))
threads = [threading.Thread(target=lambda k=k: worker(k))
for k in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
exceptions_raised = "\n".join(exceptions_raised)
assert_(not exceptions_raised, exceptions_raised)
def test_reentering():
# Check that ReentrancyLock prevents re-entering from the same thread.
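# The decorated function immediately calls itself, so the nested call tries
# to re-acquire a lock this thread already holds and raises ReentrancyError.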
@non_reentrant()
def func(x):
return func(x)
assert_raises(ReentrancyError, func, 0)
| 1,322 | 24.442308 | 83 | py |

| scipy | scipy-main/scipy/_lib/tests/test_bunch.py |
import pytest
import pickle
from numpy.testing import assert_equal
from scipy._lib._bunch import _make_tuple_bunch
# `Result` is defined at the top level of the module so it can be
# used to test pickling.
Result = _make_tuple_bunch('Result', ['x', 'y', 'z'], ['w', 'beta'])
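# Illustrative sketch (not part of the test module): `Result` behaves like a
# namedtuple over (x, y, z), while `w` and `beta` are extra attribute-only
# fields that do not participate in the tuple:
#
#     r = Result(x=1, y=2, z=3, w=99, beta=0.5)
#     tuple(r)       # -> (1, 2, 3)
#     r.w, r.beta    # -> (99, 0.5)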
class TestMakeTupleBunch:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Tests with Result
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def setup_method(self):
# Set up an instance of Result.
self.result = Result(x=1, y=2, z=3, w=99, beta=0.5)
def test_attribute_access(self):
assert_equal(self.result.x, 1)
assert_equal(self.result.y, 2)
assert_equal(self.result.z, 3)
assert_equal(self.result.w, 99)
assert_equal(self.result.beta, 0.5)
def test_indexing(self):
assert_equal(self.result[0], 1)
assert_equal(self.result[1], 2)
assert_equal(self.result[2], 3)
assert_equal(self.result[-1], 3)
with pytest.raises(IndexError, match='index out of range'):
self.result[3]
def test_unpacking(self):
x0, y0, z0 = self.result
assert_equal((x0, y0, z0), (1, 2, 3))
assert_equal(self.result, (1, 2, 3))
def test_slice(self):
assert_equal(self.result[1:], (2, 3))
assert_equal(self.result[::2], (1, 3))
assert_equal(self.result[::-1], (3, 2, 1))
def test_len(self):
assert_equal(len(self.result), 3)
def test_repr(self):
s = repr(self.result)
assert_equal(s, 'Result(x=1, y=2, z=3, w=99, beta=0.5)')
def test_hash(self):
assert_equal(hash(self.result), hash((1, 2, 3)))
def test_pickle(self):
s = pickle.dumps(self.result)
obj = pickle.loads(s)
assert isinstance(obj, Result)
assert_equal(obj.x, self.result.x)
assert_equal(obj.y, self.result.y)
assert_equal(obj.z, self.result.z)
assert_equal(obj.w, self.result.w)
assert_equal(obj.beta, self.result.beta)
def test_read_only_existing(self):
with pytest.raises(AttributeError, match="can't set attribute"):
self.result.x = -1
def test_read_only_new(self):
self.result.plate_of_shrimp = "lattice of coincidence"
assert self.result.plate_of_shrimp == "lattice of coincidence"
def test_constructor_missing_parameter(self):
with pytest.raises(TypeError, match='missing'):
# `w` is missing.
Result(x=1, y=2, z=3, beta=0.75)
def test_constructor_incorrect_parameter(self):
with pytest.raises(TypeError, match='unexpected'):
# `foo` is not an existing field.
Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999)
def test_module(self):
m = 'scipy._lib.tests.test_bunch'
assert_equal(Result.__module__, m)
assert_equal(self.result.__module__, m)
def test_extra_fields_per_instance(self):
# This test exists to ensure that instances of the same class
# store their own values for the extra fields. That is, the values
# are stored per instance and not in the class.
result1 = Result(x=1, y=2, z=3, w=-1, beta=0.0)
result2 = Result(x=4, y=5, z=6, w=99, beta=1.0)
assert_equal(result1.w, -1)
assert_equal(result1.beta, 0.0)
# The rest of these checks aren't essential, but let's check
# them anyway.
assert_equal(result1[:], (1, 2, 3))
assert_equal(result2.w, 99)
assert_equal(result2.beta, 1.0)
assert_equal(result2[:], (4, 5, 6))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Other tests
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def test_extra_field_names_is_optional(self):
Square = _make_tuple_bunch('Square', ['width', 'height'])
sq = Square(width=1, height=2)
assert_equal(sq.width, 1)
assert_equal(sq.height, 2)
s = repr(sq)
assert_equal(s, 'Square(width=1, height=2)')
def test_tuple_like(self):
Tup = _make_tuple_bunch('Tup', ['a', 'b'])
tu = Tup(a=1, b=2)
assert isinstance(tu, tuple)
assert isinstance(tu + (1,), tuple)
def test_explicit_module(self):
m = 'some.module.name'
Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'], module=m)
foo = Foo(x=1, a=355, b=113)
assert_equal(Foo.__module__, m)
assert_equal(foo.__module__, m)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument validation
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@pytest.mark.parametrize('args', [('123', ['a'], ['b']),
('Foo', ['-3'], ['x']),
('Foo', ['a'], ['+-*/'])])
def test_identifiers_not_allowed(self, args):
with pytest.raises(ValueError, match='identifiers'):
_make_tuple_bunch(*args)
@pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']),
('Foo', ['a', 'b'], ['b', 'x'])])
def test_repeated_field_names(self, args):
with pytest.raises(ValueError, match='Duplicate'):
_make_tuple_bunch(*args)
@pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']),
('Foo', ['a'], ['_x'])])
def test_leading_underscore_not_allowed(self, args):
with pytest.raises(ValueError, match='underscore'):
_make_tuple_bunch(*args)
@pytest.mark.parametrize('args', [('Foo', ['def'], ['x']),
('Foo', ['a'], ['or']),
('and', ['a'], ['x'])])
def test_keyword_not_allowed_in_fields(self, args):
with pytest.raises(ValueError, match='keyword'):
_make_tuple_bunch(*args)
def test_at_least_one_field_name_required(self):
with pytest.raises(ValueError, match='at least one name'):
_make_tuple_bunch('Qwerty', [], ['a', 'b'])
| 6,168 | 36.846626 | 75 | py |

| scipy | scipy-main/scipy/_lib/tests/test_public_api.py |
"""
This test script is adapted from:
https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
"""
import pkgutil
import types
import importlib
import warnings
from importlib import import_module
import pytest
import scipy
def test_dir_testing():
"""Assert that output of dir has only one "testing/tester"
attribute without duplicate"""
assert len(dir(scipy)) == len(set(dir(scipy)))
# Historically SciPy has not used leading underscores for private submodules
# much. This has resulted in lots of things that look like public modules
# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
# but were never intended to be public. The PUBLIC_MODULES list contains
# modules that are either public because they were meant to be, or because they
# contain public functions/objects that aren't present in any other namespace
# for whatever reason and therefore should be treated as public.
PUBLIC_MODULES = ["scipy." + s for s in [
"cluster",
"cluster.vq",
"cluster.hierarchy",
"constants",
"datasets",
"fft",
"fftpack",
"integrate",
"interpolate",
"io",
"io.arff",
"io.matlab",
"io.wavfile",
"linalg",
"linalg.blas",
"linalg.cython_blas",
"linalg.lapack",
"linalg.cython_lapack",
"linalg.interpolative",
"misc",
"ndimage",
"odr",
"optimize",
"signal",
"signal.windows",
"sparse",
"sparse.linalg",
"sparse.csgraph",
"spatial",
"spatial.distance",
"spatial.transform",
"special",
"stats",
"stats.contingency",
"stats.distributions",
"stats.mstats",
"stats.qmc",
"stats.sampling"
]]
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that lacked underscores
# in their name and hence looked public, but weren't meant to be. All these
# namespaces were deprecated in the 1.8.0 release - see "clear split between
# public and private API" in the 1.8.0 release notes.
# Support for these private modules will be removed in SciPy v2.0.0, as the
# deprecation messages emitted by each of these modules state.
PRIVATE_BUT_PRESENT_MODULES = [
'scipy.constants.codata',
'scipy.constants.constants',
'scipy.fftpack.basic',
'scipy.fftpack.convolve',
'scipy.fftpack.helper',
'scipy.fftpack.pseudo_diffs',
'scipy.fftpack.realtransforms',
'scipy.integrate.odepack',
'scipy.integrate.quadpack',
'scipy.integrate.dop',
'scipy.integrate.lsoda',
'scipy.integrate.vode',
'scipy.interpolate.dfitpack',
'scipy.interpolate.fitpack',
'scipy.interpolate.fitpack2',
'scipy.interpolate.interpnd',
'scipy.interpolate.interpolate',
'scipy.interpolate.ndgriddata',
'scipy.interpolate.polyint',
'scipy.interpolate.rbf',
'scipy.io.arff.arffread',
'scipy.io.harwell_boeing',
'scipy.io.idl',
'scipy.io.mmio',
'scipy.io.netcdf',
'scipy.io.matlab.byteordercodes',
'scipy.io.matlab.mio',
'scipy.io.matlab.mio4',
'scipy.io.matlab.mio5',
'scipy.io.matlab.mio5_params',
'scipy.io.matlab.mio5_utils',
'scipy.io.matlab.mio_utils',
'scipy.io.matlab.miobase',
'scipy.io.matlab.streams',
'scipy.linalg.basic',
'scipy.linalg.decomp',
'scipy.linalg.decomp_cholesky',
'scipy.linalg.decomp_lu',
'scipy.linalg.decomp_qr',
'scipy.linalg.decomp_schur',
'scipy.linalg.decomp_svd',
'scipy.linalg.flinalg',
'scipy.linalg.matfuncs',
'scipy.linalg.misc',
'scipy.linalg.special_matrices',
'scipy.misc.common',
'scipy.misc.doccer',
'scipy.ndimage.filters',
'scipy.ndimage.fourier',
'scipy.ndimage.interpolation',
'scipy.ndimage.measurements',
'scipy.ndimage.morphology',
'scipy.odr.models',
'scipy.odr.odrpack',
'scipy.optimize.cobyla',
'scipy.optimize.cython_optimize',
'scipy.optimize.lbfgsb',
'scipy.optimize.linesearch',
'scipy.optimize.minpack',
'scipy.optimize.minpack2',
'scipy.optimize.moduleTNC',
'scipy.optimize.nonlin',
'scipy.optimize.optimize',
'scipy.optimize.slsqp',
'scipy.optimize.tnc',
'scipy.optimize.zeros',
'scipy.signal.bsplines',
'scipy.signal.filter_design',
'scipy.signal.fir_filter_design',
'scipy.signal.lti_conversion',
'scipy.signal.ltisys',
'scipy.signal.signaltools',
'scipy.signal.spectral',
'scipy.signal.spline',
'scipy.signal.waveforms',
'scipy.signal.wavelets',
'scipy.signal.windows.windows',
'scipy.sparse.base',
'scipy.sparse.bsr',
'scipy.sparse.compressed',
'scipy.sparse.construct',
'scipy.sparse.coo',
'scipy.sparse.csc',
'scipy.sparse.csr',
'scipy.sparse.data',
'scipy.sparse.dia',
'scipy.sparse.dok',
'scipy.sparse.extract',
'scipy.sparse.lil',
'scipy.sparse.linalg.dsolve',
'scipy.sparse.linalg.eigen',
'scipy.sparse.linalg.interface',
'scipy.sparse.linalg.isolve',
'scipy.sparse.linalg.matfuncs',
'scipy.sparse.sparsetools',
'scipy.sparse.spfuncs',
'scipy.sparse.sputils',
'scipy.spatial.ckdtree',
'scipy.spatial.kdtree',
'scipy.spatial.qhull',
'scipy.spatial.transform.rotation',
'scipy.special.add_newdocs',
'scipy.special.basic',
'scipy.special.cython_special',
'scipy.special.orthogonal',
'scipy.special.sf_error',
'scipy.special.specfun',
'scipy.special.spfun_stats',
'scipy.stats.biasedurn',
'scipy.stats.kde',
'scipy.stats.morestats',
'scipy.stats.mstats_basic',
'scipy.stats.mstats_extras',
'scipy.stats.mvn',
'scipy.stats.stats',
]
def is_unexpected(name):
"""Check if this needs to be considered."""
if '._' in name or '.tests' in name or '.setup' in name:
return False
if name in PUBLIC_MODULES:
return False
if name in PRIVATE_BUT_PRESENT_MODULES:
return False
return True
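# For example (illustrative): is_unexpected('scipy._lib.foo') is False
# because of the leading-underscore component, while a hypothetical
# 'scipy.brand_new_module' appearing in neither list above would return True.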
SKIP_LIST = [
'scipy.conftest',
'scipy.version',
]
def test_all_modules_are_expected():
"""
Test that we don't add anything that looks like a new public module by
accident. The check is based on filenames.
"""
modnames = []
for _, modname, ispkg in pkgutil.walk_packages(path=scipy.__path__,
prefix=scipy.__name__ + '.',
onerror=None):
if is_unexpected(modname) and modname not in SKIP_LIST:
# We have a name that is new. If that's on purpose, add it to
# PUBLIC_MODULES. We don't expect to have to add anything to
# PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
modnames.append(modname)
if modnames:
raise AssertionError(f'Found unexpected modules: {modnames}')
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
SKIP_LIST_2 = [
'scipy.char',
'scipy.rec',
'scipy.emath',
'scipy.math',
'scipy.random',
'scipy.ctypeslib',
'scipy.ma'
]
def test_all_modules_are_expected_2():
"""
Check all objects in public modules. The pkgutil-based check in
`test_all_modules_are_expected` only looks at filenames and does not
catch imports into a namespace.
"""
def find_unexpected_members(mod_name):
members = []
module = importlib.import_module(mod_name)
if hasattr(module, '__all__'):
objnames = module.__all__
else:
objnames = dir(module)
for objname in objnames:
if not objname.startswith('_'):
fullobjname = mod_name + '.' + objname
if isinstance(getattr(module, objname), types.ModuleType):
if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2:
members.append(fullobjname)
return members
unexpected_members = find_unexpected_members("scipy")
for modname in PUBLIC_MODULES:
unexpected_members.extend(find_unexpected_members(modname))
if unexpected_members:
raise AssertionError("Found unexpected object(s) that look like "
"modules: {}".format(unexpected_members))
def test_api_importable():
"""
Check that all submodules listed higher up in this file can be imported.
Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
simply need to be removed from the list (deprecation may or may not be
needed - apply common sense).
"""
def check_importable(module_name):
try:
importlib.import_module(module_name)
except (ImportError, AttributeError):
return False
return True
module_names = []
for module_name in PUBLIC_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules in the public API that cannot be "
"imported: {}".format(module_names))
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', category=DeprecationWarning)
warnings.filterwarnings('always', category=ImportWarning)
for module_name in PRIVATE_BUT_PRESENT_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules that are not really public but looked "
"public and can not be imported: "
"{}".format(module_names))
@pytest.mark.parametrize("module_name",
['scipy.stats.stats',
'scipy.stats.morestats'])
def test_private_but_present_deprecation(module_name):
# gh-18279, gh-17572, gh-17771 noted that deprecation warnings
# for imports from private modules
# were misleading. Check that this is resolved.
module = import_module(module_name)
sub_package_name = module_name.split(".")[1]
sub_package = import_module(f"scipy.{sub_package_name}")
# Attributes that were formerly in `module_name` can still be imported from
# `module_name`, albeit with a deprecation warning. The specific message
# depends on whether the attribute is public in `scipy.xxx` or not.
for attr_name in module.__all__:
attr = getattr(sub_package, attr_name, None)
if attr is None:
message = f"`{module_name}.{attr_name}` is deprecated..."
else:
message = f"Please import `{attr_name}` from the `scipy.{sub_package_name}`..."
with pytest.deprecated_call(match=message):
getattr(module, attr_name)
# Attributes that were not in `module_name` get an error notifying the user
# that the attribute is not in `module_name` and that `module_name` is
# deprecated.
message = f"`{module_name}` is deprecated..."
with pytest.raises(AttributeError, match=message):
getattr(module, "ekki")
| 10,973 | 30.534483 | 91 | py |

| scipy | scipy-main/scipy/_lib/tests/test__gcutils.py |
""" Test for assert_deallocated context manager and gc utilities
"""
import gc
from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated,
ReferenceError, IS_PYPY)
from numpy.testing import assert_equal
import pytest
def test_set_gc_state():
gc_status = gc.isenabled()
try:
for state in (True, False):
gc.enable()
set_gc_state(state)
assert_equal(gc.isenabled(), state)
gc.disable()
set_gc_state(state)
assert_equal(gc.isenabled(), state)
finally:
if gc_status:
gc.enable()
def test_gc_state():
# Test gc_state context manager
gc_status = gc.isenabled()
try:
for pre_state in (True, False):
set_gc_state(pre_state)
for with_state in (True, False):
# Check the gc state is with_state in with block
with gc_state(with_state):
assert_equal(gc.isenabled(), with_state)
# And returns to previous state outside block
assert_equal(gc.isenabled(), pre_state)
# Even if the gc state is set explicitly within the block
with gc_state(with_state):
assert_equal(gc.isenabled(), with_state)
set_gc_state(not with_state)
assert_equal(gc.isenabled(), pre_state)
finally:
if gc_status:
gc.enable()
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated():
# Ordinary use
class C:
def __init__(self, arg0, arg1, name='myname'):
self.name = name
for gc_current in (True, False):
with gc_state(gc_current):
# We are deleting from with-block context, so that's OK
with assert_deallocated(C, 0, 2, 'another name') as c:
assert_equal(c.name, 'another name')
del c
# Or not using the thing in with-block context, also OK
with assert_deallocated(C, 0, 2, name='third name'):
pass
assert_equal(gc.isenabled(), gc_current)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_nodel():
class C:
pass
with pytest.raises(ReferenceError):
# Need to delete after using if in with-block context
# Note: assert_deallocated(C) needs to be assigned for the test
# to function correctly. It is assigned to _, but _ itself is
# not referenced in the body of the with, it is only there for
# the refcount.
with assert_deallocated(C) as _:
pass
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_circular():
class C:
def __init__(self):
self._circular = self
with pytest.raises(ReferenceError):
# Circular reference, no automatic garbage collection
with assert_deallocated(C) as c:
del c
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_circular2():
class C:
def __init__(self):
self._circular = self
with pytest.raises(ReferenceError):
# Still circular reference, no automatic garbage collection
with assert_deallocated(C):
pass
| 3,416 | 32.5 | 76 | py |

| scipy | scipy-main/scipy/_lib/tests/__init__.py | | 0 | 0 | 0 | py |

| scipy | scipy-main/scipy/_lib/tests/test_array_api.py |
import numpy as np
from numpy.testing import assert_equal
import pytest
from scipy.conftest import array_api_compatible
from scipy._lib._array_api import (
_GLOBAL_CONFIG, array_namespace, as_xparray,
)
if not _GLOBAL_CONFIG["SCIPY_ARRAY_API"]:
pytest.skip(
"Array API test; set environment variable SCIPY_ARRAY_API=1 to run it",
allow_module_level=True
)
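# To actually run these tests (illustrative invocation):
#     SCIPY_ARRAY_API=1 python -m pytest scipy/_lib/tests/test_array_api.py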
def to_numpy(array, xp):
"""Convert `array` into a NumPy ndarray on the CPU. From sklearn."""
xp_name = xp.__name__
if xp_name in {"array_api_compat.torch", "torch"}:
return array.cpu().numpy()
elif xp_name == "cupy.array_api":
return array._array.get()
elif xp_name in {"array_api_compat.cupy", "cupy"}: # pragma: nocover
return array.get()
return np.asarray(array)
def test_array_namespace():
x, y = np.array([0, 1, 2]), np.array([0, 1, 2])
xp = array_namespace(x, y)
assert 'array_api_compat.numpy' in xp.__name__
_GLOBAL_CONFIG["SCIPY_ARRAY_API"] = False
xp = array_namespace(x, y)
assert 'array_api_compat.numpy' in xp.__name__
_GLOBAL_CONFIG["SCIPY_ARRAY_API"] = True
@array_api_compatible
def test_asarray(xp):
x, y = as_xparray([0, 1, 2], xp=xp), as_xparray(np.arange(3), xp=xp)
ref = np.array([0, 1, 2])
assert_equal(x, ref)
assert_equal(y, ref)
@array_api_compatible
def test_to_numpy(xp):
x = xp.asarray([0, 1, 2])
x = to_numpy(x, xp=xp)
assert isinstance(x, np.ndarray)
@pytest.mark.filterwarnings("ignore: the matrix subclass")
def test_raises():
msg = "'numpy.ma.MaskedArray' are not supported"
with pytest.raises(TypeError, match=msg):
array_namespace(np.ma.array(1), np.array(1))
msg = "'numpy.matrix' are not supported"
with pytest.raises(TypeError, match=msg):
array_namespace(np.array(1), np.matrix(1))
msg = "Only support Array API"
with pytest.raises(TypeError, match=msg):
array_namespace([0, 1, 2])
with pytest.raises(TypeError, match=msg):
array_namespace(1)
| 2,051 | 26.72973 | 79 | py |

| scipy | scipy-main/scipy/_lib/tests/test__pep440.py |
from pytest import raises as assert_raises
from scipy._lib._pep440 import Version, parse
def test_main_versions():
assert Version('1.8.0') == Version('1.8.0')
for ver in ['1.9.0', '2.0.0', '1.8.1']:
assert Version('1.8.0') < Version(ver)
for ver in ['1.7.0', '1.7.1', '0.9.9']:
assert Version('1.8.0') > Version(ver)
def test_version_1_point_10():
# regression test for gh-2998.
assert Version('1.9.0') < Version('1.10.0')
assert Version('1.11.0') < Version('1.11.1')
assert Version('1.11.0') == Version('1.11.0')
assert Version('1.99.11') < Version('1.99.12')
def test_alpha_beta_rc():
assert Version('1.8.0rc1') == Version('1.8.0rc1')
for ver in ['1.8.0', '1.8.0rc2']:
assert Version('1.8.0rc1') < Version(ver)
for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
assert Version('1.8.0rc1') > Version(ver)
assert Version('1.8.0b1') > Version('1.8.0a2')
def test_dev_version():
assert Version('1.9.0.dev+Unknown') < Version('1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev+ffffffff', '1.9.0.dev1']:
assert Version('1.9.0.dev+f16acvda') < Version(ver)
assert Version('1.9.0.dev+f16acvda') == Version('1.9.0.dev+f16acvda')
def test_dev_a_b_rc_mixed():
assert Version('1.9.0a2.dev+f16acvda') == Version('1.9.0a2.dev+f16acvda')
assert Version('1.9.0a2.dev+6acvda54') < Version('1.9.0a2')
def test_dev0_version():
assert Version('1.9.0.dev0+Unknown') < Version('1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
assert Version('1.9.0.dev0+f16acvda') < Version(ver)
assert Version('1.9.0.dev0+f16acvda') == Version('1.9.0.dev0+f16acvda')
def test_dev0_a_b_rc_mixed():
assert Version('1.9.0a2.dev0+f16acvda') == Version('1.9.0a2.dev0+f16acvda')
assert Version('1.9.0a2.dev0+6acvda54') < Version('1.9.0a2')
def test_raises():
for ver in ['1,9.0', '1.7.x']:
assert_raises(ValueError, Version, ver)
def test_legacy_version():
# Non-PEP-440 version identifiers always compare as less than any valid
# version. For NumPy, this only occurs on dev builds prior to 1.10.0,
# which are unsupported anyway.
assert parse('invalid') < Version('0.0.0')
assert parse('1.9.0-f16acvda') < Version('1.0.0')
| 2,277 | 32.5 | 85 | py |

| scipy | scipy-main/scipy/cluster/hierarchy.py |
"""
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
cut_tree
optimal_leaf_ordering
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
Utility classes:
.. autosummary::
:toctree: generated/
DisjointSet -- data structure for incremental connectivity queries
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import bisect
from collections import deque
import numpy as np
from . import _hierarchy, _optimal_leaf_ordering
import scipy.spatial.distance as distance
from scipy._lib._array_api import array_namespace, as_xparray
from scipy._lib._disjoint_set import DisjointSet
_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
'median': 4, 'ward': 5, 'weighted': 6}
_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete',
'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster',
'fclusterdata', 'from_mlab_linkage', 'inconsistent',
'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage',
'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists',
'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering',
'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree',
'ward', 'weighted']
class ClusterWarning(UserWarning):
pass
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3)
def int_floor(arr, xp):
# numpy.array_api is strict about not allowing `int()` on a float array.
# That's typically not needed; here it is, so convert explicitly.
return int(xp.astype(arr, xp.int64))
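# For example (illustrative): int_floor(xp.asarray(5.0), xp) -> 5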
def single(y):
"""
Perform single/min/nearest linkage on the condensed distance matrix ``y``.
Parameters
----------
y : ndarray
The upper triangular part of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import single, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = single(y)
>>> Z
array([[ 0., 1., 1., 2.],
[ 2., 12., 1., 3.],
[ 3., 4., 1., 2.],
[ 5., 14., 1., 3.],
[ 6., 7., 1., 2.],
[ 8., 16., 1., 3.],
[ 9., 10., 1., 2.],
[11., 18., 1., 3.],
[13., 15., 2., 6.],
[17., 20., 2., 9.],
[19., 21., 2., 12.]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32)
>>> fcluster(Z, 1, criterion='distance')
array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
>>> fcluster(Z, 2, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Perform complete/max/farthest point linkage on a condensed distance matrix.
Parameters
----------
y : ndarray
The upper triangular part of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the `linkage` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import complete, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = complete(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.41421356, 3. ],
[ 5. , 13. , 1.41421356, 3. ],
[ 8. , 14. , 1.41421356, 3. ],
[11. , 15. , 1.41421356, 3. ],
[16. , 17. , 4.12310563, 6. ],
[18. , 19. , 4.12310563, 6. ],
[20. , 21. , 5.65685425, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, 1.5, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, 4.5, criterion='distance')
array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 6, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Perform average/UPGMA linkage on a condensed distance matrix.
Parameters
----------
y : ndarray
The upper triangular part of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
`linkage` for more information on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import average, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = average(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.20710678, 3. ],
[ 5. , 13. , 1.20710678, 3. ],
[ 8. , 14. , 1.20710678, 3. ],
[11. , 15. , 1.20710678, 3. ],
[16. , 17. , 3.39675184, 6. ],
[18. , 19. , 3.39675184, 6. ],
[20. , 21. , 4.09206523, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, 1.5, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, 4, criterion='distance')
array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 6, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Perform weighted/WPGMA linkage on the condensed distance matrix.
See `linkage` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular part of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
`linkage` for more information on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import weighted, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = weighted(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 11. , 1. , 2. ],
[ 2. , 12. , 1.20710678, 3. ],
[ 8. , 13. , 1.20710678, 3. ],
[ 5. , 14. , 1.20710678, 3. ],
[10. , 15. , 1.20710678, 3. ],
[18. , 19. , 3.05595762, 6. ],
[16. , 17. , 3.32379407, 6. ],
[20. , 21. , 4.06357713, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32)
>>> fcluster(Z, 1.5, criterion='distance')
array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 4, criterion='distance')
array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32)
>>> fcluster(Z, 6, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Perform centroid/UPGMC linkage.
See `linkage` for more information on the input matrix,
return structure, and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular part of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
an m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the `linkage` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import centroid, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = centroid(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3.33333333, 6. ],
[16. , 17. , 3.33333333, 6. ],
[20. , 21. , 3.33333333, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32)
>>> fcluster(Z, 1.1, criterion='distance')
array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)
>>> fcluster(Z, 2, criterion='distance')
array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 4, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Perform median/WPGMC linkage.
See `linkage` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See `linkage`
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular part of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
an m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import median, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = median(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3. , 6. ],
[16. , 17. , 3.5 , 6. ],
[20. , 21. , 3.25 , 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32)
>>> fcluster(Z, 1.1, criterion='distance')
array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)
>>> fcluster(Z, 2, criterion='distance')
array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 4, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Perform Ward's linkage on a condensed distance matrix.
See `linkage` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``y``.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular part of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
an m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix. See
`linkage` for more information on the return structure and
algorithm.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import ward, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = ward(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, 1.1, criterion='distance')
array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)
>>> fcluster(Z, 3, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, 9, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean', optimal_ordering=False):
"""
Perform hierarchical/agglomerative clustering.
The input y may be either a 1-D condensed distance matrix
or a 2-D array of observation vectors.
If y is a 1-D condensed distance matrix,
then y must be a :math:`\\binom{n}{2}` sized
vector, where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall, :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and :math:`j` in
cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster :math:`u` was formed from clusters :math:`s` and :math:`t`,
and :math:`v` is a remaining cluster in the forest (also called WPGMA).
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids :math:`s` and
:math:`t` gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed distance matrix
is a flat array containing the upper triangular part of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in :math:`n` dimensions may be passed as
an :math:`m` by :math:`n` array. All elements of the condensed distance
matrix must be finite, i.e., no NaNs or infs.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``pdist``
function for a list of valid distance metrics. A custom distance
function can also be used.
optimal_ordering : bool, optional
If True, the linkage matrix will be reordered so that the distance
between successive leaves is minimal. This results in a more intuitive
tree structure when the data are visualized. Defaults to False, because
this algorithm can be slow, particularly on large datasets [2]_. See
also the `optimal_leaf_ordering` function.
.. versionadded:: 1.0.0
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
Notes
-----
1. For method 'single', an optimized algorithm based on minimum spanning
tree is implemented. It has time complexity :math:`O(n^2)`.
For methods 'complete', 'average', 'weighted' and 'ward', an algorithm
called nearest-neighbors chain is implemented. It also has time
complexity :math:`O(n^2)`.
For other methods, a naive algorithm is implemented with :math:`O(n^3)`
time complexity.
All algorithms use :math:`O(n^2)` memory.
Refer to [1]_ for details about the algorithms.
2. Methods 'centroid', 'median', and 'ward' are correctly defined only if
the Euclidean pairwise metric is used. If `y` is passed as precomputed
pairwise distances, then it is the user's responsibility to ensure that
these distances are in fact Euclidean, otherwise the produced result
will be incorrect.
See Also
--------
scipy.spatial.distance.pdist : pairwise distance metrics
References
----------
.. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
algorithms", :arXiv:`1109.2378v1`.
.. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal
leaf ordering for hierarchical clustering", 2001. Bioinformatics
:doi:`10.1093/bioinformatics/17.suppl_1.S22`
Examples
--------
>>> from scipy.cluster.hierarchy import dendrogram, linkage
>>> from matplotlib import pyplot as plt
>>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]
>>> Z = linkage(X, 'ward')
>>> fig = plt.figure(figsize=(25, 10))
>>> dn = dendrogram(Z)
>>> Z = linkage(X, 'single')
>>> fig = plt.figure(figsize=(25, 10))
>>> dn = dendrogram(Z)
>>> plt.show()
"""
xp = array_namespace(y)
y = as_xparray(y, order='C', dtype=xp.float64, xp=xp)
if method not in _LINKAGE_METHODS:
raise ValueError(f"Invalid method: {method}")
if method in _EUCLIDEAN_METHODS and metric != 'euclidean' and y.ndim == 2:
msg = f"`method={method}` requires the distance metric to be Euclidean"
raise ValueError(msg)
if y.ndim == 1:
distance.is_valid_y(y, throw=True, name='y')
elif y.ndim == 2:
if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and
xp.all(y >= 0) and np.allclose(y, y.T)):
warnings.warn('The symmetric non-negative hollow observation '
'matrix looks suspiciously like an uncondensed '
'distance matrix',
ClusterWarning, stacklevel=2)
y = distance.pdist(y, metric)
y = xp.asarray(y)
else:
raise ValueError("`y` must be 1 or 2 dimensional.")
if not xp.all(xp.isfinite(y)):
raise ValueError("The condensed distance matrix must contain only "
"finite values.")
n = int(distance.num_obs_y(y))
method_code = _LINKAGE_METHODS[method]
y = np.asarray(y)
if method == 'single':
result = _hierarchy.mst_single_linkage(y, n)
elif method in ['complete', 'average', 'weighted', 'ward']:
result = _hierarchy.nn_chain(y, n, method_code)
else:
result = _hierarchy.fast_linkage(y, n, method_code)
result = xp.asarray(result)
if optimal_ordering:
y = xp.asarray(y)
return optimal_leaf_ordering(result, y)
else:
return result
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The `to_tree` function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
All parameter names are also attributes.
Parameters
----------
id : int
The node id.
left : ClusterNode instance, optional
The left child tree node.
right : ClusterNode instance, optional
The right child tree node.
dist : float, optional
Distance for this cluster in the linkage matrix.
count : int, optional
The number of samples in this cluster.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
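Examples
--------
Nodes are normally produced by `to_tree`, but a small tree can also be
built by hand for illustration (a minimal sketch):

>>> from scipy.cluster.hierarchy import ClusterNode
>>> left = ClusterNode(0)
>>> right = ClusterNode(1)
>>> parent = ClusterNode(2, left, right, dist=0.5)
>>> parent.is_leaf()
False
>>> parent.get_count()
2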
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def __lt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist < node.dist
def __gt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist > node.dist
def __eq__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist == node.dist
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to the non-singleton cluster
formed at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
this cluster node. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Return a reference to the right child tree object.
Returns
-------
right : ClusterNode
The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Return True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Perform pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
Given the ``i``-th leaf node in the pre-order traversal ``n[i]``,
the result of ``func(n[i])`` is stored in ``L[i]``. If not
provided, the index of the original observation to which the node
corresponds is used.
Returns
-------
L : list
The pre-order traversal.
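Examples
--------
A minimal hand-built tree (such trees are normally obtained from
`to_tree`) illustrates the traversal order:

>>> from scipy.cluster.hierarchy import ClusterNode
>>> leaves = [ClusterNode(i) for i in range(3)]
>>> inner = ClusterNode(3, leaves[0], leaves[1], dist=1.0)
>>> root = ClusterNode(4, inner, leaves[2], dist=2.0)
>>> root.pre_order()
[0, 1, 2]
>>> root.pre_order(lambda x: x.id * 10)
[0, 10, 20]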
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def _order_cluster_tree(Z):
"""
Return clustering nodes in bottom-up order by distance.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
Returns
-------
nodes : list
A list of ClusterNode objects.
"""
q = deque()
tree = to_tree(Z)
q.append(tree)
nodes = []
while q:
node = q.popleft()
if not node.is_leaf():
bisect.insort_left(nodes, node)
q.append(node.get_right())
q.append(node.get_left())
return nodes
def cut_tree(Z, n_clusters=None, height=None):
"""
Given a linkage matrix Z, return the cut tree.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
n_clusters : array_like, optional
Number of clusters in the tree at the cut point.
height : array_like, optional
The height at which to cut the tree. Only possible for ultrametric
trees.
Returns
-------
cutree : array
An array indicating group membership at each agglomeration step. I.e.,
for a full cut tree, in the first column each data point is in its own
cluster. At the next step, two nodes are merged. Finally, all
singleton and non-singleton clusters are in one group. If `n_clusters`
or `height` are given, the columns correspond to the columns of
`n_clusters` or `height`.
Examples
--------
>>> from scipy import cluster
>>> import numpy as np
>>> from numpy.random import default_rng
>>> rng = default_rng()
>>> X = rng.random((50, 4))
>>> Z = cluster.hierarchy.ward(X)
>>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
>>> cutree[:10]
array([[0, 0],
[1, 1],
[2, 2],
[3, 3],
[3, 4],
[2, 2],
[0, 0],
[1, 5],
[3, 6],
[4, 7]]) # random
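A cut can also be specified by ``height`` instead of the number of
clusters; since the data above are random, only the shape of the result
is shown here:

>>> cluster.hierarchy.cut_tree(Z, height=[1.0, 2.0]).shape
(50, 2)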
"""
xp = array_namespace(Z)
nobs = num_obs_linkage(Z)
nodes = _order_cluster_tree(Z)
if height is not None and n_clusters is not None:
raise ValueError("At least one of either height or n_clusters "
"must be None")
elif height is None and n_clusters is None: # return the full cut tree
cols_idx = xp.arange(nobs)
elif height is not None:
height = xp.asarray(height)
heights = xp.asarray([x.dist for x in nodes])
cols_idx = xp.searchsorted(heights, height)
else:
n_clusters = xp.asarray(n_clusters)
cols_idx = nobs - xp.searchsorted(xp.arange(nobs), n_clusters)
try:
n_cols = len(cols_idx)
except TypeError: # scalar
n_cols = 1
cols_idx = xp.asarray([cols_idx])
groups = xp.zeros((n_cols, nobs), dtype=xp.int64)
last_group = xp.arange(nobs)
if 0 in cols_idx:
groups[0] = last_group
for i, node in enumerate(nodes):
idx = node.pre_order()
this_group = as_xparray(last_group, copy=True, xp=xp)
# TODO ARRAY_API complex indexing not supported
this_group[idx] = xp.min(last_group[idx])
this_group[this_group > xp.max(last_group[idx])] -= 1
if i + 1 in cols_idx:
groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group
last_group = this_group
return groups.T
def to_tree(Z, rd=False):
"""
Convert a linkage matrix into an easy-to-use tree object.
The reference to the root `ClusterNode` object is returned (by default).
Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``,
and ``count`` attribute. The left and right attributes point to
ClusterNode objects that were combined to generate the cluster.
If both are None then the `ClusterNode` object is a leaf node, its count
must be 1, and its distance is meaningless but set to 0.
*Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.*
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the `linkage`
function documentation).
rd : bool, optional
When False (default), a reference to the root `ClusterNode` object is
returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a
reference to the root node while ``d`` is a list of `ClusterNode`
objects - one per original entry in the linkage matrix plus entries
for all clustering steps. If a cluster id is
less than the number of samples ``n`` in the data that the linkage
matrix describes, then it corresponds to a singleton cluster (leaf
node).
See `linkage` for more information on the assignment of cluster ids
to clusters.
Returns
-------
tree : ClusterNode or tuple (ClusterNode, list of ClusterNode)
If ``rd`` is False, a `ClusterNode`.
If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number
of samples. See the description of `rd` above for more details.
See Also
--------
linkage, is_valid_linkage, ClusterNode
Examples
--------
>>> import numpy as np
>>> from scipy.cluster import hierarchy
>>> rng = np.random.default_rng()
>>> x = rng.random((5, 2))
>>> Z = hierarchy.linkage(x)
>>> hierarchy.to_tree(Z)
<scipy.cluster.hierarchy.ClusterNode object at ...
>>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True)
>>> rootnode
<scipy.cluster.hierarchy.ClusterNode object at ...
>>> len(nodelist)
9
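The root of the tree holds all of the original observations:

>>> rootnode.get_count()
5
>>> rootnode.get_left().get_count() + rootnode.get_right().get_count()
5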
"""
xp = array_namespace(Z)
Z = as_xparray(Z, order='c', xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
# Number of original objects is equal to the number of rows plus 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in range(0, n):
d[i] = ClusterNode(i)
nd = None
for i in range(Z.shape[0]):
row = Z[i, :]
fi = int_floor(row[0], xp)
fj = int_floor(row[1], xp)
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], row[2])
# ^ id ^ left ^ right ^ dist
if row[3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def optimal_leaf_ordering(Z, y, metric='euclidean'):
"""
Given a linkage matrix Z and a distance matrix, reorder the linkage matrix so that the distance between successive leaves is minimal.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix. See
`linkage` for more information on the return structure and
algorithm.
y : ndarray
The condensed distance matrix from which Z was generated.
Alternatively, a collection of m observation vectors in n
dimensions may be passed as an m by n array.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``pdist``
function for a list of valid distance metrics. A custom distance
function can also be used.
Returns
-------
Z_ordered : ndarray
A copy of the linkage matrix Z, reordered to minimize the distance
between adjacent leaves.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster import hierarchy
>>> rng = np.random.default_rng()
>>> X = rng.standard_normal((10, 10))
>>> Z = hierarchy.ward(X)
>>> hierarchy.leaves_list(Z)
array([0, 3, 1, 9, 2, 5, 7, 4, 6, 8], dtype=int32)
>>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X))
array([3, 0, 2, 5, 7, 4, 8, 6, 9, 1], dtype=int32)
"""
xp = array_namespace(Z, y)
Z = as_xparray(Z, order='C', xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
y = as_xparray(y, order='C', dtype=xp.float64, xp=xp)
if y.ndim == 1:
distance.is_valid_y(y, throw=True, name='y')
elif y.ndim == 2:
if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and
np.all(y >= 0) and np.allclose(y, y.T)):
warnings.warn('The symmetric non-negative hollow observation '
'matrix looks suspiciously like an uncondensed '
'distance matrix',
ClusterWarning, stacklevel=2)
y = distance.pdist(y, metric)
y = xp.asarray(y)
else:
raise ValueError("`y` must be 1 or 2 dimensional.")
if not xp.all(xp.isfinite(y)):
raise ValueError("The condensed distance matrix must contain only "
"finite values.")
Z = np.asarray(Z)
y = np.asarray(y)
return xp.asarray(_optimal_leaf_ordering.optimal_leaf_ordering(Z, y))
def cophenet(Z, Y=None):
"""
Calculate the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively, and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``p`` and ``q`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see `linkage` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
See Also
--------
linkage :
for a description of what a linkage matrix is.
scipy.spatial.distance.squareform :
transforming condensed matrices into square ones.
Examples
--------
>>> from scipy.cluster.hierarchy import single, cophenet
>>> from scipy.spatial.distance import pdist, squareform
Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance
between two points of ``X`` is the distance between the two largest
distinct clusters that contain the points, i.e., the distance at which
the two points are first joined into the same cluster:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
``X`` corresponds to this dataset ::
x x x x
x x
x x
x x x x
>>> Z = single(pdist(X))
>>> Z
array([[ 0., 1., 1., 2.],
[ 2., 12., 1., 3.],
[ 3., 4., 1., 2.],
[ 5., 14., 1., 3.],
[ 6., 7., 1., 2.],
[ 8., 16., 1., 3.],
[ 9., 10., 1., 2.],
[11., 18., 1., 3.],
[13., 15., 2., 6.],
[17., 20., 2., 9.],
[19., 21., 2., 12.]])
>>> cophenet(Z)
array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2.,
2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.])
The output of the `scipy.cluster.hierarchy.cophenet` method is
represented in condensed form. We can use
`scipy.spatial.distance.squareform` to see the output as a
regular matrix (where each element ``ij`` denotes the cophenetic distance
between each ``i``, ``j`` pair of points in ``X``):
>>> squareform(cophenet(Z))
array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
[1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
[1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
[2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.],
[2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.],
[2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.],
[2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.],
[2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.],
[2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]])
In this example, the cophenetic distance between points of ``X`` that are
very close (i.e., in the same corner) is 1. For all other pairs of points,
it is 2, because those points lie in clusters at different
corners - thus, the distance between these clusters is larger.
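If the condensed distance matrix from which ``Z`` was generated is passed
as well, the cophenetic correlation coefficient is returned together with
the distances (only the call is shown here, as a sketch):

>>> c, d = cophenet(Z, pdist(X))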
"""
xp = array_namespace(Z, Y)
# Ensure float64 C-contiguous array. Cython code doesn't deal with striding.
Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
zz = np.zeros((n * (n-1)) // 2, dtype=np.double)
Z = np.asarray(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
zz = xp.asarray(zz)
if Y is None:
return zz
Y = as_xparray(Y, order='C', xp=xp)
distance.is_valid_y(Y, throw=True, name='Y')
z = xp.mean(zz)
y = xp.mean(Y)
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy**2
denomB = Zz**2
c = xp.sum(numerator) / xp.sqrt(xp.sum(denomA) * xp.sum(denomB))
return (c, zz)
def inconsistent(Z, d=2):
r"""
Calculate inconsistency statistics on a linkage matrix.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
clustering). See `linkage` documentation for more information on its
form.
d : int, optional
The number of links up to `d` levels below each non-singleton cluster.
Returns
-------
R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
statistics for the non-singleton cluster ``i``. The link statistics are
computed over the link heights for links :math:`d` levels below the
cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is the number
of links included in the calculation; and ``R[i,3]`` is the
inconsistency coefficient,
.. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}}{\mathtt{R[i,1]}}
Notes
-----
This function behaves similarly to the MATLAB(TM) ``inconsistent``
function.
Examples
--------
>>> from scipy.cluster.hierarchy import inconsistent, linkage
>>> from matplotlib import pyplot as plt
>>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]
>>> Z = linkage(X, 'ward')
>>> print(Z)
[[ 5. 6. 0. 2. ]
[ 2. 7. 0. 2. ]
[ 0. 4. 1. 2. ]
[ 1. 8. 1.15470054 3. ]
[ 9. 10. 2.12132034 4. ]
[ 3. 12. 4.11096096 5. ]
[11. 13. 14.07183949 8. ]]
>>> inconsistent(Z)
array([[ 0. , 0. , 1. , 0. ],
[ 0. , 0. , 1. , 0. ],
[ 1. , 0. , 1. , 0. ],
[ 0.57735027, 0.81649658, 2. , 0.70710678],
[ 1.04044011, 1.06123822, 3. , 1.01850858],
[ 3.11614065, 1.40688837, 2. , 0.70710678],
[ 6.44583366, 6.76770586, 3. , 1.12682288]])
"""
xp = array_namespace(Z)
Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
n = Z.shape[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
Z = np.asarray(Z)
_hierarchy.inconsistent(Z, R, int(n), int(d))
R = xp.asarray(R)
return R
def from_mlab_linkage(Z):
"""
Convert a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the
number of original observations (leaves) in the non-singleton
cluster ``i``.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with ``scipy.cluster.hierarchy``.
See Also
--------
linkage : for a description of what a linkage matrix is.
to_mlab_linkage : transform from SciPy to MATLAB format.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.hierarchy import ward, from_mlab_linkage
Given a linkage matrix in MATLAB format ``mZ``, we can use
`scipy.cluster.hierarchy.from_mlab_linkage` to import
it into SciPy format:
>>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1],
... [10, 11, 1], [3, 13, 1.29099445],
... [6, 14, 1.29099445],
... [9, 15, 1.29099445],
... [12, 16, 1.29099445],
... [17, 18, 5.77350269],
... [19, 20, 5.77350269],
... [21, 22, 8.16496581]])
>>> Z = from_mlab_linkage(mZ)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[ 11. , 15. , 1.29099445, 3. ],
[ 16. , 17. , 5.77350269, 6. ],
[ 18. , 19. , 5.77350269, 6. ],
[ 20. , 21. , 8.16496581, 12. ]])
As expected, the linkage matrix ``Z`` returned includes an
additional column counting the number of original samples in
each cluster. Also, all cluster indices are reduced by 1
(MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing).
"""
xp = array_namespace(Z)
Z = as_xparray(Z, dtype=xp.float64, order='C', xp=xp)
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return as_xparray(Z, copy=True, xp=xp)
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return as_xparray(Z, copy=True, xp=xp)
Zpart = as_xparray(Z, copy=True, xp=xp)
if xp.min(Zpart[:, 0:2]) != 1.0 and xp.max(Zpart[:, 0:2]) != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
Zpart = np.asarray(Zpart)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
res = np.hstack([Zpart, CS.reshape(Zs[0], 1)])
return xp.asarray(res)
def to_mlab_linkage(Z):
"""
Convert a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by ``scipy.cluster.hierarchy``.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
See Also
--------
linkage : for a description of what a linkage matrix is.
from_mlab_linkage : transform from Matlab to SciPy format.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, to_mlab_linkage
>>> from scipy.spatial.distance import pdist
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
After a linkage matrix ``Z`` has been created, we can use
`scipy.cluster.hierarchy.to_mlab_linkage` to convert it
into MATLAB format:
>>> mZ = to_mlab_linkage(Z)
>>> mZ
array([[ 1. , 2. , 1. ],
[ 4. , 5. , 1. ],
[ 7. , 8. , 1. ],
[ 10. , 11. , 1. ],
[ 3. , 13. , 1.29099445],
[ 6. , 14. , 1.29099445],
[ 9. , 15. , 1.29099445],
[ 12. , 16. , 1.29099445],
[ 17. , 18. , 5.77350269],
[ 19. , 20. , 5.77350269],
[ 21. , 22. , 8.16496581]])
The new linkage matrix ``mZ`` uses 1-indexing for all the
clusters (instead of 0-indexing). Also, the last column of
the original linkage matrix has been dropped.
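Converting back with `from_mlab_linkage` recovers the original SciPy
matrix, including the dropped count column (a quick round-trip check):

>>> import numpy as np
>>> from scipy.cluster.hierarchy import from_mlab_linkage
>>> bool(np.allclose(from_mlab_linkage(mZ), Z))
True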
"""
xp = array_namespace(Z)
Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return as_xparray(Z, copy=True, xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
ZP = as_xparray(Z[:, 0:3], copy=True, xp=xp)
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Return True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
See Also
--------
linkage : for a description of what a linkage matrix is.
Examples
--------
>>> from scipy.cluster.hierarchy import median, ward, is_monotonic
>>> from scipy.spatial.distance import pdist
By definition, some hierarchical clustering algorithms - such as
`scipy.cluster.hierarchy.ward` - produce monotonic assignments of
samples to clusters; however, this is not always true for other
hierarchical methods - e.g. `scipy.cluster.hierarchy.median`.
Given a linkage matrix ``Z`` (as the result of a hierarchical clustering
method) we can test programmatically whether it has the monotonicity
property or not, using `scipy.cluster.hierarchy.is_monotonic`:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
>>> is_monotonic(Z)
True
>>> Z = median(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3. , 6. ],
[16. , 17. , 3.5 , 6. ],
[20. , 21. , 3.25 , 12. ]])
>>> is_monotonic(Z)
False
Note that this method is equivalent to just verifying that the distances
in the third column of the linkage matrix appear in a monotonically
increasing order.
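Indeed, checking the third column directly gives the same answer for the
(non-monotonic) median linkage above:

>>> import numpy as np
>>> bool(np.all(np.diff(Z[:, 2]) >= 0))
False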
"""
xp = array_namespace(Z)
Z = as_xparray(Z, order='c', xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
# We expect the i'th value to be greater than its successor.
return xp.all(Z[1:, 2] >= Z[:-1, 2])
def is_valid_im(R, warning=False, throw=False, name=None):
"""Return True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
See Also
--------
linkage : for a description of what a linkage matrix is.
inconsistent : for the creation of a inconsistency matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im
>>> from scipy.spatial.distance import pdist
Given a data set ``X``, we can apply a clustering method to obtain a
linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
be also used to obtain the inconsistency matrix ``R`` associated to
this clustering process:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> R = inconsistent(Z)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
>>> R
array([[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1.14549722, 0.20576415, 2. , 0.70710678],
[1.14549722, 0.20576415, 2. , 0.70710678],
[1.14549722, 0.20576415, 2. , 0.70710678],
[1.14549722, 0.20576415, 2. , 0.70710678],
[2.78516386, 2.58797734, 3. , 1.15470054],
[2.78516386, 2.58797734, 3. , 1.15470054],
[6.57065706, 1.38071187, 3. , 1.15470054]])
Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that
``R`` is correct:
>>> is_valid_im(R)
True
However, if ``R`` is wrongly constructed (e.g., one of the standard
deviations is set to a negative value), then the check will fail:
>>> R[-1,1] = R[-1,1] * -1
>>> is_valid_im(R)
False
"""
xp = array_namespace(R)
R = as_xparray(R, order='c', xp=xp)
valid = True
name_str = "%r " % name if name else ''
try:
if R.dtype != xp.float64:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust be '
'two-dimensional.' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if xp.any(R[:, 0] < 0):
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if xp.any(R[:, 1] < 0):
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if xp.any(R[:, 2] < 0):
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Check the validity of a linkage matrix.
A linkage matrix is valid if it is a 2-D array (type double)
with :math:`n` rows and 4 columns. The first two columns must contain
indices between 0 and :math:`2n-1`. For a given row ``i``, the following
two expressions have to hold:
.. math::
0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1
I.e., a cluster cannot join another cluster unless the cluster being joined
has been generated.
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the linkage matrix is valid.
See Also
--------
linkage: for a description of what a linkage matrix is.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, is_valid_linkage
>>> from scipy.spatial.distance import pdist
All linkage matrices generated by the clustering methods in this module
will be valid (i.e., they will have the appropriate dimensions and the two
required expressions will hold for all the rows).
We can check this using `scipy.cluster.hierarchy.is_valid_linkage`:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
>>> is_valid_linkage(Z)
True
However, if we create a linkage matrix in a wrong way - or if we modify
a valid one in a way that the required expressions no longer hold -
then the check will fail:
>>> Z[3][1] = 20 # the cluster number 20 is not defined at this point
>>> is_valid_linkage(Z)
False
"""
xp = array_namespace(Z)
Z = as_xparray(Z, order='c', xp=xp)
valid = True
name_str = "%r " % name if name else ''
try:
if Z.dtype != xp.float64:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust be '
'two-dimensional.' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if (xp.any(Z[:, 0] < 0) or xp.any(Z[:, 1] < 0)):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if xp.any(Z[:, 2] < 0):
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if xp.any(Z[:, 3] < 0):
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in range(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set()
for i in range(0, n - 1):
if (float(Z[i, 0]) in chosen) or (float(Z[i, 1]) in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(float(Z[i, 0]))
chosen.add(float(Z[i, 1]))
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set()
for i in range(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Return the number of original observations of the linkage matrix passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, num_obs_linkage
>>> from scipy.spatial.distance import pdist
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
``Z`` is a linkage matrix obtained after using the Ward clustering method
with ``X``, a dataset with 12 data points.
>>> num_obs_linkage(Z)
12
"""
Z = as_xparray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Check for correspondence between linkage and condensed distance matrices.
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
See Also
--------
linkage : for a description of what a linkage matrix is.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, correspond
>>> from scipy.spatial.distance import pdist
This method can be used to check if a given linkage matrix ``Z`` has been
obtained from the application of a cluster method over a dataset ``X``:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> X_condensed = pdist(X)
>>> Z = ward(X_condensed)
Here, we can compare ``Z`` and ``X`` (in condensed form):
>>> correspond(Z, X_condensed)
True
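If the linkage and the condensed distance matrix describe different
numbers of original observations, the check fails:

>>> correspond(Z, pdist(X[:-1]))
False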
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
xp = array_namespace(Z, Y)
Z = as_xparray(Z, order='c', xp=xp)
Y = as_xparray(Y, order='c', xp=xp)
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Form flat clusters from the hierarchical clustering defined by
the given linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : scalar
For criteria 'inconsistent', 'distance' or 'monocrit',
this is the threshold to apply when forming flat clusters.
For 'maxclust' or 'maxclust_monocrit' criteria,
this would be the maximum number of clusters requested.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` :
If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t`, then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` :
Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` :
Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` :
Forms a flat cluster from a cluster node ``c``
with index ``i`` when ``monocrit[i] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do::
MR = maxRstat(Z, R, 3)
fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` :
Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do::
MI = maxinconsts(Z, R)
fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the ``'inconsistent'``
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. ``monocrit[i]`` is the
statistic upon which non-singleton cluster ``i`` is thresholded. The
monocrit vector must be monotonic, i.e., given a node ``c`` with
index ``i``, for all node indices ``j`` corresponding to nodes
below ``c``, ``monocrit[i] >= monocrit[j]``.
Returns
-------
fcluster : ndarray
An array of length ``n``. ``T[i]`` is the flat cluster number to
which original observation ``i`` belongs.
See Also
--------
linkage : for information about how hierarchical clustering methods work.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, fcluster
>>> from scipy.spatial.distance import pdist
All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward`
generate a linkage matrix ``Z`` as their output:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
This matrix represents a dendrogram, where the first and second elements
are the two clusters merged at each step, the third element is the
distance between these clusters, and the fourth element is the size of
the new cluster - the number of original data points included.
`scipy.cluster.hierarchy.fcluster` can be used to flatten the
dendrogram, obtaining as a result an assignment of the original data
points to single clusters.
This assignment mostly depends on a distance threshold ``t`` - the maximum
inter-cluster distance allowed:
>>> fcluster(Z, t=0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, t=1.1, criterion='distance')
array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)
>>> fcluster(Z, t=3, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, t=9, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
In the first case, the threshold ``t`` is too small to allow any two
samples in the data to form a cluster, so 12 different clusters are
returned.
In the second case, the threshold is large enough to allow the first
4 points to be merged with their nearest neighbors. So, here, only 8
clusters are returned.
The third case, with a much higher threshold, allows for up to 8 data
points to be connected - so 4 clusters are returned here.
Lastly, the threshold of the fourth case is large enough to allow for
all data points to be merged together - so a single cluster is returned.
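Instead of a distance threshold, the ``'maxclust'`` criterion can request
an upper bound on the number of flat clusters directly (a brief sketch;
only the set of labels is shown):

>>> T = fcluster(Z, t=4, criterion='maxclust')
>>> sorted(set(T.tolist()))
[1, 2, 3, 4]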
"""
xp = array_namespace(Z)
Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
if monocrit is not None:
monocrit = np.asarray(monocrit, order='C', dtype=np.float64)
Z = np.asarray(Z)
monocrit = np.asarray(monocrit)
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = as_xparray(R, order='C', dtype=xp.float64, xp=xp)
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
R = np.asarray(R)
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), t)
elif criterion == 'monocrit':
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return xp.asarray(T)
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the given
distance metric (Euclidean by default) to calculate distances between
original observations, performs hierarchical clustering using the given
linkage method (single by default), and forms flat clusters using the
inconsistency method with `t` as the cut-off threshold.
A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is
the index of the flat cluster to which the original observation ``i``
belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : scalar
For criteria 'inconsistent', 'distance' or 'monocrit',
this is the threshold to apply when forming flat clusters.
For 'maxclust' or 'maxclust_monocrit' criteria,
this would be the maximum number of clusters requested.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str or function, optional
The distance metric for calculating pairwise distances. See
``distance.pdist`` for descriptions, and `linkage` to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
See Also
--------
scipy.spatial.distance.pdist : pairwise distance metrics
Notes
-----
This function is similar to the MATLAB function ``clusterdata``.
Examples
--------
>>> from scipy.cluster.hierarchy import fclusterdata
This is a convenience method that abstracts all the steps of a
typical SciPy hierarchical clustering workflow:
* Transform the input data into a condensed matrix with `scipy.spatial.distance.pdist`.
* Apply a clustering method.
* Obtain flat clusters at a user-defined distance threshold ``t`` using `scipy.cluster.hierarchy.fcluster`.
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> fclusterdata(X, t=1)
array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
The output here (for the dataset ``X``, distance threshold ``t``, and the
default settings) is four clusters with three data points each.
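The same assignment can be reproduced by running the individual steps
manually with the corresponding defaults (``method='single'`` and
``criterion='inconsistent'``), as a sketch of the equivalent pipeline:

>>> from scipy.cluster.hierarchy import linkage, fcluster
>>> from scipy.spatial.distance import pdist
>>> fcluster(linkage(pdist(X), method='single'), t=1)
array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)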
"""
xp = array_namespace(X)
X = as_xparray(X, order='C', dtype=xp.float64)
if X.ndim != 2:
raise TypeError('The observation matrix X must be an n by m '
'array.')
Y = distance.pdist(X, metric=metric)
Y = xp.asarray(Y)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = as_xparray(R, order='c', xp=xp)
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Return a list of leaf node ids.
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See `linkage` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
See Also
--------
dendrogram : for information about dendrogram structure.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list
>>> from scipy.spatial.distance import pdist
>>> from matplotlib import pyplot as plt
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
The linkage matrix ``Z`` represents a dendrogram, that is, a tree that
encodes the structure of the clustering performed.
`scipy.cluster.hierarchy.leaves_list` shows the mapping between
indices in the ``X`` dataset and leaves in the dendrogram:
>>> leaves_list(Z)
array([ 2, 0, 1, 5, 3, 4, 8, 6, 7, 11, 9, 10], dtype=int32)
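The same ordering is exposed by `dendrogram` through its ``'leaves'`` key
(a quick consistency check, computed without plotting):

>>> dn = dendrogram(Z, no_plot=True)
>>> dn['leaves'] == leaves_list(Z).tolist()
True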
>>> fig = plt.figure(figsize=(25, 10))
>>> dn = dendrogram(Z)
>>> plt.show()
"""
xp = array_namespace(Z)
Z = as_xparray(Z, order='C', xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
Z = np.asarray(Z)
_hierarchy.prelist(Z, ML, n)
return xp.asarray(ML)
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= np.inf, size="6"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Remove duplicates AND preserve the original order of the elements.
The set class is not guaranteed to do this.
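For example:

>>> _remove_dups(['C1', 'C0', 'C0', 'C2', 'C1'])
['C1', 'C0', 'C2']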
"""
seen_before = set()
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='C0'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError as e:
raise ImportError("You must install the matplotlib library to plot "
"the dendrogram. Use no_plot=True to calculate the "
"dendrogram without plotting.") from e
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation in ('top', 'bottom'):
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
else:
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(iv_ticks)
if orientation == 'top':
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
leaf_rot = (float(_get_tick_rotation(len(ivl)))
if (leaf_rotation is None) else leaf_rotation)
leaf_font = (float(_get_tick_text_size(len(ivl)))
if (leaf_font_size is None) else leaf_font_size)
ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
elif orientation in ('left', 'right'):
if orientation == 'left':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
else:
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(iv_ticks)
if orientation == 'left':
ax.yaxis.set_ticks_position('right')
else:
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
leaf_font = (float(_get_tick_text_size(len(ivl)))
if (leaf_font_size is None) else leaf_font_size)
if leaf_rotation is not None:
ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
else:
ax.set_yticklabels(ivl, size=leaf_font)
# Let's use collections instead. This way there is a separate legend item
# for each tree grouping, rather than stupidly one for each line segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there's a grouping of links above the color threshold, it goes last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
Ellipse = matplotlib.patches.Ellipse
for (x, y) in contraction_marks:
if orientation in ('left', 'right'):
e = Ellipse((y, x), width=dvw / 100, height=1.0)
else:
e = Ellipse((x, y), width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
# C0 is used for above-threshold color
_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9')
_link_line_colors = list(_link_line_colors_default)
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for use by dendrogram.
Note that this palette is global (i.e., setting it once changes the colors
for all subsequent calls to `dendrogram`) and that it affects only
the colors below ``color_threshold``.
Note that `dendrogram` also accepts a custom coloring function through its
``link_color_func`` keyword, which is more flexible and non-global.
Parameters
----------
palette : list of str or None
A list of matplotlib color codes. The order of the color codes is the
order in which the colors are cycled through when color thresholding in
the dendrogram.
If ``None``, resets the palette to its default (which are matplotlib
default colors C1 to C9).
Returns
-------
None
See Also
--------
dendrogram
Notes
-----
Ability to reset the palette with ``None`` added in SciPy 0.17.0.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster import hierarchy
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
... 400., 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['C1', 'C0', 'C0', 'C0', 'C0']
>>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
>>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b')
>>> dn['color_list']
['c', 'b', 'b', 'b', 'b']
>>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
... above_threshold_color='k')
>>> dn['color_list']
['c', 'm', 'm', 'k', 'k']
Now, reset the color palette to its default:
>>> hierarchy.set_link_color_palette(None)
"""
if palette is None:
# reset to its default
palette = _link_line_colors_default
elif type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, str) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
global _link_line_colors
_link_line_colors = palette
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, leaf_font_size=None,
leaf_rotation=None, leaf_label_func=None,
show_contracted=False, link_color_func=None, ax=None,
above_threshold_color='C0'):
"""
Plot the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The top of the U-link indicates a
cluster merge. The two legs of the U-link indicate which clusters
were merged. The length of the two legs of the U-link represents
the distance between the child clusters. It is also the
cophenetic distance between original observations in the two
children clusters.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None``
No truncation is performed (default).
Note: ``'none'`` is an alias for ``None`` that's kept for
backward compatibility.
``'lastp'``
The last ``p`` non-singleton clusters formed in the linkage are the
only non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'level'``
No more than ``p`` levels of the dendrogram tree are displayed.
A "level" includes all nodes with ``p`` merges from the final merge.
Note: ``'mtica'`` is an alias for ``'level'`` that's kept for
backward compatibility.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
with the default matplotlib color ``'C0'``. If :math:`t` is less
than or equal to zero, all nodes are colored ``'C0'``.
If ``color_threshold`` is None or 'default',
corresponding with MATLAB(TM) behavior, the threshold is set to
``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j`` means that cluster
node ``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plots descendent links going
downwards (default).
``'bottom'``
Plots the root at the bottom, and plots descendent links going
upwards.
``'left'``
Plots the root at the left, and plots descendent links going right.
``'right'``
Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default, ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`-sized
sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the
text to put under the :math:`i` th leaf node only if it corresponds to
an original observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which
n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note, ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which
n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
When ``leaf_label_func`` is a callable function, it is called
for each leaf with cluster index :math:`k < 2n-1`, and it
is expected to return a string with the label for that
leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
# leaf_label_func can also be used together with ``truncate_mode`` parameter,
# in which case you will get your leaves labeled after truncation:
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90,
truncate_mode='level', p=2)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is ``'C0'``.
Returns
-------
R : dict
A dictionary of data structures computed to render the
dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i with ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
``'leaves_color_list'``
A list of color names. The k'th element represents the color of the
k'th leaf.
See Also
--------
linkage, set_link_color_palette
Notes
-----
It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise
crossings appear in the dendrogram.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster import hierarchy
>>> import matplotlib.pyplot as plt
A very basic example:
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
... 400., 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> plt.figure()
>>> dn = hierarchy.dendrogram(Z)
Now, plot in given axes, improve the color scheme and use both vertical and
horizontal orientations:
>>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
>>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
>>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
... orientation='top')
>>> dn2 = hierarchy.dendrogram(Z, ax=axes[1],
... above_threshold_color='#bcbddc',
... orientation='right')
>>> hierarchy.set_link_color_palette(None) # reset to default after use
>>> plt.show()
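Large linkages can be condensed with ``truncate_mode``. A minimal
sketch, reusing the ``Z`` from above and assuming ``p=3`` leaves are
enough:

>>> dn3 = hierarchy.dendrogram(Z, truncate_mode='lastp', p=3,
...                            no_plot=True)
>>> len(dn3['ivl'])  # only p leaves remain after truncation
3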
"""
# This feature was thought about but never implemented (still useful?):
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = as_xparray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
if labels is not None:
try:
len_labels = len(labels)
except (TypeError, AttributeError):
len_labels = labels.shape[0]
if Z.shape[0] + 1 != len_labels:
raise ValueError("Dimensions of Z and labels must be consistent.")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mtica', 'level', 'none', None):
# 'mtica' is kept working for backwards compat.
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica':
# 'mtica' is an alias
truncate_mode = 'level'
if truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
ivl = [] # list of leaves
if color_threshold is None or (isinstance(color_threshold, str) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
# Empty list will be filled in _dendrogram_calculate_info
contraction_marks = [] if show_contracted else None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2*n - 2,
iv=0.0,
ivl=ivl,
n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list,
lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
R["leaves_color_list"] = _get_leaves_color_list(R)
return R
def _get_leaves_color_list(R):
leaves_color_list = [None] * len(R['leaves'])
for link_x, link_y, link_color in zip(R['icoord'],
R['dcoord'],
R['color_list']):
for (xi, yi) in zip(link_x, link_y):
if yi == 0.0 and (xi % 5 == 0 and xi % 2 == 1):
# if yi is 0.0 and xi is divisible by 5 and odd,
# the point is a leaf
# xi of leaves are 5, 15, 25, 35, ... (see `iv_ticks`)
# index of leaves are 0, 1, 2, 3, ... as below
leaf_index = (int(xi) - 5) // 10
# each leaf has a same color of its link.
leaves_color_list[leaf_index] = link_color
return leaves_color_list
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(np.asarray(Z[i - n, 3], dtype=np.int64)) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks, xp):
_append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp),
n, contraction_marks, xp)
_append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp),
n, contraction_marks, xp)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks, xp):
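# Recursively record (position, merge height) for every non-singleton
# node inside the contracted subtree; _plot_dendrogram draws these as
# crosses along the truncated link.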
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp),
n, contraction_marks, xp)
_append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp),
n, contraction_marks, xp)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='C0'):
"""
Calculate the endpoints of the links as well as the labels for
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is ``max(Z[*,2])`` for all nodes ``*`` below and including
the target node.
"""
xp = array_namespace(Z)
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-singleton
# cluster, its label is either the empty string or the number of
# original observations belonging to cluster i.
if 2*n - p > i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode == 'level':
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# Otherwise, only truncate if we have a leaf node.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int_floor(Z[i - n, 0], xp)
ab = int_floor(Z[i - n, 1], xp)
if aa >= n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab >= n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort is True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort is True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
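# Choose the link color: links at or above color_threshold get
# above_threshold_color; the palette index advances each time the
# traversal returns above the threshold, so each below-threshold
# subtree gets its own palette color.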
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, str):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determine if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
See Also
--------
linkage : for a description of what a linkage matrix is.
fcluster : for the creation of flat cluster assignments.
Examples
--------
>>> from scipy.cluster.hierarchy import fcluster, is_isomorphic
>>> from scipy.cluster.hierarchy import single, complete
>>> from scipy.spatial.distance import pdist
Two flat cluster assignments can be isomorphic if they represent the same
cluster assignment, with different labels.
For example, we can use the `scipy.cluster.hierarchy.single`: method
and flatten the output to four clusters:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = single(pdist(X))
>>> T = fcluster(Z, 1, criterion='distance')
>>> T
array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
We can then do the same using the
`scipy.cluster.hierarchy.complete`: method:
>>> Z = complete(pdist(X))
>>> T_ = fcluster(Z, 1.5, criterion='distance')
>>> T_
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
As we can see, in both cases we obtain four clusters and all the data
points are distributed in the same way - the only things that change
are the flat cluster labels (3 => 1, 4 => 2, 2 => 3 and 1 => 4), so both
cluster assignments are isomorphic:
>>> is_isomorphic(T, T_)
True
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d1 = {}
d2 = {}
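# Build a label bijection T1 <-> T2 incrementally; any conflict with a
# previously recorded pairing means the assignments are not isomorphic.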
for i in range(0, n):
if T1[i] in d1:
if T2[i] not in d2:
return False
if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]:
return False
elif T2[i] in d2:
return False
else:
d1[T1[i]] = T2[i]
d2[T2[i]] = T1[i]
return True
def maxdists(Z):
"""
Return the maximum distance within each non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
See Also
--------
linkage : for a description of what a linkage matrix is.
is_monotonic : for testing for monotonicity of a linkage matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import median, maxdists
>>> from scipy.spatial.distance import pdist
Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`
computes for each new cluster generated (i.e., for each row of the linkage
matrix) what is the maximum distance between any two child clusters.
Due to the nature of hierarchical clustering, in many cases this is going
to be just the distance between the two child clusters that were merged
to form the current one - that is, Z[:,2].
However, for non-monotonic cluster assignments such as
`scipy.cluster.hierarchy.median` clustering this is not always the
case: There may be cluster formations where the distance between the two
clusters merged is smaller than the distance between their children.
We can see this in an example:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = median(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3. , 6. ],
[16. , 17. , 3.5 , 6. ],
[20. , 21. , 3.25 , 12. ]])
>>> maxdists(Z)
array([1. , 1. , 1. , 1. , 1.11803399,
1.11803399, 1.11803399, 1.11803399, 3. , 3.5 ,
3.5 ])
Note that while the distance between the two clusters merged when creating the
last cluster is 3.25, there are two children (clusters 16 and 17) whose distance
is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in
this case.
"""
xp = array_namespace(Z)
Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
Z = np.asarray(Z)
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
MD = xp.asarray(MD)
return MD
def maxinconsts(Z, R):
"""
Return the maximum inconsistency coefficient for each
non-singleton cluster and its children.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
`linkage` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
See Also
--------
linkage : for a description of what a linkage matrix is.
inconsistent : for the creation of an inconsistency matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts
>>> from scipy.spatial.distance import pdist
Given a data set ``X``, we can apply a clustering method to obtain a
linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
be also used to obtain the inconsistency matrix ``R`` associated to
this clustering process:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = median(pdist(X))
>>> R = inconsistent(Z)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3. , 6. ],
[16. , 17. , 3.5 , 6. ],
[20. , 21. , 3.25 , 12. ]])
>>> R
array([[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.74535599, 1.08655358, 3. , 1.15470054],
[1.91202266, 1.37522872, 3. , 1.15470054],
[3.25 , 0.25 , 3. , 0. ]])
Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute
the maximum value of the inconsistency statistic (the last column of
``R``) for each non-singleton cluster and its children:
>>> maxinconsts(Z, R)
array([0. , 0. , 0. , 0. , 0.70710678,
0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
1.15470054])
"""
xp = array_namespace(Z, R)
Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)
R = as_xparray(R, order='C', dtype=xp.float64, xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
Z = np.asarray(Z)
R = np.asarray(R)
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
MI = xp.asarray(MI)
return MI
def maxRstat(Z, R, i):
"""
Return the maximum statistic for each non-singleton cluster and its
children.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See `linkage` for more
information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
``Q(j)`` is the set of all node ids corresponding to nodes below
and including ``j``.
See Also
--------
linkage : for a description of what a linkage matrix is.
inconsistent : for the creation of an inconsistency matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
>>> from scipy.spatial.distance import pdist
Given a data set ``X``, we can apply a clustering method to obtain a
linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
be also used to obtain the inconsistency matrix ``R`` associated to
this clustering process:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = median(pdist(X))
>>> R = inconsistent(Z)
>>> R
array([[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.74535599, 1.08655358, 3. , 1.15470054],
[1.91202266, 1.37522872, 3. , 1.15470054],
[3.25 , 0.25 , 3. , 0. ]])
`scipy.cluster.hierarchy.maxRstat` can be used to compute
the maximum value of each column of ``R``, for each non-singleton
cluster and its children:
>>> maxRstat(Z, R, 0)
array([1. , 1. , 1. , 1. , 1.05901699,
1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
3.25 ])
>>> maxRstat(Z, R, 1)
array([0. , 0. , 0. , 0. , 0.08346263,
0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
1.37522872])
>>> maxRstat(Z, R, 3)
array([0. , 0. , 0. , 0. , 0.70710678,
0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
1.15470054])
"""
xp = array_namespace(Z, R)
Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)
R = as_xparray(R, order='C', dtype=xp.float64, xp=xp)
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
Z = np.asarray(Z)
R = np.asarray(R)
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
MR = xp.asarray(MR)
return MR
def leaders(Z, T):
"""
Return the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z, such that:
* leaf descendants belong only to flat cluster j
(i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where
:math:`S(i)` is the set of leaf ids of the leaf nodes descended
from cluster node :math:`i`)
* there does not exist a leaf that is not a descendant of
:math:`i` that also belongs to cluster :math:`j`
(i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
`linkage` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array,
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array, where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
See Also
--------
fcluster : for the creation of flat cluster assignments.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, fcluster, leaders
>>> from scipy.spatial.distance import pdist
Given a linkage matrix ``Z`` - obtained after applying a clustering method
to a dataset ``X`` - and a flat cluster assignment array ``T``:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
>>> T = fcluster(Z, 3, criterion='distance')
>>> T
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
`scipy.cluster.hierarchy.leaders` returns the indices of the nodes
in the dendrogram that are the leaders of each flat cluster:
>>> L, M = leaders(Z, T)
>>> L
array([16, 17, 18, 19], dtype=int32)
(remember that indices 0-11 point to the 12 data points in ``X``,
whereas indices 12-22 point to the 11 rows of ``Z``)
`scipy.cluster.hierarchy.leaders` also returns the indices of
the flat clusters in ``T``:
>>> M
array([1, 2, 3, 4], dtype=int32)
"""
xp = array_namespace(Z, T)
Z = as_xparray(Z, order='C', dtype=xp.float64)
T = as_xparray(T, order='C')
is_valid_linkage(Z, throw=True, name='Z')
if T.dtype != xp.int32:
raise TypeError('T must be a 1-D array of dtype int32.')
if T.shape[0] != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
n_clusters = int(xp.unique_values(T).shape[0])
n_obs = int(Z.shape[0] + 1)
L = np.zeros(n_clusters, dtype=np.int32)
M = np.zeros(n_clusters, dtype=np.int32)
Z = np.asarray(Z)
T = np.asarray(T, dtype=np.int32)
s = _hierarchy.leaders(Z, T, L, M, n_clusters, n_obs)
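# A nonnegative return value is the id of the linkage node at which the
# assignment vector T was found to be inconsistent.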
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
L, M = xp.asarray(L), xp.asarray(M)
return (L, M)
# === scipy-main/scipy/cluster/vq.py ===
"""
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Perform k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
----------------------
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroid. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid.
The minimization is achieved by iteratively reclassifying
the observations into clusters and recalculating the centroids until
a configuration is reached in which the centroids are stable. One can
also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred to as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be an M by N array, where the rows are
the observation vectors. The codebook is a k by N array, where the
ith row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh-tone colors would be represented in the
code book.
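As a minimal sketch of this encode/decode round trip (using a random
code book here instead of one fitted by k-means):

>>> import numpy as np
>>> from scipy.cluster.vq import vq
>>> rng = np.random.default_rng(0)
>>> pixels = rng.random((6, 3))       # six RGB-like observation vectors
>>> code_book = rng.random((4, 3))    # a 4-entry code book
>>> codes, _ = vq(pixels, code_book)  # transmit only these small indices
>>> decoded = code_book[codes]        # receiver looks the codes up again
>>> decoded.shape
(6, 3)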
"""
import warnings
import numpy as np
from collections import deque
from scipy._lib._array_api import (
as_xparray, array_namespace, size, atleast_nd
)
from scipy._lib._util import check_random_state, rng_integers
from scipy.spatial.distance import cdist
from . import _vq
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
class ClusterError(Exception):
pass
def whiten(obs, check_finite=True):
"""
Normalize a group of observations on a per feature basis.
Before running k-means, it is beneficial to rescale each feature
dimension of the observation set by its standard deviation (i.e. "whiten"
it - as in "white noise" where each frequency has equal power).
Each feature is divided by its standard deviation across all observations
to give it unit variance.
Parameters
----------
obs : ndarray
Each row of the array is an observation. The
columns are the features seen during each observation.
>>> # f0 f1 f2
>>> obs = [[ 1., 1., 1.], #o0
... [ 2., 2., 2.], #o1
... [ 3., 3., 3.], #o2
... [ 4., 4., 4.]] #o3
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
result : ndarray
Contains the values in `obs` scaled by the standard deviation
of each column.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.vq import whiten
>>> features = np.array([[1.9, 2.3, 1.7],
... [1.5, 2.5, 2.2],
... [0.8, 0.6, 1.7,]])
>>> whiten(features)
array([[ 4.17944278, 2.69811351, 7.21248917],
[ 3.29956009, 2.93273208, 9.33380951],
[ 1.75976538, 0.7038557 , 7.21248917]])
"""
xp = array_namespace(obs)
obs = as_xparray(obs, check_finite=check_finite, xp=xp)
std_dev = xp.std(obs, axis=0)
zero_std_mask = std_dev == 0
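# Avoid dividing by zero for constant features: leave those columns
# unchanged and emit a warning instead.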
if xp.any(zero_std_mask):
std_dev[zero_std_mask] = 1.0
warnings.warn("Some columns have standard deviation zero. "
"The values of these columns will not change.",
RuntimeWarning)
return obs / std_dev
def vq(obs, code_book, check_finite=True):
"""
Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
Parameters
----------
obs : ndarray
Each row of the 'M' x 'N' array is an observation. The columns are
the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code.
>>> # f0 f1 f2 f3
>>> code_book = [
... [ 1., 2., 3., 4.], #c0
... [ 1., 2., 3., 4.], #c1
... [ 1., 2., 3., 4.]] #c2
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
A length M array holding the code book index for each observation.
dist : ndarray
The distortion (distance) between the observation and its nearest
code.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.vq import vq
>>> code_book = np.array([[1., 1., 1.],
... [2., 2., 2.]])
>>> features = np.array([[1.9, 2.3, 1.7],
... [1.5, 2.5, 2.2],
... [0.8, 0.6, 1.7]])
>>> vq(features, code_book)
(array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))
"""
xp = array_namespace(obs, code_book)
obs = as_xparray(obs, xp=xp, check_finite=check_finite)
code_book = as_xparray(code_book, xp=xp, check_finite=check_finite)
ct = xp.result_type(obs, code_book)
c_obs = xp.astype(obs, ct, copy=False)
c_code_book = xp.astype(code_book, ct, copy=False)
if xp.isdtype(ct, kind='real floating'):
c_obs = np.asarray(c_obs)
c_code_book = np.asarray(c_code_book)
result = _vq.vq(c_obs, c_code_book)
return xp.asarray(result[0]), xp.asarray(result[1])
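# Fall back to the pure-Python implementation for dtypes the C routine
# does not handle (i.e. anything other than real floating types).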
return py_vq(obs, code_book, check_finite=False)
def py_vq(obs, code_book, check_finite=True):
""" Python version of vq algorithm.
The algorithm computes the Euclidean distance between each
observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expects a rank 2 array. Each row is one observation.
code_book : ndarray
Code book to use. Same format as obs. Should have the same number of
features (i.e., columns) as obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
code[i] gives the label of the ith observation; its code is
code_book[code[i]].
min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This function is slower than the C version but works for
all input types. If the inputs have the wrong types for the
C versions of the function, this one is called as a last resort.
It is about 20 times slower than the C version.
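Examples
--------
A minimal sketch (the call pattern mirrors `vq`):

>>> import numpy as np
>>> from scipy.cluster.vq import py_vq
>>> obs = np.array([[0., 0.], [1., 1.]])
>>> code_book = np.array([[0., 0.], [2., 2.]])
>>> py_vq(obs, code_book)
(array([0, 0]), array([0.        , 1.41421356]))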
"""
xp = array_namespace(obs, code_book)
obs = as_xparray(obs, xp=xp, check_finite=check_finite)
code_book = as_xparray(code_book, xp=xp, check_finite=check_finite)
if obs.ndim != code_book.ndim:
raise ValueError("Observation and code_book should have the same rank")
if obs.ndim == 1:
obs = obs[:, xp.newaxis]
code_book = code_book[:, xp.newaxis]
# Once `cdist` has array API support, this `xp.asarray` call can be removed
dist = xp.asarray(cdist(obs, code_book))
code = xp.argmin(dist, axis=1)
min_dist = xp.min(dist, axis=1)
return code, min_dist
def _kmeans(obs, guess, thresh=1e-5, xp=None):
""" "raw" version of k-means.
Returns
-------
code_book
The lowest distortion codebook found.
avg_dist
The average distance an observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
Examples
--------
Note: not whitened in this example.
>>> import numpy as np
>>> from scipy.cluster.vq import _kmeans
>>> features = np.array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = np.array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
xp = np if xp is None else xp
code_book = guess
diff = xp.inf
prev_avg_dists = deque([diff], maxlen=2)
while diff > thresh:
# compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book, check_finite=False)
prev_avg_dists.append(xp.mean(distort, axis=-1))
# recalc code_book as centroids of associated obs
obs = np.asarray(obs)
obs_code = np.asarray(obs_code)
code_book, has_members = _vq.update_cluster_means(obs, obs_code,
code_book.shape[0])
obs = xp.asarray(obs)
obs_code = xp.asarray(obs_code)
code_book = xp.asarray(code_book)
has_members = xp.asarray(has_members)
code_book = code_book[has_members]
diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1])
return code_book, prev_avg_dists[1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
*, seed=None):
"""
Performs k-means on a set of observation vectors forming k clusters.
The k-means algorithm adjusts the classification of the observations
into clusters and updates the cluster centroids until the position of
the centroids is stable over successive iterations. In this
implementation of the algorithm, the stability of the centroids is
determined by comparing the absolute value of the change in the average
Euclidean distance between the observations and their corresponding
centroids against a threshold. This yields
a code book mapping centroids to codes and vice versa.
Parameters
----------
obs : ndarray
Each row of the M by N array is an observation vector. The
columns are the features seen during each observation.
The features must be whitened first with the `whiten` function.
k_or_guess : int or ndarray
The number of centroids to generate. A code is assigned to
each centroid, which is also the row index of the centroid
in the code_book matrix generated.
The initial k centroids are chosen by randomly selecting
observations from the observation matrix. Alternatively,
passing a k by N array specifies the initial k centroids.
iter : int, optional
The number of times to run k-means, returning the codebook
with the lowest distortion. This argument is ignored if
initial centroids are specified with an array for the
``k_or_guess`` parameter. This parameter does not represent the
number of iterations of the k-means algorithm.
thresh : float, optional
Terminates the k-means algorithm if the change in
distortion since the last k-means iteration is less than
or equal to the threshold.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
Seed for initializing the pseudo-random number generator.
If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
The default is None.
Returns
-------
codebook : ndarray
A k by N array of k centroids. The ith centroid
codebook[i] is represented with the code i. The centroids
and codes generated represent the lowest distortion seen,
not necessarily the globally minimal distortion.
Note that the number of centroids is not necessarily the same as the
``k_or_guess`` parameter, because centroids assigned to no observations
are removed during iterations.
distortion : float
The mean (non-squared) Euclidean distance between the observations
passed and the centroids generated. Note the difference to the standard
definition of distortion in the context of the k-means algorithm, which
is the sum of the squared distances.
See Also
--------
kmeans2 : a different implementation of k-means clustering
with more methods for generating initial centroids but without
using a distortion change threshold as a stopping criterion.
whiten : must be called prior to passing an observation matrix
to kmeans.
Notes
-----
For more functionalities or optimal performance, you can use
`sklearn.cluster.KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_.
`This <https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html#comparison-of-high-performance-implementations>`_
is a benchmark result of several implementations.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.vq import vq, kmeans, whiten
>>> import matplotlib.pyplot as plt
>>> features = np.array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 0.1,0.1],
... [ 0.2,1.8],
... [ 2.0,0.5],
... [ 0.3,1.5],
... [ 1.0,1.0]])
>>> whitened = whiten(features)
>>> book = np.array((whitened[0],whitened[2]))
>>> kmeans(whitened,book)
(array([[ 2.3110306 , 2.86287398], # random
[ 0.93218041, 1.24398691]]), 0.85684700941625547)
>>> codes = 3
>>> kmeans(whitened,codes)
(array([[ 2.3110306 , 2.86287398], # random
[ 1.32544402, 0.65607529],
[ 0.40782893, 2.02786907]]), 0.5196582527686241)
>>> # Create 50 datapoints in two clusters a and b
>>> pts = 50
>>> rng = np.random.default_rng()
>>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
>>> b = rng.multivariate_normal([30, 10],
... [[10, 2], [2, 1]],
... size=pts)
>>> features = np.concatenate((a, b))
>>> # Whiten data
>>> whitened = whiten(features)
>>> # Find 2 clusters in the data
>>> codebook, distortion = kmeans(whitened, 2)
>>> # Plot whitened data and cluster centers in red
>>> plt.scatter(whitened[:, 0], whitened[:, 1])
>>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
>>> plt.show()
"""
xp = array_namespace(obs, k_or_guess)
obs = as_xparray(obs, xp=xp, check_finite=check_finite)
guess = as_xparray(k_or_guess, xp=xp, check_finite=check_finite)
if iter < 1:
raise ValueError("iter must be at least 1, got %s" % iter)
# Determine whether a count (scalar) or an initial guess (array) was passed.
if size(guess) != 1:
if size(guess) < 1:
raise ValueError("Asked for 0 clusters. Initial book was %s" %
guess)
return _kmeans(obs, guess, thresh=thresh, xp=xp)
# k_or_guess is a scalar, now verify that it's an integer
k = int(guess)
if k != guess:
raise ValueError("If k_or_guess is a scalar, it must be an integer.")
if k < 1:
raise ValueError("Asked for %d clusters." % k)
rng = check_random_state(seed)
# initialize best distance value to a large value
best_dist = xp.inf
for i in range(iter):
# the initial code book is randomly selected from observations
guess = _kpoints(obs, k, rng, xp)
book, dist = _kmeans(obs, guess, thresh=thresh, xp=xp)
if dist < best_dist:
best_book = book
best_dist = dist
return best_book, best_dist
def _kpoints(data, k, rng, xp):
"""Pick k points at random in data (one row = one observation).
Parameters
----------
data : ndarray
Expects a rank 1 or 2 array. Rank 1 arrays are assumed to describe
one-dimensional data; rank 2 arrays describe multidimensional data,
in which case one row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
x : ndarray
A 'k' by 'N' array containing the initial centroids
"""
idx = rng.choice(data.shape[0], size=int(k), replace=False)
return data[idx, ...]
def _krandinit(data, k, rng, xp):
"""Returns k samples of a random variable whose parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable whose mean and covariances are the ones estimated from the data.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
x : ndarray
A 'k' by 'N' array containing the initial centroids
"""
mu = xp.mean(data, axis=0)
if data.ndim == 1:
cov = xp.cov(data)
x = rng.standard_normal(size=k)
x = xp.asarray(x)
x *= xp.sqrt(cov)
elif data.shape[1] > data.shape[0]:
# initialize when the covariance matrix is rank deficient
_, s, vh = xp.linalg.svd(data - mu, full_matrices=False)
x = rng.standard_normal(size=(k, size(s)))
x = xp.asarray(x)
sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.))
x = xp.matmul(x, sVh)
else:
# TODO ARRAY_API cov not supported
cov = atleast_nd(xp.cov(data.T), ndim=2, xp=xp)
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = rng.standard_normal(size=(k, size(mu)))
x = xp.asarray(x)
x = xp.matmul(x, xp.linalg.cholesky(cov).T)
x += mu
return x
def _kpp(data, k, rng, xp):
""" Picks k points in the data based on the kmeans++ method.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
init : ndarray
A 'k' by 'N' array containing the initial centroids.
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
"""
dims = data.shape[1] if len(data.shape) > 1 else 1
init = xp.empty((int(k), dims))
for i in range(k):
if i == 0:
init[i, :] = data[rng_integers(rng, data.shape[0]), :]
else:
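# Squared distance from every observation to its nearest
# already-chosen centroid; the next centroid is then sampled with
# probability proportional to that distance (the "careful seeding").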
D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
probs = D2/D2.sum()
cumprobs = probs.cumsum()
r = rng.uniform()
cumprobs = np.asarray(cumprobs)
init[i, :] = data[np.searchsorted(cumprobs, r), :]
return init
_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmeans with a different initialization.")
def _missing_raise():
"""Raise a ClusterError when called."""
raise ClusterError("One of the clusters is empty. "
"Re-run kmeans with a different initialization.")
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
missing='warn', check_finite=True, *, seed=None):
"""
Classify a set of observations into k clusters using the k-means algorithm.
The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
included.
Parameters
----------
data : ndarray
A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
'M' array of 'M' 1-D observations.
k : int or ndarray
The number of clusters to form as well as the number of
centroids to generate. If the `minit` initialization string is
'matrix', or if an ndarray is given instead, it is
interpreted as the initial clusters to use.
iter : int, optional
Number of iterations of the k-means algorithm to run. Note
that this differs in meaning from the `iter` parameter to
the kmeans function.
thresh : float, optional
(not used yet)
minit : str, optional
Method for initialization. Available methods are 'random',
'points', '++' and 'matrix':
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
'points': choose k observations (rows) at random from data for
the initial centroids.
'++': choose k observations according to the kmeans++ method
(careful seeding)
'matrix': interpret the k parameter as a k by M (or length k
array for 1-D data) array of initial centroids.
missing : str, optional
Method to deal with empty clusters. Available methods are
'warn' and 'raise':
'warn': give a warning and continue.
'raise': raise a ClusterError and terminate the algorithm.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
Seed for initializing the pseudo-random number generator.
If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
The default is None.
Returns
-------
centroid : ndarray
A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
ith observation is closest to.
See Also
--------
kmeans
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
Examples
--------
>>> from scipy.cluster.vq import kmeans2
>>> import matplotlib.pyplot as plt
>>> import numpy as np
Create z, an array with shape (100, 2) containing a mixture of samples
from three multivariate normal distributions.
>>> rng = np.random.default_rng()
>>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
>>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
>>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
>>> z = np.concatenate((a, b, c))
>>> rng.shuffle(z)
Compute three clusters.
>>> centroid, label = kmeans2(z, 3, minit='points')
>>> centroid
array([[ 2.22274463, -0.61666946], # may vary
[ 0.54069047, 5.86541444],
[ 6.73846769, 4.01991898]])
How many points are in each cluster?
>>> counts = np.bincount(label)
>>> counts
array([29, 51, 20]) # may vary
Plot the clusters.
>>> w0 = z[label == 0]
>>> w1 = z[label == 1]
>>> w2 = z[label == 2]
>>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
>>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
>>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
>>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
>>> plt.axis('equal')
>>> plt.legend(shadow=True)
>>> plt.show()
"""
if int(iter) < 1:
raise ValueError("Invalid iter (%s), "
"must be a positive integer." % iter)
try:
miss_meth = _valid_miss_meth[missing]
except KeyError as e:
raise ValueError(f"Unknown missing method {missing!r}") from e
xp = array_namespace(data, k)
data = as_xparray(data, xp=xp, check_finite=check_finite)
code_book = as_xparray(k, xp=xp, copy=True)
if data.ndim == 1:
d = 1
elif data.ndim == 2:
d = data.shape[1]
else:
raise ValueError("Input of rank > 2 is not supported.")
if size(data) < 1 or size(code_book) < 1:
raise ValueError("Empty input is not supported.")
# If k is not a single value, it should be compatible with data's shape
if minit == 'matrix' or size(code_book) > 1:
if data.ndim != code_book.ndim:
raise ValueError("k array doesn't match data rank")
nc = code_book.shape[0]
if data.ndim > 1 and code_book.shape[1] != d:
raise ValueError("k array doesn't match data dimension")
else:
nc = int(code_book)
        if nc < 1:
            raise ValueError(f"Cannot ask kmeans2 for {nc} clusters"
                             f" (k was {code_book})")
elif nc != code_book:
warnings.warn("k was not an integer, was converted.")
try:
init_meth = _valid_init_meth[minit]
except KeyError as e:
raise ValueError(f"Unknown init method {minit!r}") from e
else:
rng = check_random_state(seed)
code_book = init_meth(data, code_book, rng, xp)
for i in range(iter):
# Compute the nearest neighbor for each obs using the current code book
label = vq(data, code_book, check_finite=check_finite)[0]
# Update the code book by computing centroids
data = np.asarray(data)
label = np.asarray(label)
new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
if not has_members.all():
miss_meth()
# Set the empty clusters to their previous positions
new_code_book[~has_members] = code_book[~has_members]
code_book = new_code_book
return xp.asarray(code_book), xp.asarray(label)
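# --- Editor's sketch (hypothetical helper, not part of scipy's API): the
# centroid-update step of the kmeans2 loop above in plain NumPy. Assumes
# `obs` is an (n, d) float array, `labels` an (n,) int array of cluster
# assignments, and `k` the number of clusters.
def _kmeans2_update_sketch(obs, labels, k):
    new_code_book = np.empty((k, obs.shape[1]))
    has_members = np.zeros(k, dtype=bool)
    for j in range(k):
        members = labels == j
        has_members[j] = members.any()
        if has_members[j]:
            # New centroid: mean of the observations assigned to cluster j.
            new_code_book[j] = obs[members].mean(axis=0)
    # Rows of empty clusters are left uninitialized; kmeans2 above resets
    # them to their previous positions (or warns/raises, per `missing`).
    return new_code_book, has_members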
| 30,365
| 35.986602
| 134
|
py
|
scipy
|
scipy-main/scipy/cluster/setup.py
|
DEFINE_MACROS = [("SCIPY_PY3K", None)]
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('cluster', parent_package, top_path)
config.add_data_dir('tests')
    config.add_extension('_vq',
                         sources=['_vq.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_hierarchy',
                         sources=['_hierarchy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_optimal_leaf_ordering',
                         sources=['_optimal_leaf_ordering.c'],
                         include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 797
| 27.5
| 79
|
py
|
scipy
|
scipy-main/scipy/cluster/__init__.py
|
"""
=========================================
Clustering package (:mod:`scipy.cluster`)
=========================================
.. currentmodule:: scipy.cluster
.. toctree::
:hidden:
cluster.vq
cluster.hierarchy
Clustering algorithms are useful in information theory, target detection,
communications, compression, and other areas. The `vq` module only
supports vector quantization and the k-means algorithms.
The `hierarchy` module provides functions for hierarchical and
agglomerative clustering. Its features include generating hierarchical
clusters from distance matrices,
calculating statistics on clusters, cutting linkages
to generate flat clusters, and visualizing clusters with dendrograms.
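A minimal sketch of that workflow, assuming four 1-D observations:
>>> from scipy.cluster.hierarchy import linkage, fcluster
>>> Z = linkage([[0.], [1.], [9.], [10.]], method='single')
>>> fcluster(Z, t=2, criterion='distance')
array([1, 1, 2, 2], dtype=int32)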
"""
__all__ = ['vq', 'hierarchy']
from . import vq, hierarchy
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 876
| 26.40625
| 73
|
py
|
scipy
|
scipy-main/scipy/cluster/tests/test_hierarchy.py
|
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
import pytest
from pytest import raises as assert_raises
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette, cut_tree, optimal_leaf_ordering,
_order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
from scipy.spatial.distance import pdist
from scipy.cluster._hierarchy import Heap
from scipy.conftest import (
array_api_compatible,
skip_if_array_api,
skip_if_array_api_gpu,
skip_if_array_api_backend,
)
from . import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except Exception:
have_matplotlib = False
@skip_if_array_api_gpu
class TestLinkage:
@array_api_compatible
def test_linkage_non_finite_elements_in_distance_matrix(self, xp):
# Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
# Exception expected.
y = xp.zeros((6,))
y[0] = xp.nan
assert_raises(ValueError, linkage, y)
def test_linkage_empty_distance_matrix(self):
        # Tests linkage(Y) where Y is an empty condensed distance matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
@array_api_compatible
def test_linkage_tdist(self, xp):
for method in ['single', 'complete', 'average', 'weighted']:
self.check_linkage_tdist(method, xp)
def check_linkage_tdist(self, method, xp):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
@array_api_compatible
def test_linkage_X(self, xp):
for method in ['centroid', 'median', 'ward']:
self.check_linkage_q(method, xp)
def check_linkage_q(self, method, xp):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(xp.asarray(hierarchy_test_data.X), method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
metric="euclidean")
Z = linkage(xp.asarray(y), method)
assert_allclose(Z, expectedZ, atol=1e-06)
@array_api_compatible
def test_compare_with_trivial(self, xp):
rng = np.random.RandomState(0)
n = 20
X = rng.rand(n, 2)
d = pdist(X)
for method, code in _LINKAGE_METHODS.items():
Z_trivial = _hierarchy.linkage(d, n, code)
Z = linkage(xp.asarray(d), method)
assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
@array_api_compatible
def test_optimal_leaf_ordering(self, xp):
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), optimal_ordering=True)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
assert_allclose(Z, expectedZ, atol=1e-10)
@skip_if_array_api_gpu
class TestLinkageTies:
_expectations = {
'single': np.array([[0, 1, 1.41421356, 2],
[2, 3, 1.41421356, 3]]),
'complete': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.82842712, 3]]),
'average': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'weighted': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'centroid': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'median': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'ward': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.44948974, 3]]),
}
@array_api_compatible
def test_linkage_ties(self, xp):
for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']:
self.check_linkage_ties(method, xp)
def check_linkage_ties(self, method, xp):
X = xp.asarray([[-1, -1], [0, 0], [1, 1]])
Z = linkage(X, method=method)
expectedZ = self._expectations[method]
assert_allclose(Z, expectedZ, atol=1e-06)
@skip_if_array_api_gpu
class TestInconsistent:
@array_api_compatible
def test_inconsistent_tdist(self, xp):
for depth in hierarchy_test_data.inconsistent_ytdist:
self.check_inconsistent_tdist(depth, xp)
def check_inconsistent_tdist(self, depth, xp):
Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
@skip_if_array_api_gpu
class TestCopheneticDistance:
@array_api_compatible
def test_linkage_cophenet_tdist_Z(self, xp):
# Tests cophenet(Z) on tdist data set.
expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
@array_api_compatible
def test_linkage_cophenet_tdist_Z_Y(self, xp):
# Tests cophenet(Z, Y) on tdist data set.
Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
(c, M) = cophenet(Z, xp.asarray(hierarchy_test_data.ytdist))
expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
@skip_if_array_api_gpu
class TestMLabLinkageConversion:
@skip_if_array_api
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
@array_api_compatible
def test_mlab_linkage_conversion_single_row(self, xp):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = xp.asarray([[0., 1., 3., 2.]])
Zm = xp.asarray([[1, 2, 3]])
assert_allclose(from_mlab_linkage(Zm), Z, rtol=1e-15)
assert_allclose(to_mlab_linkage(Z), Zm, rtol=1e-15)
@array_api_compatible
def test_mlab_linkage_conversion_multiple_rows(self, xp):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = xp.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = xp.asarray([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=xp.float64)
assert_allclose(from_mlab_linkage(Zm), Z, rtol=1e-15)
assert_allclose(to_mlab_linkage(Z), Zm, rtol=1e-15)
@skip_if_array_api_gpu
class TestFcluster:
@array_api_compatible
def test_fclusterdata(self, xp):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fclusterdata(t, 'inconsistent', xp)
for t in hierarchy_test_data.fcluster_distance:
self.check_fclusterdata(t, 'distance', xp)
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fclusterdata(t, 'maxclust', xp)
def check_fclusterdata(self, t, criterion, xp):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t])
X = xp.asarray(hierarchy_test_data.Q_X)
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
@array_api_compatible
def test_fcluster(self, xp):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fcluster(t, 'inconsistent', xp)
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster(t, 'distance', xp)
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster(t, 'maxclust', xp)
def check_fcluster(self, t, criterion, xp):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t])
Z = single(xp.asarray(hierarchy_test_data.Q_X))
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
@array_api_compatible
def test_fcluster_monocrit(self, xp):
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster_monocrit(t, xp)
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster_maxclust_monocrit(t, xp)
def check_fcluster_monocrit(self, t, xp):
expectedT = xp.asarray(hierarchy_test_data.fcluster_distance[t])
Z = single(xp.asarray(hierarchy_test_data.Q_X))
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t, xp):
expectedT = xp.asarray(hierarchy_test_data.fcluster_maxclust[t])
Z = single(xp.asarray(hierarchy_test_data.Q_X))
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders:
@skip_if_array_api_gpu
@array_api_compatible
def test_leaders_single(self, xp):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Y = xp.asarray(Y)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (xp.asarray([53, 55, 56]), xp.asarray([2, 3, 1]))
T = xp.asarray(T, dtype=xp.int32)
L = leaders(Z, T)
assert_allclose(np.concatenate(L), np.concatenate(Lright), rtol=1e-15)
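# Editor's note: is_isomorphic(a, b), exercised below, checks that two flat
# clusterings agree up to a relabeling, i.e. some one-to-one mapping of label
# values turns `a` into `b` (summarized from the test cases, not new claims).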
class TestIsIsomorphic:
@skip_if_array_api
def test_is_isomorphic_1(self):
        # Tests is_isomorphic on test case #1 (one flat cluster, different labelings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
@skip_if_array_api
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = np.asarray([1, 7, 1])
b = np.asarray([2, 3, 2])
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
@skip_if_array_api
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = np.asarray([])
b = np.asarray([])
assert_(is_isomorphic(a, b))
@skip_if_array_api
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = np.asarray([1, 2, 3])
b = np.asarray([1, 3, 2])
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
@skip_if_array_api
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = np.asarray([1, 2, 3, 3])
b = np.asarray([1, 3, 2, 3])
assert_(is_isomorphic(a, b) is False)
assert_(is_isomorphic(b, a) is False)
@skip_if_array_api
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = np.asarray([7, 2, 3])
b = np.asarray([6, 3, 2])
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
@skip_if_array_api
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc)
@skip_if_array_api
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc, True, 5)
@skip_if_array_api
def test_is_isomorphic_7(self):
# Regression test for gh-6271
a = np.asarray([1, 2, 3])
b = np.asarray([1, 1, 1])
assert_(not is_isomorphic(a, b))
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in range(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
@skip_if_array_api_gpu
class TestIsValidLinkage:
@array_api_compatible
def test_is_valid_linkage_various_size(self, xp):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_linkage_various_size(nrow, ncol, valid, xp)
def check_is_valid_linkage_various_size(self, nrow, ncol, valid, xp):
        # Tests is_valid_linkage(Z) with linkage matrices of various sizes
Z = xp.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=xp.float64)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
@array_api_compatible
def test_is_valid_linkage_int_type(self, xp):
# Tests is_valid_linkage(Z) with integer type.
Z = xp.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=xp.int64)
assert_(is_valid_linkage(Z) is False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
@array_api_compatible
def test_is_valid_linkage_empty(self, xp):
# Tests is_valid_linkage(Z) with empty linkage.
Z = xp.zeros((0, 4), dtype=xp.float64)
assert_(is_valid_linkage(Z) is False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
@array_api_compatible
def test_is_valid_linkage_4_and_up(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
assert_(is_valid_linkage(Z) is True)
@array_api_compatible
def test_is_valid_linkage_4_and_up_neg_index_left(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) is False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
@array_api_compatible
def test_is_valid_linkage_4_and_up_neg_index_right(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) is False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
@array_api_compatible
def test_is_valid_linkage_4_and_up_neg_dist(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) is False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
@array_api_compatible
def test_is_valid_linkage_4_and_up_neg_counts(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) is False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
@skip_if_array_api_gpu
class TestIsValidInconsistent:
@array_api_compatible
def test_is_valid_im_int_type(self, xp):
# Tests is_valid_im(R) with integer type.
R = xp.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=xp.int64)
assert_(is_valid_im(R) is False)
assert_raises(TypeError, is_valid_im, R, throw=True)
@array_api_compatible
def test_is_valid_im_various_size(self, xp):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_im_various_size(nrow, ncol, valid, xp)
def check_is_valid_im_various_size(self, nrow, ncol, valid, xp):
        # Tests is_valid_im(R) with inconsistency matrices of various sizes
R = xp.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=xp.float64)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
@skip_if_array_api_gpu
@array_api_compatible
def test_is_valid_im_empty(self, xp):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = xp.zeros((0, 4), dtype=xp.float64)
assert_(is_valid_im(R) is False)
assert_raises(ValueError, is_valid_im, R, throw=True)
@array_api_compatible
def test_is_valid_im_4_and_up(self, xp):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) is True)
@array_api_compatible
def test_is_valid_im_4_and_up_neg_index_left(self, xp):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) is False)
assert_raises(ValueError, is_valid_im, R, throw=True)
@array_api_compatible
def test_is_valid_im_4_and_up_neg_index_right(self, xp):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) is False)
assert_raises(ValueError, is_valid_im, R, throw=True)
@array_api_compatible
def test_is_valid_im_4_and_up_neg_dist(self, xp):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) is False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage:
@skip_if_array_api_gpu
@array_api_compatible
def test_num_obs_linkage_empty(self, xp):
# Tests num_obs_linkage(Z) with empty linkage.
Z = xp.zeros((0, 4), dtype=xp.float64)
assert_raises(ValueError, num_obs_linkage, Z)
@array_api_compatible
def test_num_obs_linkage_1x4(self, xp):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64)
assert_equal(num_obs_linkage(Z), 2)
@array_api_compatible
def test_num_obs_linkage_2x4(self, xp):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = xp.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=xp.float64)
assert_equal(num_obs_linkage(Z), 3)
@skip_if_array_api_gpu
@array_api_compatible
def test_num_obs_linkage_4_and_up(self, xp):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
@skip_if_array_api_gpu
class TestLeavesList:
@array_api_compatible
def test_leaves_list_1x4(self, xp):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64)
to_tree(Z)
assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15)
@array_api_compatible
def test_leaves_list_2x4(self, xp):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = xp.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=xp.float64)
to_tree(Z)
assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15)
@array_api_compatible
def test_leaves_list_Q(self, xp):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
self.check_leaves_list_Q(method, xp)
def check_leaves_list_Q(self, method, xp):
# Tests leaves_list(Z) on the Q data set
X = xp.asarray(hierarchy_test_data.Q_X)
Z = linkage(X, method)
node = to_tree(Z)
assert_allclose(node.pre_order(), leaves_list(Z), rtol=1e-15)
@array_api_compatible
def test_Q_subtree_pre_order(self, xp):
# Tests that pre_order() works when called on sub-trees.
X = xp.asarray(hierarchy_test_data.Q_X)
Z = linkage(X, 'single')
node = to_tree(Z)
assert_allclose(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()),
rtol=1e-15)
@skip_if_array_api_gpu
class TestCorrespond:
@array_api_compatible
def test_correspond_empty(self, xp):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = xp.zeros((0,), dtype=xp.float64)
Z = xp.zeros((0,4), dtype=xp.float64)
assert_raises(ValueError, correspond, Z, y)
@array_api_compatible
def test_correspond_2_and_up(self, xp):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in range(2, 4):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
assert_(correspond(Z, y))
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
y = xp.asarray(y)
Z = linkage(y)
assert_(correspond(Z, y))
@array_api_compatible
def test_correspond_4_and_up(self, xp):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
y = xp.asarray(y)
y2 = xp.asarray(y2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
@array_api_compatible
def test_correspond_4_and_up_2(self, xp):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
        for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
                       list(zip(list(range(16, 21)), list(range(2, 7))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
y = xp.asarray(y)
y2 = xp.asarray(y2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
@array_api_compatible
def test_num_obs_linkage_multi_matrix(self, xp):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in range(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Y = xp.asarray(Y)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
@skip_if_array_api_gpu
class TestIsMonotonic:
@array_api_compatible
def test_is_monotonic_empty(self, xp):
# Tests is_monotonic(Z) on an empty linkage.
Z = xp.zeros((0, 4), dtype=xp.float64)
assert_raises(ValueError, is_monotonic, Z)
@array_api_compatible
def test_is_monotonic_1x4(self, xp):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = xp.asarray([[0, 1, 0.3, 2]], dtype=xp.float64)
assert_allclose(is_monotonic(Z), True)
@array_api_compatible
def test_is_monotonic_2x4_T(self, xp):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = xp.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=xp.float64)
assert_allclose(is_monotonic(Z), True)
@array_api_compatible
def test_is_monotonic_2x4_F(self, xp):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = xp.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=xp.float64)
assert_allclose(is_monotonic(Z), False)
@array_api_compatible
def test_is_monotonic_3x4_T(self, xp):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = xp.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=xp.float64)
assert_allclose(is_monotonic(Z), True)
@array_api_compatible
def test_is_monotonic_3x4_F1(self, xp):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = xp.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=xp.float64)
assert_allclose(is_monotonic(Z), False)
@array_api_compatible
def test_is_monotonic_3x4_F2(self, xp):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = xp.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=xp.float64)
assert_allclose(is_monotonic(Z), False)
@array_api_compatible
def test_is_monotonic_3x4_F3(self, xp):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = xp.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=xp.float64)
assert_allclose(is_monotonic(Z), False)
@array_api_compatible
def test_is_monotonic_tdist_linkage1(self, xp):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
assert_allclose(is_monotonic(Z), True)
@array_api_compatible
def test_is_monotonic_tdist_linkage2(self, xp):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
Z[2,2] = 0.0
assert_allclose(is_monotonic(Z), False)
@array_api_compatible
def test_is_monotonic_Q_linkage(self, xp):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = xp.asarray(hierarchy_test_data.Q_X)
Z = linkage(X, 'single')
assert_allclose(is_monotonic(Z), True)
@skip_if_array_api_gpu
class TestMaxDists:
@array_api_compatible
def test_maxdists_empty_linkage(self, xp):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = xp.zeros((0, 4), dtype=xp.float64)
assert_raises(ValueError, maxdists, Z)
@array_api_compatible
def test_maxdists_one_cluster_linkage(self, xp):
# Tests maxdists(Z) on linkage with one cluster.
Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z, xp)
assert_allclose(MD, expectedMD, atol=1e-15)
@array_api_compatible
def test_maxdists_Q_linkage(self, xp):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxdists_Q_linkage(method, xp)
def check_maxdists_Q_linkage(self, method, xp):
# Tests maxdists(Z) on the Q data set
X = xp.asarray(hierarchy_test_data.Q_X)
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z, xp)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts:
@skip_if_array_api_gpu
@array_api_compatible
def test_maxinconsts_empty_linkage(self, xp):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = xp.zeros((0, 4), dtype=xp.float64)
R = xp.zeros((0, 4), dtype=xp.float64)
assert_raises(ValueError, maxinconsts, Z, R)
@array_api_compatible
def test_maxinconsts_difrow_linkage(self, xp):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
R = np.random.rand(2, 4)
R = xp.asarray(R)
assert_raises(ValueError, maxinconsts, Z, R)
@skip_if_array_api_gpu
@array_api_compatible
def test_maxinconsts_one_cluster_linkage(self, xp):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp)
assert_allclose(MD, expectedMD, atol=1e-15)
@skip_if_array_api_gpu
@array_api_compatible
def test_maxinconsts_Q_linkage(self, xp):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxinconsts_Q_linkage(method, xp)
def check_maxinconsts_Q_linkage(self, method, xp):
# Tests maxinconsts(Z, R) on the Q data set
X = xp.asarray(hierarchy_test_data.Q_X)
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat:
@array_api_compatible
def test_maxRstat_invalid_index(self, xp):
for i in [3.3, -1, 4]:
self.check_maxRstat_invalid_index(i, xp)
def check_maxRstat_invalid_index(self, i, xp):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
@skip_if_array_api_gpu
@array_api_compatible
def test_maxRstat_empty_linkage(self, xp):
for i in range(4):
self.check_maxRstat_empty_linkage(i, xp)
def check_maxRstat_empty_linkage(self, i, xp):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = xp.zeros((0, 4), dtype=xp.float64)
R = xp.zeros((0, 4), dtype=xp.float64)
assert_raises(ValueError, maxRstat, Z, R, i)
@array_api_compatible
def test_maxRstat_difrow_linkage(self, xp):
for i in range(4):
self.check_maxRstat_difrow_linkage(i, xp)
def check_maxRstat_difrow_linkage(self, i, xp):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
R = np.random.rand(2, 4)
R = xp.asarray(R)
assert_raises(ValueError, maxRstat, Z, R, i)
@skip_if_array_api_gpu
@array_api_compatible
def test_maxRstat_one_cluster_linkage(self, xp):
for i in range(4):
self.check_maxRstat_one_cluster_linkage(i, xp)
def check_maxRstat_one_cluster_linkage(self, i, xp):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i, xp)
assert_allclose(MD, expectedMD, atol=1e-15)
@skip_if_array_api_gpu
@array_api_compatible
def test_maxRstat_Q_linkage(self, xp):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
self.check_maxRstat_Q_linkage(method, i, xp)
def check_maxRstat_Q_linkage(self, method, i, xp):
# Tests maxRstat(Z, R, i) on the Q data set
X = xp.asarray(hierarchy_test_data.Q_X)
Z = linkage(X, method)
R = inconsistent(Z)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i, xp)
assert_allclose(MD, expectedMD, atol=1e-15)
@skip_if_array_api_gpu
class TestDendrogram:
@array_api_compatible
def test_dendrogram_single_linkage_tdist(self, xp):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
@array_api_compatible
def test_valid_orientation(self, xp):
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@array_api_compatible
def test_labels_as_array_or_list(self, xp):
# test for gh-12418
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
labels = xp.asarray([1, 3, 2, 6, 4, 5])
result1 = dendrogram(Z, labels=labels, no_plot=True)
result2 = dendrogram(Z, labels=list(labels), no_plot=True)
assert result1 == result2
@array_api_compatible
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_valid_label_size(self, xp):
link = xp.asarray([
[0, 1, 1.0, 4],
[2, 3, 1.0, 5],
[4, 5, 2.0, 6],
])
plt.figure()
with pytest.raises(ValueError) as exc_info:
dendrogram(link, labels=list(range(100)))
assert "Dimensions of Z and labels must be consistent."\
in str(exc_info.value)
with pytest.raises(
ValueError,
match="Dimensions of Z and labels must be consistent."):
dendrogram(link, labels=[])
plt.close()
@array_api_compatible
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_plot(self, xp):
for orientation in ['top', 'bottom', 'left', 'right']:
self.check_dendrogram_plot(orientation, xp)
def check_dendrogram_plot(self, orientation, xp):
# Tests dendrogram plotting.
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4],
'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
}
fig = plt.figure()
ax = fig.add_subplot(221)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
R1['dcoord'] = np.asarray(R1['dcoord'])
assert_equal(R1, expected)
        # test that dendrogram accepts and handles the leaf_font_size and
        # leaf_rotation keywords
dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20, leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_allclose(testlabel.get_rotation(), 90, rtol=1e-15)
assert_allclose(testlabel.get_size(), 20, rtol=1e-15)
dendrogram(Z, ax=ax, orientation=orientation,
leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_allclose(testlabel.get_rotation(), 90, rtol=1e-15)
dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_allclose(testlabel.get_size(), 20, rtol=1e-15)
plt.close()
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
R2['dcoord'] = np.asarray(R2['dcoord'])
assert_equal(R2, expected)
@array_api_compatible
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_truncate_mode(self, xp):
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
R['dcoord'] = np.asarray(R['dcoord'])
assert_equal(R, {'color_list': ['C0'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9],
'leaves_color_list': ['C0', 'C0'],
})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
R['dcoord'] = np.asarray(R['dcoord'])
assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7],
'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
})
@array_api_compatible
def test_dendrogram_colors(self, xp):
# Tests dendrogram plots with alternate colors
Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
# reset color palette (global list)
set_link_color_palette(None)
@array_api_compatible
def test_dendrogram_leaf_colors_zero_dist(self, xp):
        # tests that the leaf colors are correct for a tree
        # with two identical points
x = xp.asarray([[1, 0, 0],
[0, 0, 1],
[0, 2, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0]])
z = linkage(x, "single")
d = dendrogram(z, no_plot=True)
exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
colors = d["leaves_color_list"]
assert_equal(colors, exp_colors)
@array_api_compatible
def test_dendrogram_leaf_colors(self, xp):
# tests that the colors are correct for a tree
# with two near points ((0, 0, 1.1) and (0, 0, 1))
x = xp.asarray([[1, 0, 0],
[0, 0, 1.1],
[0, 2, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0]])
z = linkage(x, "single")
d = dendrogram(z, no_plot=True)
exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
colors = d["leaves_color_list"]
assert_equal(colors, exp_colors)
def calculate_maximum_distances(Z, xp):
# Used for testing correctness of maxdists.
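    # Editor's note, inferred from the recurrence below: B[i] holds the
    # largest merge distance within the subtree rooted at non-singleton node
    # n + i; children always precede parents in Z, so one forward pass works.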
n = Z.shape[0] + 1
B = xp.zeros((n-1,))
q = xp.zeros((3,))
for i in range(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[xp.asarray(left, dtype=xp.int64) - n]
if right >= n:
q[1] = B[xp.asarray(right, dtype=xp.int64) - n]
q[2] = Z[i, 2]
B[i] = xp.max(q)
return B
def calculate_maximum_inconsistencies(Z, R, k=3, xp=np):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = xp.zeros((n-1,))
q = xp.zeros((3,))
for i in range(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[xp.asarray(left, dtype=xp.int64) - n]
if right >= n:
q[1] = B[xp.asarray(right, dtype=xp.int64) - n]
q[2] = R[i, k]
B[i] = xp.max(q)
return B
@skip_if_array_api_gpu
@array_api_compatible
def test_unsupported_uncondensed_distance_matrix_linkage_warning(xp):
assert_warns(ClusterWarning, linkage, xp.asarray([[0, 1], [1, 0]]))
@array_api_compatible
def test_euclidean_linkage_value_error(xp):
for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
assert_raises(ValueError, linkage, xp.asarray([[1, 1], [1, 1]]),
method=method, metric='cityblock')
@skip_if_array_api_gpu
@array_api_compatible
def test_2x2_linkage(xp):
Z1 = linkage(xp.asarray([1]), method='single', metric='euclidean')
Z2 = linkage(xp.asarray([[0, 1], [0, 0]]), method='single', metric='euclidean')
assert_allclose(Z1, Z2, rtol=1e-15)
@skip_if_array_api_gpu
@array_api_compatible
def test_node_compare(xp):
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
X = xp.asarray(X)
Z = scipy.cluster.hierarchy.ward(X)
tree = to_tree(Z)
assert_(tree > tree.get_left())
assert_(tree.get_right() > tree.get_left())
assert_(tree.get_right() == tree.get_right())
assert_(tree.get_right() != tree.get_left())
@skip_if_array_api_gpu
@array_api_compatible
@skip_if_array_api_backend('numpy.array_api')
def test_cut_tree(xp):
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
X = xp.asarray(X)
Z = scipy.cluster.hierarchy.ward(X)
cutree = cut_tree(Z)
assert_allclose(cutree[:, 0], xp.arange(nobs), rtol=1e-15)
assert_allclose(cutree[:, -1], xp.zeros(nobs), rtol=1e-15)
assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1))
assert_allclose(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15)
assert_allclose(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15)
assert_allclose(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15)
nodes = _order_cluster_tree(Z)
heights = xp.asarray([node.dist for node in nodes])
assert_allclose(cutree[:, np.searchsorted(heights, [5])],
cut_tree(Z, height=5), rtol=1e-15)
assert_allclose(cutree[:, np.searchsorted(heights, [5, 10])],
cut_tree(Z, height=[5, 10]), rtol=1e-15)
assert_allclose(cutree[:, np.searchsorted(heights, [10, 5])],
cut_tree(Z, height=[10, 5]), rtol=1e-15)
@skip_if_array_api_gpu
@array_api_compatible
def test_optimal_leaf_ordering(xp):
# test with the distance vector y
Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.ytdist)),
xp.asarray(hierarchy_test_data.ytdist))
expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
assert_allclose(Z, expectedZ, atol=1e-10)
# test with the observation matrix X
Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.X), 'ward'),
xp.asarray(hierarchy_test_data.X))
expectedZ = hierarchy_test_data.linkage_X_ward_olo
assert_allclose(Z, expectedZ, atol=1e-06)
@skip_if_array_api
def test_Heap():
values = np.array([2, -1, 0, -1.5, 3])
heap = Heap(values)
pair = heap.get_min()
assert_equal(pair['key'], 3)
assert_equal(pair['value'], -1.5)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], -1)
heap.change_value(1, 2.5)
pair = heap.get_min()
assert_equal(pair['key'], 2)
assert_equal(pair['value'], 0)
heap.remove_min()
heap.remove_min()
heap.change_value(1, 10)
pair = heap.get_min()
assert_equal(pair['key'], 4)
assert_equal(pair['value'], 3)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], 10)
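# Editor's note: the private Heap exercised above behaves as a min-heap over
# an array of values; get_min() returns a {'key': index, 'value': value}
# record for the smallest value, remove_min() pops it, and change_value(key, v)
# updates one entry in place (summarized from the assertions above, not from
# a documented API).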
| 49,477
| 37.806275
| 100
|
py
|
scipy
|
scipy-main/scipy/cluster/tests/test_vq.py
|
import warnings
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_equal, assert_,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
ClusterError, _krandinit)
from scipy.cluster import _vq
from scipy.conftest import (
array_api_compatible,
skip_if_array_api,
skip_if_array_api_gpu,
skip_if_array_api_backend,
)
from scipy.sparse._sputils import matrix
from scipy._lib._array_api import SCIPY_ARRAY_API, as_xparray
TESTDATA_2D = np.array([
-2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
-2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
-4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
-0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
-2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
-2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
-2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
-2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
-1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
-1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
-0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
-1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
-2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
-0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
-2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
-2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
-1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
-3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
-1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
-2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
-0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
-2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
2.11]).reshape((200, 2))
# Global data
X = np.array([[3.0, 3], [4, 3], [4, 2],
[9, 2], [5, 1], [6, 2], [9, 4],
[5, 2], [5, 4], [7, 4], [6, 5]])
CODET1 = np.array([[3.0000, 3.0000],
[6.2000, 4.0000],
[5.8000, 1.8000]])
CODET2 = np.array([[11.0/3, 8.0/3],
[6.7500, 4.2500],
[6.2500, 1.7500]])
LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
class TestWhiten:
@array_api_compatible
def test_whiten(self, xp):
desired = xp.asarray([[5.08738849, 2.97091878],
[3.19909255, 0.69660580],
[4.51041982, 0.02640918],
[4.38567074, 0.95120889],
[2.32191480, 1.63195503]])
obs = xp.asarray([[0.98744510, 0.82766775],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
if "cupy" in xp.__name__:
import cupy as cp
cp.testing.assert_allclose(whiten(obs), desired, rtol=1e-5)
else:
assert_allclose(whiten(obs), desired, rtol=1e-5)
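        # whiten scales each feature (column) by 1 / std(column), e.g.
        # desired[0, 0] == obs[0, 0] / np.std(obs[:, 0]) ~= 5.0874.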
@array_api_compatible
def test_whiten_zero_std(self, xp):
desired = np.array([[0., 1.0, 2.86666544],
[0., 1.0, 1.32460034],
[0., 1.0, 3.74382172]])
obs = xp.asarray([[0., 1., 0.74109533],
[0., 1., 0.34243798],
[0., 1., 0.96785929]])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
if "cupy" in xp.__name__:
import cupy as cp
cp.testing.assert_allclose(whiten(obs), desired, rtol=1e-5)
else:
assert_allclose(whiten(obs), desired, rtol=1e-5)
assert_equal(len(w), 1)
assert_(issubclass(w[-1].category, RuntimeWarning))
@array_api_compatible
def test_whiten_not_finite(self, xp):
arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
for tp in arrays:
for bad_value in xp.nan, xp.inf, -xp.inf:
obs = tp([[0.98744510, bad_value],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_raises(ValueError, whiten, obs)
class TestVq:
@skip_if_array_api_gpu
@array_api_compatible
def test_py_vq(self, xp):
initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
for tp in arrays:
label1 = py_vq(tp(X), tp(initc))[0]
assert_array_equal(label1, LABEL1)
@skip_if_array_api
def test_vq(self):
initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
for tp in [np.asarray, matrix]:
label1, dist = _vq.vq(tp(X), tp(initc))
assert_array_equal(label1, LABEL1)
tlabel1, tdist = vq(tp(X), tp(initc))
@skip_if_array_api_gpu
@array_api_compatible
def test_vq_1d(self, xp):
# Test special rank 1 vq algo, python implementation.
data = X[:, 0]
initc = data[:3]
a, b = _vq.vq(data, initc)
data = xp.asarray(data)
initc = xp.asarray(initc)
ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
assert_array_equal(a, ta)
assert_array_equal(b, tb)
@skip_if_array_api
def test__vq_sametype(self):
a = np.array([1.0, 2.0], dtype=np.float64)
b = a.astype(np.float32)
assert_raises(TypeError, _vq.vq, a, b)
@skip_if_array_api
def test__vq_invalid_type(self):
a = np.array([1, 2], dtype=int)
assert_raises(TypeError, _vq.vq, a, a)
@skip_if_array_api_gpu
@array_api_compatible
def test_vq_large_nfeat(self, xp):
X = np.random.rand(20, 20)
code_book = np.random.rand(3, 20)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(
xp.asarray(X), xp.asarray(code_book)
)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
X = X.astype(np.float32)
code_book = code_book.astype(np.float32)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(
xp.asarray(X), xp.asarray(code_book)
)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
@skip_if_array_api_gpu
@array_api_compatible
def test_vq_large_features(self, xp):
X = np.random.rand(10, 5) * 1000000
code_book = np.random.rand(2, 5) * 1000000
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(
xp.asarray(X), xp.asarray(code_book)
)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
# Class-level skip on GPU for now; once pdist/cdist are hooked up for CuPy,
# more tests will work, so switch to per-test markers then
@skip_if_array_api_gpu
class TestKMean:
@array_api_compatible
def test_large_features(self, xp):
        # Generate a data set with large values and run kmeans on it,
        # as a regression test for gh-1077.
d = 300
n = 100
m1 = np.random.randn(d)
m2 = np.random.randn(d)
x = 10000 * np.random.randn(n, d) - 20000 * m1
y = 10000 * np.random.randn(n, d) + 20000 * m2
data = np.empty((x.shape[0] + y.shape[0], d), np.double)
data[:x.shape[0]] = x
data[x.shape[0]:] = y
kmeans(xp.asarray(data), xp.asarray(2))
@array_api_compatible
def test_kmeans_simple(self, xp):
np.random.seed(54321)
initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
for tp in arrays:
code1 = kmeans(tp(X), tp(initc), iter=1)[0]
assert_array_almost_equal(code1, CODET2)
@array_api_compatible
def test_kmeans_lost_cluster(self, xp):
# This will cause kmeans to have a cluster with no points.
data = xp.asarray(TESTDATA_2D)
initk = xp.asarray([[-1.8127404, -0.67128041],
[2.04621601, 0.07401111],
[-2.31149087, -0.05160469]])
kmeans(data, initk)
with suppress_warnings() as sup:
sup.filter(UserWarning,
"One of the clusters is empty. Re-run kmeans with a "
"different initialization")
kmeans2(data, initk, missing='warn')
assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
@array_api_compatible
def test_kmeans2_simple(self, xp):
np.random.seed(12345678)
initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
for tp in arrays:
code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
assert_array_almost_equal(code1, CODET1)
assert_array_almost_equal(code2, CODET2)
@array_api_compatible
def test_kmeans2_rank1(self, xp):
data = xp.asarray(TESTDATA_2D)
data1 = data[:, 0]
initc = data1[:3]
code = as_xparray(initc, copy=True, xp=xp)
kmeans2(data1, code, iter=1)[0]
kmeans2(data1, code, iter=2)[0]
@array_api_compatible
@skip_if_array_api_backend('numpy.array_api')
def test_kmeans2_rank1_2(self, xp):
data = xp.asarray(TESTDATA_2D)
data1 = data[:, 0]
kmeans2(data1, xp.asarray(2), iter=1)
@array_api_compatible
def test_kmeans2_high_dim(self, xp):
# test kmeans2 when the number of dimensions exceeds the number
# of input points
data = xp.asarray(TESTDATA_2D)
data = xp.reshape(data, (20, 20))[:10, :]
kmeans2(data, xp.asarray(2))
@skip_if_array_api_gpu
@array_api_compatible
@skip_if_array_api_backend('numpy.array_api')
def test_kmeans2_init(self, xp):
np.random.seed(12345)
data = xp.asarray(TESTDATA_2D)
k = xp.asarray(3)
kmeans2(data, k, minit='points')
kmeans2(data[:, :1], k, minit='points') # special case (1-D)
kmeans2(data, k, minit='++')
kmeans2(data[:, :1], k, minit='++') # special case (1-D)
# minit='random' can give warnings, filter those
with suppress_warnings() as sup:
sup.filter(message="One of the clusters is empty. Re-run.")
kmeans2(data, k, minit='random')
kmeans2(data[:, :1], k, minit='random') # special case (1-D)
@array_api_compatible
@skip_if_array_api_backend('numpy.array_api')
@pytest.mark.skipif(sys.platform == 'win32',
reason='Fails with MemoryError in Wine.')
def test_krandinit(self, xp):
data = xp.asarray(TESTDATA_2D)
datas = [xp.reshape(data, (200, 2)), xp.reshape(data, (20, 20))[:10, :]]
k = int(1e6)
for data in datas:
# check that np.random.Generator can be used (numpy >= 1.17)
if hasattr(np.random, 'default_rng'):
rng = np.random.default_rng(1234)
else:
rng = np.random.RandomState(1234)
init = _krandinit(data, k, rng, xp)
orig_cov = xp.cov(data.T)
init_cov = xp.cov(init.T)
assert_allclose(orig_cov, init_cov, atol=1e-2)
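            # _krandinit draws the k initial centroids from a Gaussian fitted
            # to `data` (sample mean and covariance), so with k = 1e6 draws
            # the empirical covariance should closely match that of the data.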
@array_api_compatible
def test_kmeans2_empty(self, xp):
# Regression test for gh-1032.
assert_raises(ValueError, kmeans2, xp.asarray([]), xp.asarray(2))
@skip_if_array_api
def test_kmeans_0k(self):
# Regression test for gh-1073: fail when k arg is 0.
assert_raises(ValueError, kmeans, X, 0)
assert_raises(ValueError, kmeans2, X, 0)
assert_raises(ValueError, kmeans2, X, np.array([]))
@array_api_compatible
def test_kmeans_large_thres(self, xp):
# Regression test for gh-1774
x = xp.asarray([1, 2, 3, 4, 10], dtype=xp.float64)
res = kmeans(x, xp.asarray(1), thresh=1e16)
assert_allclose(res[0], xp.asarray([4.]))
assert_allclose(res[1], 2.3999999999999999)
@skip_if_array_api_gpu
@array_api_compatible
def test_kmeans2_kpp_low_dim(self, xp):
# Regression test for gh-11462
prev_res = xp.asarray([[-1.95266667, 0.898],
[-3.153375, 3.3945]])
np.random.seed(42)
res, _ = kmeans2(xp.asarray(TESTDATA_2D), xp.asarray(2), minit='++')
assert_allclose(res, prev_res)
@skip_if_array_api_gpu
@array_api_compatible
def test_kmeans2_kpp_high_dim(self, xp):
# Regression test for gh-11462
n_dim = 100
size = 10
centers = np.vstack([5 * np.ones(n_dim),
-5 * np.ones(n_dim)])
np.random.seed(42)
data = np.vstack([
np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
])
data = xp.asarray(data)
res, _ = kmeans2(data, xp.asarray(2), minit='++')
assert_array_almost_equal(res, centers, decimal=0)
@array_api_compatible
def test_kmeans_diff_convergence(self, xp):
# Regression test for gh-8727
obs = xp.asarray([-3, -1, 0, 1, 1, 8], dtype=xp.float64)
res = kmeans(obs, xp.asarray([-3., 0.99]))
assert_allclose(res[0], xp.asarray([-0.4, 8.]))
assert_allclose(res[1], 1.0666666666666667)
@skip_if_array_api
def test_kmeans_and_kmeans2_random_seed(self):
seed_list = [
1234, np.random.RandomState(1234), np.random.default_rng(1234)
]
for seed in seed_list:
# test for kmeans
res1, _ = kmeans(TESTDATA_2D, 2, seed=seed)
res2, _ = kmeans(TESTDATA_2D, 2, seed=seed)
            assert_allclose(res1, res2)  # should be same results
# test for kmeans2
for minit in ["random", "points", "++"]:
res1, _ = kmeans2(TESTDATA_2D, 2, minit=minit, seed=seed)
res2, _ = kmeans2(TESTDATA_2D, 2, minit=minit, seed=seed)
                assert_allclose(res1, res2)  # should be same results
| 16,092
| 38.06068
| 80
|
py
|
scipy
|
scipy-main/scipy/cluster/tests/test_disjoint_set.py
|
import pytest
from pytest import raises as assert_raises
import numpy as np
from scipy.cluster.hierarchy import DisjointSet
import string
def generate_random_token():
k = len(string.ascii_letters)
tokens = list(np.arange(k, dtype=int))
tokens += list(np.arange(k, dtype=float))
tokens += list(string.ascii_letters)
tokens += [None for i in range(k)]
tokens = np.array(tokens, dtype=object)
rng = np.random.RandomState(seed=0)
while 1:
size = rng.randint(1, 3)
element = rng.choice(tokens, size)
if size == 1:
yield element[0]
else:
yield tuple(element)
def get_elements(n):
# dict is deterministic without difficulty of comparing numpy ints
elements = {}
for element in generate_random_token():
if element not in elements:
elements[element] = len(elements)
if len(elements) >= n:
break
return list(elements.keys())
def test_init():
n = 10
elements = get_elements(n)
dis = DisjointSet(elements)
assert dis.n_subsets == n
assert list(dis) == elements
def test_len():
n = 10
elements = get_elements(n)
dis = DisjointSet(elements)
assert len(dis) == n
dis.add("dummy")
assert len(dis) == n + 1
@pytest.mark.parametrize("n", [10, 100])
def test_contains(n):
elements = get_elements(n)
dis = DisjointSet(elements)
for x in elements:
assert x in dis
assert "dummy" not in dis
@pytest.mark.parametrize("n", [10, 100])
def test_add(n):
elements = get_elements(n)
dis1 = DisjointSet(elements)
dis2 = DisjointSet()
for i, x in enumerate(elements):
dis2.add(x)
assert len(dis2) == i + 1
# test idempotency by adding element again
dis2.add(x)
assert len(dis2) == i + 1
assert list(dis1) == list(dis2)
def test_element_not_present():
elements = get_elements(n=10)
dis = DisjointSet(elements)
with assert_raises(KeyError):
dis["dummy"]
with assert_raises(KeyError):
dis.merge(elements[0], "dummy")
with assert_raises(KeyError):
dis.connected(elements[0], "dummy")
@pytest.mark.parametrize("direction", ["forwards", "backwards"])
@pytest.mark.parametrize("n", [10, 100])
def test_linear_union_sequence(n, direction):
elements = get_elements(n)
dis = DisjointSet(elements)
assert elements == list(dis)
indices = list(range(n - 1))
if direction == "backwards":
indices = indices[::-1]
for it, i in enumerate(indices):
assert not dis.connected(elements[i], elements[i + 1])
assert dis.merge(elements[i], elements[i + 1])
assert dis.connected(elements[i], elements[i + 1])
assert dis.n_subsets == n - 1 - it
roots = [dis[i] for i in elements]
if direction == "forwards":
assert all(elements[0] == r for r in roots)
else:
assert all(elements[-2] == r for r in roots)
assert not dis.merge(elements[0], elements[-1])
@pytest.mark.parametrize("n", [10, 100])
def test_self_unions(n):
elements = get_elements(n)
dis = DisjointSet(elements)
for x in elements:
assert dis.connected(x, x)
assert not dis.merge(x, x)
assert dis.connected(x, x)
assert dis.n_subsets == len(elements)
assert elements == list(dis)
roots = [dis[x] for x in elements]
assert elements == roots
@pytest.mark.parametrize("order", ["ab", "ba"])
@pytest.mark.parametrize("n", [10, 100])
def test_equal_size_ordering(n, order):
elements = get_elements(n)
dis = DisjointSet(elements)
rng = np.random.RandomState(seed=0)
indices = np.arange(n)
rng.shuffle(indices)
for i in range(0, len(indices), 2):
a, b = elements[indices[i]], elements[indices[i + 1]]
if order == "ab":
assert dis.merge(a, b)
else:
assert dis.merge(b, a)
expected = elements[min(indices[i], indices[i + 1])]
assert dis[a] == expected
assert dis[b] == expected
@pytest.mark.parametrize("kmax", [5, 10])
def test_binary_tree(kmax):
n = 2**kmax
elements = get_elements(n)
dis = DisjointSet(elements)
rng = np.random.RandomState(seed=0)
for k in 2**np.arange(kmax):
for i in range(0, n, 2 * k):
r1, r2 = rng.randint(0, k, size=2)
a, b = elements[i + r1], elements[i + k + r2]
assert not dis.connected(a, b)
assert dis.merge(a, b)
assert dis.connected(a, b)
assert elements == list(dis)
roots = [dis[i] for i in elements]
expected_indices = np.arange(n) - np.arange(n) % (2 * k)
expected = [elements[i] for i in expected_indices]
assert roots == expected
@pytest.mark.parametrize("n", [10, 100])
def test_subsets(n):
elements = get_elements(n)
dis = DisjointSet(elements)
rng = np.random.RandomState(seed=0)
for i, j in rng.randint(0, n, (n, 2)):
x = elements[i]
y = elements[j]
expected = {element for element in dis if {dis[element]} == {dis[x]}}
assert dis.subset_size(x) == len(dis.subset(x))
assert expected == dis.subset(x)
expected = {dis[element]: set() for element in dis}
for element in dis:
expected[dis[element]].add(element)
expected = list(expected.values())
assert expected == dis.subsets()
dis.merge(x, y)
assert dis.subset(x) == dis.subset(y)
| 5,525
| 26.221675
| 77
|
py
|
scipy
|
scipy-main/scipy/cluster/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/cluster/tests/hierarchy_test_data.py
|
from numpy import array
Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02],
[7.50205180e-01, 4.60299830e-01, 8.98696460e-01],
[6.65461230e-01, 6.94011420e-01, 9.10465700e-01],
[9.64047590e-01, 1.43082200e-03, 7.39874220e-01],
[1.08159060e-01, 5.53028790e-01, 6.63804780e-02],
[9.31359130e-01, 8.25424910e-01, 9.52315440e-01],
[6.78086960e-01, 3.41903970e-01, 5.61481950e-01],
[9.82730940e-01, 7.04605210e-01, 8.70978630e-02],
[6.14691610e-01, 4.69989230e-02, 6.02406450e-01],
[5.80161260e-01, 9.17354970e-01, 5.88163850e-01],
[1.38246310e+00, 1.96358160e+00, 1.94437880e+00],
[2.10675860e+00, 1.67148730e+00, 1.34854480e+00],
[1.39880070e+00, 1.66142050e+00, 1.32224550e+00],
[1.71410460e+00, 1.49176380e+00, 1.45432170e+00],
[1.54102340e+00, 1.84374950e+00, 1.64658950e+00],
[2.08512480e+00, 1.84524350e+00, 2.17340850e+00],
[1.30748740e+00, 1.53801650e+00, 2.16007740e+00],
[1.41447700e+00, 1.99329070e+00, 1.99107420e+00],
[1.61943490e+00, 1.47703280e+00, 1.89788160e+00],
[1.59880600e+00, 1.54988980e+00, 1.57563350e+00],
[3.37247380e+00, 2.69635310e+00, 3.39981700e+00],
[3.13705120e+00, 3.36528090e+00, 3.06089070e+00],
[3.29413250e+00, 3.19619500e+00, 2.90700170e+00],
[2.65510510e+00, 3.06785900e+00, 2.97198540e+00],
[3.30941040e+00, 2.59283970e+00, 2.57714110e+00],
[2.59557220e+00, 3.33477370e+00, 3.08793190e+00],
[2.58206180e+00, 3.41615670e+00, 3.26441990e+00],
[2.71127000e+00, 2.77032450e+00, 2.63466500e+00],
[2.79617850e+00, 3.25473720e+00, 3.41801560e+00],
[2.64741750e+00, 2.54538040e+00, 3.25354110e+00]])
ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754.,
564., 138., 219., 869., 669.])
linkage_ytdist_single = array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]])
linkage_ytdist_complete = array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[1., 6., 400., 3.],
[0., 7., 412., 3.],
[8., 9., 996., 6.]])
linkage_ytdist_average = array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 333.5, 3.],
[1., 6., 347.5, 3.],
[8., 9., 680.77777778, 6.]])
linkage_ytdist_weighted = array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 333.5, 3.],
[1., 6., 347.5, 3.],
[8., 9., 670.125, 6.]])
# the optimal leaf ordering of linkage_ytdist_single
linkage_ytdist_single_olo = array([[5., 2., 138., 2.],
[4., 3., 219., 2.],
[7., 0., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]])
X = array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
linkage_X_centroid = array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.43614494, 4.],
[7., 9., 15.17363237, 6.]])
linkage_X_median = array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.43614494, 4.],
[7., 9., 15.17363237, 6.]])
linkage_X_ward = array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
# the optimal leaf ordering of linkage_X_ward
linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.],
[5., 1., 1.77045373, 2.],
[2., 0., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
inconsistent_ytdist = {
1: array([[138., 0., 1., 0.],
[219., 0., 1., 0.],
[255., 0., 1., 0.],
[268., 0., 1., 0.],
[295., 0., 1., 0.]]),
2: array([[138., 0., 1., 0.],
[219., 0., 1., 0.],
[237., 25.45584412, 2., 0.70710678],
[261.5, 9.19238816, 2., 0.70710678],
[233.66666667, 83.9424406, 3., 0.7306594]]),
3: array([[138., 0., 1., 0.],
[219., 0., 1., 0.],
[237., 25.45584412, 2., 0.70710678],
[247.33333333, 25.38372182, 3., 0.81417007],
[239., 69.36377537, 4., 0.80733783]]),
4: array([[138., 0., 1., 0.],
[219., 0., 1., 0.],
[237., 25.45584412, 2., 0.70710678],
[247.33333333, 25.38372182, 3., 0.81417007],
[235., 60.73302232, 5., 0.98793042]])}
fcluster_inconsistent = {
0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1])}
fcluster_distance = {
0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3,
1, 1, 1, 2, 1, 1, 1, 1, 1]),
1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1])}
fcluster_maxclust = {
8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4,
1, 1, 1, 3, 1, 1, 1, 1, 2]),
4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1])}
| 6,850
| 45.924658
| 78
|
py
|
scipy
|
scipy-main/scipy/odr/setup.py
|
from os.path import join
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
from scipy._build_utils import (uses_blas64, blas_ilp64_pre_build_hook,
combine_dict)
config = Configuration('odr', parent_package, top_path)
libodr_files = ['d_odr.f',
'd_mprec.f',
'dlunoc.f',
'd_lpk.f']
if uses_blas64():
blas_info = get_info('blas_ilp64_opt')
pre_build_hook = blas_ilp64_pre_build_hook(blas_info)
else:
blas_info = get_info('blas_opt')
pre_build_hook = None
odrpack_src = [join('odrpack', x) for x in libodr_files]
config.add_library('odrpack', sources=odrpack_src,
_pre_build_hook=pre_build_hook)
sources = ['__odrpack.c']
cfg = combine_dict(blas_info, numpy_nodepr_api,
libraries=['odrpack'],
include_dirs=['.'])
ext = config.add_extension('__odrpack',
sources=sources,
depends=(['odrpack.h'] + odrpack_src),
**cfg
)
ext._pre_build_hook = pre_build_hook
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 1,460
| 29.4375
| 75
|
py
|
scipy
|
scipy-main/scipy/odr/odrpack.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.odr` namespace for importing the functions
# included below.
import warnings
from . import _odrpack
__all__ = [ # noqa: F822
'odr', 'OdrWarning', 'OdrError', 'OdrStop',
'Data', 'RealData', 'Model', 'Output', 'ODR',
'odr_error', 'odr_stop'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.odr.odrpack is deprecated and has no attribute "
f"{name}. Try looking in scipy.odr instead.")
warnings.warn(f"Please use `{name}` from the `scipy.odr` namespace, "
"the `scipy.odr.odrpack` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_odrpack, name)
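# A sketch of the redirection behavior above (hypothetical session):
#   >>> from scipy.odr import odrpack
#   >>> odrpack.ODR  # emits a DeprecationWarning, then returns scipy.odr.ODR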
| 837
| 26.933333
| 76
|
py
|
scipy
|
scipy-main/scipy/odr/_odrpack.py
|
"""
Python wrappers for Orthogonal Distance Regression (ODRPACK).
Notes
=====
* Array formats -- FORTRAN stores its arrays in memory column first, i.e., an
array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
efficiency and convenience, the input and output arrays of the fitting
function (and its Jacobians) are passed to FORTRAN without transposition.
Therefore, where the ODRPACK documentation says that the X array is of shape
(N, M), it will be passed to the Python function as an array of shape (M, N).
If M==1, the 1-D case, then nothing matters; if M>1, then your
Python functions will be dealing with arrays that are indexed in reverse of
the ODRPACK documentation. No real issue, but watch out for your indexing of
the Jacobians: the i,jth elements (@f_i/@x_j) evaluated at the nth
observation will be returned as jacd[j, i, n]. Except for the Jacobians, it
really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,
  you can always use the transpose() function from SciPy explicitly (a
  small shape sketch follows this list).
* Examples -- See the accompanying file test/test.py for examples of how to set
up fits of your own. Some are taken from the User's Guide; some are from
other sources.
* Models -- Some common models are instantiated in the accompanying module
models.py . Contributions are welcome.
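A small shape sketch of the array-format convention above (illustrative
sizes: M=2 input dimensions, N=5 observations, single response)::
    # ODRPACK's X of shape (N, M) == (5, 2) arrives in Python with
    # x.shape == (M, N) == (2, 5); x[0] is the first input dimension.
    # Jacobian wrt x: jacd[j, i, n] == @f_i/@x_j at the nth observation.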
Credits
=======
* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.
Robert Kern
robert.kern@gmail.com
"""
import os
import numpy
from warnings import warn
from scipy.odr import __odrpack
__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop',
'Data', 'RealData', 'Model', 'Output', 'ODR',
'odr_error', 'odr_stop']
odr = __odrpack.odr
class OdrWarning(UserWarning):
"""
Warning indicating that the data passed into
ODR will cause problems when passed into 'odr'
that the user should be aware of.
"""
pass
class OdrError(Exception):
"""
Exception indicating an error in fitting.
This is raised by `~scipy.odr.odr` if an error occurs during fitting.
"""
pass
class OdrStop(Exception):
"""
Exception stopping fitting.
You can raise this exception in your objective function to tell
`~scipy.odr.odr` to stop fitting.
"""
pass
# Backwards compatibility
odr_error = OdrError
odr_stop = OdrStop
__odrpack._set_exceptions(OdrError, OdrStop)
def _conv(obj, dtype=None):
""" Convert an object to the preferred form for input to the odr routine.
"""
if obj is None:
return obj
else:
if dtype is None:
obj = numpy.asarray(obj)
else:
obj = numpy.asarray(obj, dtype)
if obj.shape == ():
# Scalar.
return obj.dtype.type(obj)
else:
return obj
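# For example (a quick sketch of the conversions performed):
#   _conv(None)    -> None
#   _conv([1, 2])  -> array([1, 2])
#   _conv(2.0)     -> numpy.float64(2.0) (a numpy scalar, not a 0-d array)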
def _report_error(info):
""" Interprets the return code of the odr routine.
Parameters
----------
info : int
The return code of the odr routine.
Returns
-------
problems : list(str)
A list of messages about why the odr() routine stopped.
"""
stopreason = ('Blank',
'Sum of squares convergence',
'Parameter convergence',
'Both sum of squares and parameter convergence',
'Iteration limit reached')[info % 5]
if info >= 5:
# questionable results or fatal error
I = (info//10000 % 10,
info//1000 % 10,
info//100 % 10,
info//10 % 10,
info % 10)
problems = []
if I[0] == 0:
if I[1] != 0:
problems.append('Derivatives possibly not correct')
if I[2] != 0:
problems.append('Error occurred in callback')
if I[3] != 0:
problems.append('Problem is not full rank at solution')
problems.append(stopreason)
elif I[0] == 1:
if I[1] != 0:
problems.append('N < 1')
if I[2] != 0:
problems.append('M < 1')
if I[3] != 0:
problems.append('NP < 1 or NP > N')
if I[4] != 0:
problems.append('NQ < 1')
elif I[0] == 2:
if I[1] != 0:
problems.append('LDY and/or LDX incorrect')
if I[2] != 0:
problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
if I[3] != 0:
problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
if I[4] != 0:
problems.append('LWORK and/or LIWORK too small')
elif I[0] == 3:
if I[1] != 0:
problems.append('STPB and/or STPD incorrect')
if I[2] != 0:
problems.append('SCLB and/or SCLD incorrect')
if I[3] != 0:
problems.append('WE incorrect')
if I[4] != 0:
problems.append('WD incorrect')
elif I[0] == 4:
problems.append('Error in derivatives')
elif I[0] == 5:
problems.append('Error occurred in callback')
elif I[0] == 6:
problems.append('Numerical error detected')
return problems
else:
return [stopreason]
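# A worked decoding sketch, following the digit scheme above:
#   _report_error(1)     -> ['Sum of squares convergence']
#   _report_error(4)     -> ['Iteration limit reached']
#   _report_error(10021) -> I == (1, 0, 0, 2, 1), giving
#                           ['NP < 1 or NP > N', 'NQ < 1']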
class Data:
"""
The data to fit.
Parameters
----------
x : array_like
Observed data for the independent variable of the regression
y : array_like, optional
If array-like, observed data for the dependent variable of the
regression. A scalar input implies that the model to be used on
the data is implicit.
we : array_like, optional
If `we` is a scalar, then that value is used for all data points (and
all dimensions of the response variable).
If `we` is a rank-1 array of length q (the dimensionality of the
response variable), then this vector is the diagonal of the covariant
weighting matrix for all data points.
If `we` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th response variable
observation (single-dimensional only).
If `we` is a rank-2 array of shape (q, q), then this is the full
covariant weighting matrix broadcast to each observation.
If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
diagonal of the covariant weighting matrix for the i'th observation.
If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
If the fit is implicit, then only a positive scalar value is used.
wd : array_like, optional
If `wd` is a scalar, then that value is used for all data points
(and all dimensions of the input variable). If `wd` = 0, then the
covariant weighting matrix for each observation is set to the identity
matrix (so each dimension of each observation has the same weight).
If `wd` is a rank-1 array of length m (the dimensionality of the input
variable), then this vector is the diagonal of the covariant weighting
matrix for all data points.
If `wd` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the ith input variable observation
(single-dimensional only).
If `wd` is a rank-2 array of shape (m, m), then this is the full
covariant weighting matrix broadcast to each observation.
If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
diagonal of the covariant weighting matrix for the ith observation.
If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
fix : array_like of ints, optional
The `fix` argument is the same as ifixx in the class ODR. It is an
array of integers with the same shape as data.x that determines which
input observations are treated as fixed. One can use a sequence of
length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
meta : dict, optional
Free-form dictionary for metadata.
Notes
-----
Each argument is attached to the member of the instance of the same name.
The structures of `x` and `y` are described in the Model class docstring.
If `y` is an integer, then the Data instance can only be used to fit with
implicit models where the dimensionality of the response is equal to the
specified value of `y`.
The `we` argument weights the effect a deviation in the response variable
has on the fit. The `wd` argument weights the effect a deviation in the
input variable has on the fit. To handle multidimensional inputs and
responses easily, the structure of these arguments has the n'th
dimensional axis first. These arguments heavily use the structured
arguments feature of ODRPACK to conveniently and flexibly support all
options. See the ODRPACK User's Guide for a full explanation of how these
weights are used in the algorithm. Basically, a higher value of the weight
for a particular data point makes a deviation at that point more
detrimental to the fit.
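    Examples
    --------
    A minimal sketch (synthetic values; the weights are illustrative
    ``1/sigma**2`` factors, not taken from real data)::
        x = [0., 1., 2., 3.]
        y = [1.1, 2.9, 5.2, 6.9]
        # rank-1 `we` of length n: one weight per response observation
        data = Data(x, y, we=[25., 25., 100., 100.])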
"""
def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None):
self.x = _conv(x)
if not isinstance(self.x, numpy.ndarray):
raise ValueError(("Expected an 'ndarray' of data for 'x', "
"but instead got data of type '{name}'").format(
name=type(self.x).__name__))
self.y = _conv(y)
self.we = _conv(we)
self.wd = _conv(wd)
self.fix = _conv(fix)
self.meta = {} if meta is None else meta
def set_meta(self, **kwds):
""" Update the metadata dictionary with the keywords and data provided
by keywords.
Examples
--------
::
data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay")
"""
self.meta.update(kwds)
def __getattr__(self, attr):
""" Dispatch attribute access to the metadata dictionary.
"""
if attr in self.meta:
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
class RealData(Data):
"""
The data, with weightings as actual standard deviations and/or
covariances.
Parameters
----------
x : array_like
Observed data for the independent variable of the regression
y : array_like, optional
If array-like, observed data for the dependent variable of the
regression. A scalar input implies that the model to be used on
the data is implicit.
sx : array_like, optional
Standard deviations of `x`.
`sx` are standard deviations of `x` and are converted to weights by
dividing 1.0 by their squares.
sy : array_like, optional
Standard deviations of `y`.
`sy` are standard deviations of `y` and are converted to weights by
dividing 1.0 by their squares.
covx : array_like, optional
Covariance of `x`
`covx` is an array of covariance matrices of `x` and are converted to
weights by performing a matrix inversion on each observation's
covariance matrix.
covy : array_like, optional
Covariance of `y`
`covy` is an array of covariance matrices and are converted to
weights by performing a matrix inversion on each observation's
covariance matrix.
fix : array_like, optional
The argument and member fix is the same as Data.fix and ODR.ifixx:
It is an array of integers with the same shape as `x` that
determines which input observations are treated as fixed. One can
use a sequence of length m (the dimensionality of the input
observations) to fix some dimensions for all observations. A value
of 0 fixes the observation, a value > 0 makes it free.
meta : dict, optional
Free-form dictionary for metadata.
Notes
-----
The weights `wd` and `we` are computed from provided values as follows:
`sx` and `sy` are converted to weights by dividing 1.0 by their squares.
    For example, ``wd = 1./numpy.power(sx, 2)``.
`covx` and `covy` are arrays of covariance matrices and are converted to
weights by performing a matrix inversion on each observation's covariance
matrix. For example, ``we[i] = numpy.linalg.inv(covy[i])``.
These arguments follow the same structured argument conventions as wd and
we only restricted by their natures: `sx` and `sy` can't be rank-3, but
`covx` and `covy` can be.
Only set *either* `sx` or `covx` (not both). Setting both will raise an
exception. Same with `sy` and `covy`.
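    Examples
    --------
    A minimal sketch with assumed, illustrative uncertainties::
        x = [0., 1., 2., 3.]
        y = [1.1, 2.9, 5.2, 6.9]
        data = RealData(x, y, sx=0.1, sy=[0.2, 0.2, 0.1, 0.1])
        # data.wd == 1/0.1**2 and data.we == 1./numpy.power(sy, 2)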
"""
def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None,
fix=None, meta=None):
if (sx is not None) and (covx is not None):
raise ValueError("cannot set both sx and covx")
if (sy is not None) and (covy is not None):
raise ValueError("cannot set both sy and covy")
# Set flags for __getattr__
self._ga_flags = {}
if sx is not None:
self._ga_flags['wd'] = 'sx'
else:
self._ga_flags['wd'] = 'covx'
if sy is not None:
self._ga_flags['we'] = 'sy'
else:
self._ga_flags['we'] = 'covy'
self.x = _conv(x)
if not isinstance(self.x, numpy.ndarray):
raise ValueError(("Expected an 'ndarray' of data for 'x', "
"but instead got data of type '{name}'").format(
name=type(self.x).__name__))
self.y = _conv(y)
self.sx = _conv(sx)
self.sy = _conv(sy)
self.covx = _conv(covx)
self.covy = _conv(covy)
self.fix = _conv(fix)
self.meta = {} if meta is None else meta
def _sd2wt(self, sd):
""" Convert standard deviation to weights.
"""
return 1./numpy.power(sd, 2)
def _cov2wt(self, cov):
""" Convert covariance matrix(-ices) to weights.
"""
from scipy.linalg import inv
if len(cov.shape) == 2:
return inv(cov)
else:
weights = numpy.zeros(cov.shape, float)
for i in range(cov.shape[-1]): # n
weights[:,:,i] = inv(cov[:,:,i])
return weights
def __getattr__(self, attr):
lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx),
('wd', 'covx'): (self._cov2wt, self.covx),
('we', 'sy'): (self._sd2wt, self.sy),
('we', 'covy'): (self._cov2wt, self.covy)}
if attr not in ('wd', 'we'):
if attr in self.meta:
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
else:
func, arg = lookup_tbl[(attr, self._ga_flags[attr])]
if arg is not None:
return func(*(arg,))
else:
return None
class Model:
"""
The Model class stores information about the function you wish to fit.
It stores the function itself, at the least, and optionally stores
functions which compute the Jacobians used during fitting. Also, one
can provide a function that will provide reasonable starting values
for the fit parameters possibly given the set of data.
Parameters
----------
fcn : function
fcn(beta, x) --> y
fjacb : function
Jacobian of fcn wrt the fit parameters beta.
fjacb(beta, x) --> @f_i(x,B)/@B_j
fjacd : function
Jacobian of fcn wrt the (possibly multidimensional) input
variable.
fjacd(beta, x) --> @f_i(x,B)/@x_j
extra_args : tuple, optional
If specified, `extra_args` should be a tuple of extra
        arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called
        as ``fcn(beta, x, *extra_args)``
    estimate : function
        Provides estimates of the fit parameters from the data
        estimate(data) --> estbeta
implicit : boolean
        If TRUE, specifies that the model is implicit; i.e.,
        ``fcn(beta, x) ~= 0`` and there is no y data to fit against
meta : dict, optional
freeform dictionary of metadata for the model
Notes
-----
Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and
return a NumPy array. The `estimate` object takes an instance of the
Data class.
Here are the rules for the shapes of the argument and return
arrays of the callback functions:
`x`
if the input data is single-dimensional, then `x` is rank-1
array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)``
If the input data is multi-dimensional, then `x` is a rank-2 array;
i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``.
In all cases, it has the same shape as the input data array passed to
`~scipy.odr.odr`. `m` is the dimensionality of the input data,
`n` is the number of observations.
`y`
if the response variable is single-dimensional, then `y` is a
rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``.
If the response variable is multi-dimensional, then `y` is a rank-2
array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape =
(q, n)`` where `q` is the dimensionality of the response variable.
`beta`
rank-1 array of length `p` where `p` is the number of parameters;
i.e. ``beta = array([B_1, B_2, ..., B_p])``
`fjacb`
if the response variable is multi-dimensional, then the
return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] =
d f_l(X,B)/d B_k`` evaluated at the ith data point. If `q == 1`, then
the return array is only rank-2 and with shape `(p, n)`.
`fjacd`
as with fjacb, only the return array's shape is `(q, m, n)`
such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data
point. If `q == 1`, then the return array's shape is `(m, n)`. If
`m == 1`, the shape is (q, n). If `m == q == 1`, the shape is `(n,)`.
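    For a concrete sketch of these rules in the simplest case
    (``m == q == 1``, two parameters; the function is illustrative)::
        def fcn(beta, x):
            # beta.shape == (2,), x.shape == (n,) -> returns shape (n,)
            return beta[0] * x + beta[1]
        model = Model(fcn)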
"""
def __init__(self, fcn, fjacb=None, fjacd=None,
extra_args=None, estimate=None, implicit=0, meta=None):
self.fcn = fcn
self.fjacb = fjacb
self.fjacd = fjacd
if extra_args is not None:
extra_args = tuple(extra_args)
self.extra_args = extra_args
self.estimate = estimate
self.implicit = implicit
self.meta = meta if meta is not None else {}
def set_meta(self, **kwds):
""" Update the metadata dictionary with the keywords and data provided
here.
Examples
--------
set_meta(name="Exponential", equation="y = a exp(b x) + c")
"""
self.meta.update(kwds)
def __getattr__(self, attr):
""" Dispatch attribute access to the metadata.
"""
if attr in self.meta:
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
class Output:
"""
The Output class stores the output of an ODR run.
Attributes
----------
beta : ndarray
        Estimated parameter values, of shape (p,).
sd_beta : ndarray
Standard deviations of the estimated parameters, of shape (p,).
cov_beta : ndarray
Covariance matrix of the estimated parameters, of shape (p,p).
Note that this `cov_beta` is not scaled by the residual variance
`res_var`, whereas `sd_beta` is. This means
``np.sqrt(np.diag(output.cov_beta * output.res_var))`` is the same
result as `output.sd_beta`.
delta : ndarray, optional
Array of estimated errors in input variables, of same shape as `x`.
eps : ndarray, optional
Array of estimated errors in response variables, of same shape as `y`.
xplus : ndarray, optional
Array of ``x + delta``.
y : ndarray, optional
Array ``y = fcn(x + delta)``.
res_var : float, optional
Residual variance.
sum_square : float, optional
Sum of squares error.
sum_square_delta : float, optional
Sum of squares of delta error.
sum_square_eps : float, optional
Sum of squares of eps error.
inv_condnum : float, optional
Inverse condition number (cf. ODRPACK UG p. 77).
rel_error : float, optional
Relative error in function values computed within fcn.
work : ndarray, optional
Final work array.
work_ind : dict, optional
Indices into work for drawing out values (cf. ODRPACK UG p. 83).
info : int, optional
Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38).
stopreason : list of str, optional
`info` interpreted into English.
Notes
-----
Takes one argument for initialization, the return value from the
function `~scipy.odr.odr`. The attributes listed as "optional" above are
only present if `~scipy.odr.odr` was run with ``full_output=1``.
"""
def __init__(self, output):
self.beta = output[0]
self.sd_beta = output[1]
self.cov_beta = output[2]
if len(output) == 4:
# full output
self.__dict__.update(output[3])
self.stopreason = _report_error(self.info)
def pprint(self):
""" Pretty-print important results.
"""
print('Beta:', self.beta)
print('Beta Std Error:', self.sd_beta)
print('Beta Covariance:', self.cov_beta)
if hasattr(self, 'info'):
            print('Residual Variance:', self.res_var)
print('Inverse Condition #:', self.inv_condnum)
print('Reason(s) for Halting:')
for r in self.stopreason:
print(' %s' % r)
class ODR:
"""
The ODR class gathers all information and coordinates the running of the
main fitting routine.
Members of instances of the ODR class have the same names as the arguments
to the initialization routine.
Parameters
----------
data : Data class instance
instance of the Data class
model : Model class instance
instance of the Model class
Other Parameters
----------------
beta0 : array_like of rank-1
a rank-1 sequence of initial parameter values. Optional if
model provides an "estimate" function to estimate these values.
delta0 : array_like of floats of rank-1, optional
a (double-precision) float array to hold the initial values of
the errors in the input variables. Must be same shape as data.x
ifixb : array_like of ints of rank-1, optional
sequence of integers with the same length as beta0 that determines
which parameters are held fixed. A value of 0 fixes the parameter,
a value > 0 makes the parameter free.
ifixx : array_like of ints with same shape as data.x, optional
an array of integers with the same shape as data.x that determines
which input observations are treated as fixed. One can use a sequence
of length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
job : int, optional
an integer telling ODRPACK what tasks to perform. See p. 31 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_job post-initialization for a more readable interface.
iprint : int, optional
an integer telling ODRPACK what to print. See pp. 33-34 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_iprint post-initialization for a more readable interface.
errfile : str, optional
string with the filename to print ODRPACK errors to. If the file already
exists, an error will be thrown. The `overwrite` argument can be used to
prevent this. *Do Not Open This File Yourself!*
rptfile : str, optional
string with the filename to print ODRPACK summaries to. If the file
already exists, an error will be thrown. The `overwrite` argument can be
used to prevent this. *Do Not Open This File Yourself!*
ndigit : int, optional
integer specifying the number of reliable digits in the computation
of the function.
taufac : float, optional
float specifying the initial trust region. The default value is 1.
The initial trust region is equal to taufac times the length of the
first computed Gauss-Newton step. taufac must be less than 1.
sstol : float, optional
float specifying the tolerance for convergence based on the relative
change in the sum-of-squares. The default value is eps**(1/2) where eps
is the smallest value such that 1 + eps > 1 for double precision
computation on the machine. sstol must be less than 1.
partol : float, optional
float specifying the tolerance for convergence based on the relative
        change in the estimated parameters. The default value is
        ``eps**(2/3)`` for explicit models and ``eps**(1/3)`` for implicit
        models. partol must be less
than 1.
maxit : int, optional
integer specifying the maximum number of iterations to perform. For
first runs, maxit is the total number of iterations performed and
defaults to 50. For restarts, maxit is the number of additional
iterations to perform and defaults to 10.
stpb : array_like, optional
sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute
finite difference derivatives wrt the parameters.
stpd : optional
array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative
step sizes to compute finite difference derivatives wrt the input
variable errors. If stpd is a rank-1 array with length m (the
dimensionality of the input variable), then the values are broadcast to
all observations.
sclb : array_like, optional
        sequence (``len(sclb) == len(beta0)``) of scaling factors for the
        parameters. The purpose of these scaling factors is to scale all of
the parameters to around unity. Normally appropriate scaling factors
are computed if this argument is not specified. Specify them yourself
if the automatic procedure goes awry.
scld : array_like, optional
        array (``scld.shape == data.x.shape`` or ``scld.shape == (m,)``) of
        scaling factors for the *errors* in the input variables. Again, these
        factors are automatically computed if you do not provide them. If
        ``scld.shape == (m,)``, then the scaling factors are broadcast to all
        observations.
work : ndarray, optional
array to hold the double-valued working data for ODRPACK. When
restarting, takes the value of self.output.work.
iwork : ndarray, optional
array to hold the integer-valued working data for ODRPACK. When
restarting, takes the value of self.output.iwork.
overwrite : bool, optional
If it is True, output files defined by `errfile` and `rptfile` are
overwritten. The default is False.
Attributes
----------
data : Data
The data for this fit
model : Model
The model used in fit
output : Output
        An instance of the Output class containing all of the returned
data from an invocation of ODR.run() or ODR.restart()
"""
def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,
ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,
ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,
stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None,
overwrite=False):
self.data = data
self.model = model
if beta0 is None:
if self.model.estimate is not None:
self.beta0 = _conv(self.model.estimate(self.data))
else:
raise ValueError(
"must specify beta0 or provide an estimater with the model"
)
else:
self.beta0 = _conv(beta0)
if ifixx is None and data.fix is not None:
ifixx = data.fix
if overwrite:
# remove output files for overwriting.
if rptfile is not None and os.path.exists(rptfile):
os.remove(rptfile)
if errfile is not None and os.path.exists(errfile):
os.remove(errfile)
self.delta0 = _conv(delta0)
# These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit
# platforms.
# XXX: some other FORTRAN compilers may not agree.
self.ifixx = _conv(ifixx, dtype=numpy.int32)
self.ifixb = _conv(ifixb, dtype=numpy.int32)
self.job = job
self.iprint = iprint
self.errfile = errfile
self.rptfile = rptfile
self.ndigit = ndigit
self.taufac = taufac
self.sstol = sstol
self.partol = partol
self.maxit = maxit
self.stpb = _conv(stpb)
self.stpd = _conv(stpd)
self.sclb = _conv(sclb)
self.scld = _conv(scld)
self.work = _conv(work)
self.iwork = _conv(iwork)
self.output = None
self._check()
def _check(self):
""" Check the inputs for consistency, but don't bother checking things
that the builtin function odr will check.
"""
x_s = list(self.data.x.shape)
if isinstance(self.data.y, numpy.ndarray):
y_s = list(self.data.y.shape)
if self.model.implicit:
raise OdrError("an implicit model cannot use response data")
else:
# implicit model with q == self.data.y
y_s = [self.data.y, x_s[-1]]
if not self.model.implicit:
raise OdrError("an explicit model needs response data")
self.set_job(fit_type=1)
if x_s[-1] != y_s[-1]:
raise OdrError("number of observations do not match")
n = x_s[-1]
if len(x_s) == 2:
m = x_s[0]
else:
m = 1
if len(y_s) == 2:
q = y_s[0]
else:
q = 1
p = len(self.beta0)
# permissible output array shapes
fcn_perms = [(q, n)]
fjacd_perms = [(q, m, n)]
fjacb_perms = [(q, p, n)]
if q == 1:
fcn_perms.append((n,))
fjacd_perms.append((m, n))
fjacb_perms.append((p, n))
if m == 1:
fjacd_perms.append((q, n))
if p == 1:
fjacb_perms.append((q, n))
if m == q == 1:
fjacd_perms.append((n,))
if p == q == 1:
fjacb_perms.append((n,))
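        # For example (a concrete sketch): with m == q == 1 and p == 2,
        # fcn may return shape (1, n) or (n,); fjacd shape (1, 1, n),
        # (1, n) or (n,); and fjacb shape (1, 2, n) or (2, n).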
# try evaluating the supplied functions to make sure they provide
# sensible outputs
arglist = (self.beta0, self.data.x)
if self.model.extra_args is not None:
arglist = arglist + self.model.extra_args
res = self.model.fcn(*arglist)
if res.shape not in fcn_perms:
print(res.shape)
print(fcn_perms)
raise OdrError("fcn does not output %s-shaped array" % y_s)
if self.model.fjacd is not None:
res = self.model.fjacd(*arglist)
if res.shape not in fjacd_perms:
raise OdrError(
"fjacd does not output %s-shaped array" % repr((q, m, n)))
if self.model.fjacb is not None:
res = self.model.fjacb(*arglist)
if res.shape not in fjacb_perms:
raise OdrError(
"fjacb does not output %s-shaped array" % repr((q, p, n)))
# check shape of delta0
if self.delta0 is not None and self.delta0.shape != self.data.x.shape:
raise OdrError(
"delta0 is not a %s-shaped array" % repr(self.data.x.shape))
if self.data.x.size == 0:
warn(("Empty data detected for ODR instance. "
"Do not expect any fitting to occur"),
OdrWarning)
def _gen_work(self):
""" Generate a suitable work array if one does not already exist.
"""
n = self.data.x.shape[-1]
p = self.beta0.shape[0]
if len(self.data.x.shape) == 2:
m = self.data.x.shape[0]
else:
m = 1
if self.model.implicit:
q = self.data.y
elif len(self.data.y.shape) == 2:
q = self.data.y.shape[0]
else:
q = 1
if self.data.we is None:
ldwe = ld2we = 1
elif len(self.data.we.shape) == 3:
ld2we, ldwe = self.data.we.shape[1:]
else:
we = self.data.we
ldwe = 1
ld2we = 1
if we.ndim == 1 and q == 1:
ldwe = n
elif we.ndim == 2:
if we.shape == (q, q):
ld2we = q
elif we.shape == (q, n):
ldwe = n
if self.job % 10 < 2:
# ODR not OLS
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p +
2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q)
else:
# OLS not ODR
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p +
5*q + q*(p+m) + ldwe*ld2we*q)
if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\
and self.work.dtype.str.endswith('f8'):
# the existing array is fine
return
else:
self.work = numpy.zeros((lwork,), float)
def set_job(self, fit_type=None, deriv=None, var_calc=None,
del_init=None, restart=None):
"""
Sets the "job" parameter is a hopefully comprehensible way.
If an argument is not specified, then the value is left as is. The
default value from class initialization is for all of these options set
to 0.
Parameters
----------
fit_type : {0, 1, 2} int
0 -> explicit ODR
1 -> implicit ODR
2 -> ordinary least-squares
deriv : {0, 1, 2, 3} int
0 -> forward finite differences
1 -> central finite differences
2 -> user-supplied derivatives (Jacobians) with results
checked by ODRPACK
3 -> user-supplied derivatives, no checking
var_calc : {0, 1, 2} int
0 -> calculate asymptotic covariance matrix and fit
parameter uncertainties (V_B, s_B) using derivatives
recomputed at the final solution
1 -> calculate V_B and s_B using derivatives from last iteration
2 -> do not calculate V_B and s_B
del_init : {0, 1} int
0 -> initial input variable offsets set to 0
1 -> initial offsets provided by user in variable "work"
restart : {0, 1} int
0 -> fit is not a restart
1 -> fit is a restart
Notes
-----
The permissible values are different from those given on pg. 31 of the
ODRPACK User's Guide only in that one cannot specify numbers greater than
the last value for each variable.
If one does not supply functions to compute the Jacobians, the fitting
procedure will change deriv to 0, finite differences, as a default. To
initialize the input variable offsets by yourself, set del_init to 1 and
put the offsets into the "work" variable correctly.
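        For example (a worked sketch of the digit encoding): on a fresh
        instance, ``set_job(fit_type=2)`` gives ``job == 2`` (ordinary
        least-squares), and a subsequent ``set_job(deriv=3)`` updates it
        to ``job == 32``.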
"""
if self.job is None:
job_l = [0, 0, 0, 0, 0]
else:
job_l = [self.job // 10000 % 10,
self.job // 1000 % 10,
self.job // 100 % 10,
self.job // 10 % 10,
self.job % 10]
if fit_type in (0, 1, 2):
job_l[4] = fit_type
if deriv in (0, 1, 2, 3):
job_l[3] = deriv
if var_calc in (0, 1, 2):
job_l[2] = var_calc
if del_init in (0, 1):
job_l[1] = del_init
if restart in (0, 1):
job_l[0] = restart
self.job = (job_l[0]*10000 + job_l[1]*1000 +
job_l[2]*100 + job_l[3]*10 + job_l[4])
def set_iprint(self, init=None, so_init=None,
iter=None, so_iter=None, iter_step=None, final=None, so_final=None):
""" Set the iprint parameter for the printing of computation reports.
If any of the arguments are specified here, then they are set in the
iprint member. If iprint is not set manually or with this method, then
ODRPACK defaults to no printing. If no filename is specified with the
member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to
print to stdout in addition to the specified filename by setting the
        so_* arguments to this function. There is no separate option to print
        to stdout but not to a file, since that is already the behavior when
        no rptfile filename is specified.
There are three reports: initialization, iteration, and final reports.
They are represented by the arguments init, iter, and final
respectively. The permissible values are 0, 1, and 2 representing "no
report", "short report", and "long report" respectively.
The argument iter_step (0 <= iter_step <= 9) specifies how often to make
the iteration report; the report will be made for every iter_step'th
iteration starting with iteration one. If iter_step == 0, then no
iteration report is made, regardless of the other arguments.
If the rptfile is None, then any so_* arguments supplied will raise an
exception.
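        For example (a worked sketch): with iprint unset and no rptfile,
        ``set_iprint(init=1, final=1)`` yields ``iprint == 1001``, i.e.,
        short initialization and final reports printed to stdout.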
"""
if self.iprint is None:
self.iprint = 0
ip = [self.iprint // 1000 % 10,
self.iprint // 100 % 10,
self.iprint // 10 % 10,
self.iprint % 10]
# make a list to convert iprint digits to/from argument inputs
# rptfile, stdout
ip2arg = [[0, 0], # none, none
[1, 0], # short, none
[2, 0], # long, none
[1, 1], # short, short
[2, 1], # long, short
[1, 2], # short, long
[2, 2]] # long, long
if (self.rptfile is None and
(so_init is not None or
so_iter is not None or
so_final is not None)):
raise OdrError(
"no rptfile specified, cannot output to stdout twice")
iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]]
if init is not None:
iprint_l[0] = init
if so_init is not None:
iprint_l[1] = so_init
if iter is not None:
iprint_l[2] = iter
if so_iter is not None:
iprint_l[3] = so_iter
if final is not None:
iprint_l[4] = final
if so_final is not None:
iprint_l[5] = so_final
if iter_step in range(10):
# 0..9
ip[2] = iter_step
ip[0] = ip2arg.index(iprint_l[0:2])
ip[1] = ip2arg.index(iprint_l[2:4])
ip[3] = ip2arg.index(iprint_l[4:6])
self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3]
def run(self):
""" Run the fitting routine with all of the information given and with ``full_output=1``.
Returns
-------
output : Output instance
This object is also assigned to the attribute .output .
"""
args = (self.model.fcn, self.beta0, self.data.y, self.data.x)
kwds = {'full_output': 1}
kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile',
'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb',
'stpd', 'sclb', 'scld', 'work', 'iwork']
if self.delta0 is not None and (self.job // 10000) % 10 == 0:
# delta0 provided and fit is not a restart
self._gen_work()
d0 = numpy.ravel(self.delta0)
self.work[:len(d0)] = d0
# set the kwds from other objects explicitly
if self.model.fjacb is not None:
kwds['fjacb'] = self.model.fjacb
if self.model.fjacd is not None:
kwds['fjacd'] = self.model.fjacd
if self.data.we is not None:
kwds['we'] = self.data.we
if self.data.wd is not None:
kwds['wd'] = self.data.wd
if self.model.extra_args is not None:
kwds['extra_args'] = self.model.extra_args
# implicitly set kwds from self's members
for attr in kwd_l:
obj = getattr(self, attr)
if obj is not None:
kwds[attr] = obj
self.output = Output(odr(*args, **kwds))
return self.output
def restart(self, iter=None):
""" Restarts the run with iter more iterations.
Parameters
----------
iter : int, optional
ODRPACK's default for the number of new iterations is 10.
Returns
-------
output : Output instance
This object is also assigned to the attribute .output .
"""
if self.output is None:
raise OdrError("cannot restart: run() has not been called before")
self.set_job(restart=1)
self.work = self.output.work
self.iwork = self.output.iwork
self.maxit = iter
return self.run()
| 42,457
| 35.823938
| 97
|
py
|
scipy
|
scipy-main/scipy/odr/_models.py
|
""" Collection of Model instances for use with the odrpack fitting package.
"""
import numpy as np
from scipy.odr._odrpack import Model
__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic',
'polynomial']
def _lin_fcn(B, x):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + (x*b).sum(axis=0)
def _lin_fjb(B, x):
a = np.ones(x.shape[-1], float)
res = np.concatenate((a, x.ravel()))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _lin_fjd(B, x):
b = B[1:]
b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0)
b.shape = x.shape
return b
def _lin_est(data):
# Eh. The answer is analytical, so just return all ones.
# Don't return zeros since that will interfere with
# ODRPACK's auto-scaling procedures.
if len(data.x.shape) == 2:
m = data.x.shape[0]
else:
m = 1
return np.ones((m + 1,), float)
def _poly_fcn(B, x, powers):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + np.sum(b * np.power(x, powers), axis=0)
def _poly_fjacb(B, x, powers):
res = np.concatenate((np.ones(x.shape[-1], float),
np.power(x, powers).flat))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _poly_fjacd(B, x, powers):
b = B[1:]
b.shape = (b.shape[0], 1)
b = b * powers
return np.sum(b * np.power(x, powers-1), axis=0)
def _exp_fcn(B, x):
return B[0] + np.exp(B[1] * x)
def _exp_fjd(B, x):
return B[1] * np.exp(B[1] * x)
def _exp_fjb(B, x):
res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
res.shape = (2, x.shape[-1])
return res
def _exp_est(data):
# Eh.
return np.array([1., 1.])
class _MultilinearModel(Model):
r"""
Arbitrary-dimensional linear model
This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i`
Examples
--------
We can calculate orthogonal distance regression with an arbitrary
dimensional linear model:
>>> from scipy import odr
>>> import numpy as np
>>> x = np.linspace(0.0, 5.0)
>>> y = 10.0 + 5.0 * x
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.multilinear)
>>> output = odr_obj.run()
>>> print(output.beta)
[10. 5.]
"""
def __init__(self):
super().__init__(
_lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est,
meta={'name': 'Arbitrary-dimensional Linear',
'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]',
'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'})
multilinear = _MultilinearModel()
def polynomial(order):
"""
Factory function for a general polynomial model.
Parameters
----------
order : int or sequence
If an integer, it becomes the order of the polynomial to fit. If
a sequence of numbers, then these are the explicit powers in the
polynomial.
A constant term (power 0) is always included, so don't include 0.
Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
Returns
-------
polynomial : Model instance
Model instance.
Examples
--------
We can fit an input data using orthogonal distance regression (ODR) with
a polynomial model:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy import odr
>>> x = np.linspace(0.0, 5.0)
>>> y = np.sin(x)
>>> poly_model = odr.polynomial(3) # using third order polynomial model
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, poly_model)
>>> output = odr_obj.run() # running ODR fitting
>>> poly = np.poly1d(output.beta[::-1])
>>> poly_y = poly(x)
>>> plt.plot(x, y, label="input data")
>>> plt.plot(x, poly_y, label="polynomial ODR")
>>> plt.legend()
>>> plt.show()
"""
powers = np.asarray(order)
if powers.shape == ():
# Scalar.
powers = np.arange(1, powers + 1)
powers.shape = (len(powers), 1)
len_beta = len(powers) + 1
def _poly_est(data, len_beta=len_beta):
# Eh. Ignore data and return all ones.
return np.ones((len_beta,), float)
return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
estimate=_poly_est, extra_args=(powers,),
meta={'name': 'Sorta-general Polynomial',
'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' %
(len_beta-1)})
class _ExponentialModel(Model):
r"""
Exponential model
This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}`
Examples
--------
We can calculate orthogonal distance regression with an exponential model:
>>> from scipy import odr
>>> import numpy as np
>>> x = np.linspace(0.0, 5.0)
>>> y = -10.0 + np.exp(0.5*x)
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.exponential)
>>> output = odr_obj.run()
>>> print(output.beta)
[-10. 0.5]
"""
def __init__(self):
super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
estimate=_exp_est,
meta={'name': 'Exponential',
'equ': 'y= B_0 + exp(B_1 * x)',
'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'})
exponential = _ExponentialModel()
def _unilin(B, x):
return x*B[0] + B[1]
def _unilin_fjd(B, x):
return np.ones(x.shape, float) * B[0]
def _unilin_fjb(B, x):
_ret = np.concatenate((x, np.ones(x.shape, float)))
_ret.shape = (2,) + x.shape
return _ret
def _unilin_est(data):
return (1., 1.)
def _quadratic(B, x):
return x*(x*B[0] + B[1]) + B[2]
def _quad_fjd(B, x):
return 2*x*B[0] + B[1]
def _quad_fjb(B, x):
_ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
_ret.shape = (3,) + x.shape
return _ret
def _quad_est(data):
return (1.,1.,1.)
class _UnilinearModel(Model):
r"""
Univariate linear model
This model is defined by :math:`y = \beta_0 x + \beta_1`
Examples
--------
We can calculate orthogonal distance regression with an unilinear model:
>>> from scipy import odr
>>> import numpy as np
>>> x = np.linspace(0.0, 5.0)
>>> y = 1.0 * x + 2.0
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.unilinear)
>>> output = odr_obj.run()
>>> print(output.beta)
[1. 2.]
"""
def __init__(self):
super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
estimate=_unilin_est,
meta={'name': 'Univariate Linear',
'equ': 'y = B_0 * x + B_1',
'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
unilinear = _UnilinearModel()
class _QuadraticModel(Model):
r"""
Quadratic model
This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2`
Examples
--------
We can calculate orthogonal distance regression with a quadratic model:
>>> from scipy import odr
>>> import numpy as np
>>> x = np.linspace(0.0, 5.0)
>>> y = 1.0 * x ** 2 + 2.0 * x + 3.0
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.quadratic)
>>> output = odr_obj.run()
>>> print(output.beta)
[1. 2. 3.]
"""
def __init__(self):
super().__init__(
_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est,
meta={'name': 'Quadratic',
'equ': 'y = B_0*x**2 + B_1*x + B_2',
                  'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2$'})
quadratic = _QuadraticModel()
| 7,800
| 23.686709
| 78
|
py
|
scipy
|
scipy-main/scipy/odr/_add_newdocs.py
|
from numpy import add_newdoc
add_newdoc('scipy.odr', 'odr',
"""
odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None, full_output=0)
Low-level function for ODR.
See Also
--------
ODR : The ODR class gathers all information and coordinates the running of the main fitting routine.
Model : The Model class stores information about the function you wish to fit.
Data : The data to fit.
RealData : Data with weights as actual std. dev.s and/or covariances.
Notes
-----
This is a function performing the same operation as the `ODR`,
`Model`, and `Data` classes together. The parameters of this
function are explained in the class documentation.
""")
add_newdoc('scipy.odr.__odrpack', '_set_exceptions',
"""
_set_exceptions(odr_error, odr_stop)
Internal function: set exception classes.
""")
| 1,090
| 34.193548
| 292
|
py
|
scipy
|
scipy-main/scipy/odr/models.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.odr` namespace for importing the functions
# included below.
import warnings
from . import _models
__all__ = [ # noqa: F822
'Model', 'exponential', 'multilinear', 'unilinear',
'quadratic', 'polynomial'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.odr.models is deprecated and has no attribute "
f"{name}. Try looking in scipy.odr instead.")
warnings.warn(f"Please use `{name}` from the `scipy.odr` namespace, "
"the `scipy.odr.models` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_models, name)
| 793
| 26.37931
| 76
|
py
|
scipy
|
scipy-main/scipy/odr/__init__.py
|
"""
=================================================
Orthogonal distance regression (:mod:`scipy.odr`)
=================================================
.. currentmodule:: scipy.odr
Package Content
===============
.. autosummary::
:toctree: generated/
Data -- The data to fit.
RealData -- Data with weights as actual std. dev.s and/or covariances.
Model -- Stores information about the function to be fit.
ODR -- Gathers all info & manages the main fitting routine.
Output -- Result from the fit.
odr -- Low-level function for ODR.
OdrWarning -- Warning about potential problems when running ODR.
OdrError -- Error exception.
OdrStop -- Stop exception.
polynomial -- Factory function for a general polynomial model.
exponential -- Exponential model
multilinear -- Arbitrary-dimensional linear model
unilinear -- Univariate linear model
quadratic -- Quadratic model
Usage information
=================
Introduction
------------
Why Orthogonal Distance Regression (ODR)? Sometimes one has
measurement errors in the explanatory (a.k.a., "independent")
variable(s), not just the response (a.k.a., "dependent") variable(s).
Ordinary Least Squares (OLS) fitting procedures treat the data for
explanatory variables as fixed, i.e., not subject to error of any kind.
Furthermore, OLS procedures require that the response variables be an
explicit function of the explanatory variables; sometimes making the
equation explicit is impractical and/or introduces errors. ODR can
handle both of these cases with ease, and can even reduce to the OLS
case if that is sufficient for the problem.
ODRPACK is a FORTRAN-77 library for performing ODR with possibly
non-linear fitting functions. It uses a modified trust-region
Levenberg-Marquardt-type algorithm [1]_ to estimate the function
parameters. The fitting functions are provided by Python functions
operating on NumPy arrays. The required derivatives may be provided
by Python functions as well, or may be estimated numerically. ODRPACK
can do explicit or implicit ODR fits, or it can do OLS. Input and
output variables may be multidimensional. Weights can be provided to
account for different variances of the observations, and even
covariances between dimensions of the variables.
The `scipy.odr` package offers an object-oriented interface to
ODRPACK, in addition to the low-level `odr` function.
Additional background information about ODRPACK can be found in the
`ODRPACK User's Guide
<https://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, which is
recommended reading.
Basic usage
-----------
1. Define the function you want to fit against.::
def f(B, x):
'''Linear function y = m*x + b'''
# B is a vector of the parameters.
# x is an array of the current x values.
# x is in the same format as the x passed to Data or RealData.
#
# Return an array in the same format as y passed to Data or RealData.
return B[0]*x + B[1]
2. Create a Model.::
linear = Model(f)
3. Create a Data or RealData instance.::
mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2))
or, when the actual covariances are known::
mydata = RealData(x, y, sx=sx, sy=sy)
4. Instantiate ODR with your data, model and initial parameter estimate.::
myodr = ODR(mydata, linear, beta0=[1., 2.])
5. Run the fit.::
myoutput = myodr.run()
6. Examine output.::
myoutput.pprint()
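Putting the steps together (a self-contained sketch; the synthetic data
and noise level below are illustrative assumptions, not part of ODRPACK)::
    import numpy as np
    from scipy.odr import Data, Model, ODR
    def f(B, x):
        return B[0]*x + B[1]
    rng = np.random.default_rng(seed=1234)
    x = np.linspace(0.0, 10.0, 20)
    y = 3.0*x + 1.0 + rng.normal(scale=0.5, size=x.size)
    linear = Model(f)
    mydata = Data(x, y)
    myodr = ODR(mydata, linear, beta0=[1., 2.])
    myoutput = myodr.run()
    myoutput.pprint()  # myoutput.beta should be close to [3., 1.]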
References
----------
.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression,"
in "Statistical analysis of measurement error models and
applications: proceedings of the AMS-IMS-SIAM joint summer research
conference held June 10-16, 1989," Contemporary Mathematics,
vol. 112, pg. 186, 1990.
"""
# version: 0.7
# author: Robert Kern <robert.kern@gmail.com>
# date: 2006-09-21
from ._odrpack import *
from ._models import *
from . import _add_newdocs
# Deprecated namespaces, to be removed in v2.0.0
from . import models, odrpack
__all__ = [s for s in dir()
if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 4,325
| 31.772727
| 80
|
py
|
scipy
|
scipy-main/scipy/odr/tests/test_odr.py
|
import tempfile
import shutil
import os
import numpy as np
from numpy import pi
from numpy.testing import (assert_array_almost_equal,
assert_equal, assert_warns)
import pytest
from pytest import raises as assert_raises
from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning,
multilinear, exponential, unilinear, quadratic,
polynomial)
class TestODR:
# Bad Data for 'x'
def test_bad_data(self):
assert_raises(ValueError, Data, 2, 1)
assert_raises(ValueError, RealData, 2, 1)
# Empty Data for 'x'
def empty_data_func(self, B, x):
return B[0]*x + B[1]
def test_empty_data(self):
beta0 = [0.02, 0.0]
linear = Model(self.empty_data_func)
empty_dat = Data([], [])
assert_warns(OdrWarning, ODR,
empty_dat, linear, beta0=beta0)
empty_dat = RealData([], [])
assert_warns(OdrWarning, ODR,
empty_dat, linear, beta0=beta0)
# Explicit Example
def explicit_fcn(self, B, x):
ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2)
return ret
def explicit_fjd(self, B, x):
eBx = np.exp(B[2]*x)
ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx
return ret
def explicit_fjb(self, B, x):
eBx = np.exp(B[2]*x)
res = np.vstack([np.ones(x.shape[-1]),
np.power(eBx-1.0, 2),
B[1]*2.0*(eBx-1.0)*eBx*x])
return res
def test_explicit(self):
explicit_mod = Model(
self.explicit_fcn,
fjacb=self.explicit_fjb,
fjacd=self.explicit_fjd,
meta=dict(name='Sample Explicit Model',
ref='ODRPACK UG, pg. 39'),
)
explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.],
[1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6,
1213.8,1215.5,1212.])
explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1],
ifixx=[0,0,1,1,1,1,1,1,1,1,1,0])
explicit_odr.set_job(deriv=2)
explicit_odr.set_iprint(init=0, iter=0, final=0)
out = explicit_odr.run()
assert_array_almost_equal(
out.beta,
np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
-8.7849712165253724e-02]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
-8.0978217468468912e-04],
[-3.7421976890364739e-01, 1.0529686462751804e+00,
-1.9453521827942002e-03],
[-8.0978217468468912e-04, -1.9453521827942002e-03,
1.6827336938454476e-05]]),
)
# Implicit Example
def implicit_fcn(self, B, x):
return (B[2]*np.power(x[0]-B[0], 2) +
2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) +
B[4]*np.power(x[1]-B[1], 2) - 1.0)
def test_implicit(self):
implicit_mod = Model(
self.implicit_fcn,
implicit=1,
meta=dict(name='Sample Implicit Model',
ref='ODRPACK UG, pg. 49'),
)
implicit_dat = Data([
[0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28,
-0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44],
[-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32,
-6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]],
1,
)
implicit_odr = ODR(implicit_dat, implicit_mod,
beta0=[-1.0, -3.0, 0.09, 0.02, 0.08])
out = implicit_odr.run()
assert_array_almost_equal(
out.beta,
np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354,
0.0162299708984738, 0.0797537982976416]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314,
0.0027500347539902, 0.0034962501532468]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[2.1089274602333052e+00, -1.9437686411979040e+00,
7.0263550868344446e-02, -4.7175267373474862e-02,
5.2515575927380355e-02],
[-1.9437686411979040e+00, 2.0481509222414456e+00,
-6.1600515853057307e-02, 4.6268827806232933e-02,
-5.8822307501391467e-02],
[7.0263550868344446e-02, -6.1600515853057307e-02,
2.8659542561579308e-03, -1.4628662260014491e-03,
1.4528860663055824e-03],
[-4.7175267373474862e-02, 4.6268827806232933e-02,
-1.4628662260014491e-03, 1.2855592885514335e-03,
-1.2692942951415293e-03],
[5.2515575927380355e-02, -5.8822307501391467e-02,
1.4528860663055824e-03, -1.2692942951415293e-03,
2.0778813389755596e-03]]),
)
# Multi-variable Example
def multi_fcn(self, B, x):
if (x < 0.0).any():
raise OdrStop
theta = pi*B[3]/2.
ctheta = np.cos(theta)
stheta = np.sin(theta)
omega = np.power(2.*pi*x*np.exp(-B[2]), B[3])
phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta))
r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) +
np.power(omega*stheta, 2)), -B[4])
ret = np.vstack([B[1] + r*np.cos(B[4]*phi),
r*np.sin(B[4]*phi)])
return ret
def test_multi(self):
multi_mod = Model(
self.multi_fcn,
meta=dict(name='Sample Multi-Response Model',
ref='ODRPACK UG, pg. 56'),
)
multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0,
700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0,
15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0])
multi_y = np.array([
[4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713,
3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984,
2.934, 2.876, 2.838, 2.798, 2.759],
[0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309,
0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218,
0.202, 0.182, 0.168, 0.153, 0.139],
])
n = len(multi_x)
multi_we = np.zeros((2, 2, n), dtype=float)
multi_ifixx = np.ones(n, dtype=int)
multi_delta = np.zeros(n, dtype=float)
multi_we[0,0,:] = 559.6
multi_we[1,0,:] = multi_we[0,1,:] = -1634.0
multi_we[1,1,:] = 8397.0
for i in range(n):
if multi_x[i] < 100.0:
multi_ifixx[i] = 0
elif multi_x[i] <= 150.0:
pass # defaults are fine
elif multi_x[i] <= 1000.0:
multi_delta[i] = 25.0
elif multi_x[i] <= 10000.0:
multi_delta[i] = 560.0
elif multi_x[i] <= 100000.0:
multi_delta[i] = 9500.0
else:
multi_delta[i] = 144000.0
if multi_x[i] == 100.0 or multi_x[i] == 150.0:
multi_we[:,:,i] = 0.0
multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2),
we=multi_we)
multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5],
delta0=multi_delta, ifixx=multi_ifixx)
multi_odr.set_job(deriv=1, del_init=1)
out = multi_odr.run()
assert_array_almost_equal(
out.beta,
np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978,
0.5101147161764654, 0.5173902330489161]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757,
0.0132642749596149, 0.0288529201353984]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406,
-0.0058700836512467, 0.011281212888768],
[0.0036159705923791, 0.0064793789429006, 0.0517610978353126,
-0.0051181304940204, 0.0130726943624117],
[0.0438637051470406, 0.0517610978353126, 0.5182263323095322,
-0.0563083340093696, 0.1269490939468611],
[-0.0058700836512467, -0.0051181304940204, -0.0563083340093696,
0.0066939246261263, -0.0140184391377962],
[0.011281212888768, 0.0130726943624117, 0.1269490939468611,
-0.0140184391377962, 0.0316733013820852]]),
)
# Pearson's Data
# K. Pearson, Philosophical Magazine, 2, 559 (1901)
def pearson_fcn(self, B, x):
return B[0] + B[1]*x
def test_pearson(self):
p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4])
p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5])
p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.])
p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04])
p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)
# Reverse the data to test invariance of results
pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)
p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))
p_odr = ODR(p_dat, p_mod, beta0=[1.,1.])
pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.])
out = p_odr.run()
assert_array_almost_equal(
out.beta,
np.array([5.4767400299231674, -0.4796082367610305]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.3590121690702467, 0.0706291186037444]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[0.0854275622946333, -0.0161807025443155],
[-0.0161807025443155, 0.003306337993922]]),
)
rout = pr_odr.run()
assert_array_almost_equal(
rout.beta,
np.array([11.4192022410781231, -2.0850374506165474]),
)
assert_array_almost_equal(
rout.sd_beta,
np.array([0.9820231665657161, 0.3070515616198911]),
)
assert_array_almost_equal(
rout.cov_beta,
np.array([[0.6391799462548782, -0.1955657291119177],
[-0.1955657291119177, 0.0624888159223392]]),
)
# Lorentz Peak
# The data is taken from one of the undergraduate physics labs I performed.
def lorentz(self, beta, x):
return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x -
beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0)))
def test_lorentz(self):
l_sy = np.array([.29]*18)
l_sx = np.array([.000972971,.000948268,.000707632,.000706679,
.000706074, .000703918,.000698955,.000456856,
.000455207,.000662717,.000654619,.000652694,
.000000859202,.00106589,.00106378,.00125483, .00140818,.00241839])
l_dat = RealData(
[3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
3.6562, 3.62498, 3.55525, 3.41886],
[652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
sx=l_sx,
sy=l_sy,
)
l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))
out = l_odr.run()
assert_array_almost_equal(
out.beta,
np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
3.7798193600109009e+00]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
2.4451209281408992e-04]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
-3.1236953270424990e-05],
[-6.9067261911110836e-05, 5.6077531517333009e-08,
3.6133261832722601e-08],
[-3.1236953270424990e-05, 3.6133261832722601e-08,
2.7261220025171730e-08]]),
)
def test_ticket_1253(self):
def linear(c, x):
return c[0]*x+c[1]
c = [2.0, 3.0]
x = np.linspace(0, 10)
y = linear(c, x)
model = Model(linear)
data = Data(x, y, wd=1.0, we=1.0)
job = ODR(data, model, beta0=[1.0, 1.0])
result = job.run()
assert_equal(result.info, 2)
# Verify fix for gh-9140
def test_ifixx(self):
x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int)))
data = Data(np.vstack((x1, x2)), y=1, fix=fix)
model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True)
odr1 = ODR(data, model, beta0=np.array([1.]))
sol1 = odr1.run()
odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
sol2 = odr2.run()
assert_equal(sol1.beta, sol2.beta)
# verify bugfix for #11800 in #11802
def test_ticket_11800(self):
# parameters
beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5])
nr_measurements = 10
std_dev_x = 0.01
x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866,
-0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301],
[-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829,
0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]])
std_dev_y = 0.05
y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642,
0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929],
[0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536,
-0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]])
beta_solution = np.array([
2.62920235756665876536e+00, -1.26608484996299608838e+02, 1.29703572775403074502e+02,
-1.88560985401185465804e+00, 7.83834160771274923718e+01, -7.64124076838087091801e+01])
# model's function and Jacobians
def func(beta, x):
y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :]
y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :]
return np.vstack((y0, y1))
def df_dbeta_odr(beta, x):
nr_meas = np.shape(x)[1]
zeros = np.zeros(nr_meas)
ones = np.ones(nr_meas)
dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros])
dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]])
return np.stack((dy0, dy1))
def df_dx_odr(beta, x):
nr_meas = np.shape(x)[1]
ones = np.ones(nr_meas)
dy0 = np.array([beta[1] * ones, beta[2] * ones])
dy1 = np.array([beta[4] * ones, beta[5] * ones])
return np.stack((dy0, dy1))
# do measurements with errors in independent and dependent variables
x0_true = np.linspace(1, 10, nr_measurements)
x1_true = np.linspace(1, 10, nr_measurements)
x_true = np.array([x0_true, x1_true])
y_true = func(beta_true, x_true)
x_meas = x_true + x_error
y_meas = y_true + y_error
# estimate model's parameters
model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr)
data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y)
odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100)
#odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1)
odr_obj.set_job(deriv=3)
odr_out = odr_obj.run()
# check results
assert_equal(odr_out.info, 1)
assert_array_almost_equal(odr_out.beta, beta_solution)
def test_multilinear_model(self):
x = np.linspace(0.0, 5.0)
y = 10.0 + 5.0 * x
data = Data(x, y)
odr_obj = ODR(data, multilinear)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [10.0, 5.0])
def test_exponential_model(self):
x = np.linspace(0.0, 5.0)
y = -10.0 + np.exp(0.5*x)
data = Data(x, y)
odr_obj = ODR(data, exponential)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [-10.0, 0.5])
def test_polynomial_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3
poly_model = polynomial(3)
data = Data(x, y)
odr_obj = ODR(data, poly_model)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0])
def test_unilinear_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x + 2.0
data = Data(x, y)
odr_obj = ODR(data, unilinear)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0])
def test_quadratic_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x ** 2 + 2.0 * x + 3.0
data = Data(x, y)
odr_obj = ODR(data, quadratic)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0])
def test_work_ind(self):
def func(par, x):
b0, b1 = par
return b0 + b1 * x
# generate some data
n_data = 4
x = np.arange(n_data)
y = np.where(x % 2, x + 0.1, x - 0.1)
x_err = np.full(n_data, 0.1)
y_err = np.full(n_data, 0.1)
# do the fitting
linear_model = Model(func)
real_data = RealData(x, y, sx=x_err, sy=y_err)
odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4])
odr_obj.set_job(fit_type=0)
out = odr_obj.run()
sd_ind = out.work_ind['sd']
assert_array_almost_equal(out.sd_beta,
out.work[sd_ind:sd_ind + len(out.sd_beta)])
@pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better "
"not to run this test, see gh-13127")
def test_output_file_overwrite(self):
"""
Verify fix for gh-1892
"""
def func(b, x):
return b[0] + b[1] * x
p = Model(func)
data = Data(np.arange(10), 12 * np.arange(10))
tmp_dir = tempfile.mkdtemp()
error_file_path = os.path.join(tmp_dir, "error.dat")
report_file_path = os.path.join(tmp_dir, "report.dat")
try:
ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
rptfile=report_file_path).run()
ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
rptfile=report_file_path, overwrite=True).run()
finally:
# remove output files for clean up
shutil.rmtree(tmp_dir)
def test_odr_model_default_meta(self):
def func(b, x):
return b[0] + b[1] * x
p = Model(func)
p.set_meta(name='Sample Model Meta', ref='ODRPACK')
assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'})
def test_work_array_del_init(self):
"""
Verify fix for gh-18739 where del_init=1 fails.
"""
def func(b, x):
return b[0] + b[1] * x
# generate some data
n_data = 4
x = np.arange(n_data)
y = np.where(x % 2, x + 0.1, x - 0.1)
x_err = np.full(n_data, 0.1)
y_err = np.full(n_data, 0.1)
linear_model = Model(func)
# Try various shapes of the `we` array from various `sy` and `covy`
rd0 = RealData(x, y, sx=x_err, sy=y_err)
rd1 = RealData(x, y, sx=x_err, sy=0.1)
rd2 = RealData(x, y, sx=x_err, sy=[0.1])
rd3 = RealData(x, y, sx=x_err, sy=np.full((1, n_data), 0.1))
rd4 = RealData(x, y, sx=x_err, covy=[[0.01]])
rd5 = RealData(x, y, sx=x_err, covy=np.full((1, 1, n_data), 0.01))
for rd in [rd0, rd1, rd2, rd3, rd4, rd5]:
odr_obj = ODR(rd, linear_model, beta0=[0.4, 0.4],
delta0=np.full(n_data, -0.1))
odr_obj.set_job(fit_type=0, del_init=1)
# Just make sure that it runs without raising an exception.
odr_obj.run()
| 20,931
| 36.179396
| 98
|
py
|
scipy
|
scipy-main/scipy/odr/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/stats/_continuous_distns.py
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import warnings
from collections.abc import Iterable
from functools import wraps, cached_property
import ctypes
import numpy as np
from numpy.polynomial import Polynomial
from scipy._lib.doccer import (extend_notes_in_docstring,
replace_notes_in_docstring,
inherit_docstring_from)
from scipy._lib._ccallback import LowLevelCallable
from scipy import optimize
from scipy import integrate
import scipy.special as sc
import scipy.special._ufuncs as scu
from scipy._lib._util import _lazyselect, _lazywhere
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (
get_distribution_names, _kurtosis,
rv_continuous, _skew, _get_fixed_fit_value, _check_shape, _ShapeInfo)
from ._ksstats import kolmogn, kolmognp, kolmogni
from ._constants import (_XMIN, _LOGXMIN, _EULER, _ZETA3, _SQRT_PI,
_SQRT_2_OVER_PI, _LOG_SQRT_2_OVER_PI)
from ._censored_data import CensoredData
import scipy.stats._boost as _boost
from scipy.optimize import root_scalar
from scipy.stats._warnings_errors import FitError
import scipy.stats as stats
def _remove_optimizer_parameters(kwds):
"""
Remove the optimizer-related keyword arguments 'loc', 'scale' and
'optimizer' from `kwds`. Then check that `kwds` is empty, and
raise `TypeError("Unknown arguments: %s." % kwds)` if it is not.
This function is used in the fit method of distributions that override
the default method and do not use the default optimization code.
`kwds` is modified in-place.
"""
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
kwds.pop('method', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
def _call_super_mom(fun):
# If fit method is overridden only for MLE and doesn't specify what to do
# if method == 'mm' or with censored data, this decorator calls the generic
# implementation.
@wraps(fun)
def wrapper(self, data, *args, **kwds):
method = kwds.get('method', 'mle').lower()
censored = isinstance(data, CensoredData)
if method == 'mm' or (censored and data.num_censored() > 0):
return super(type(self), self).fit(data, *args, **kwds)
else:
if censored:
# data is an instance of CensoredData, but actually holds
# no censored values, so replace it with the array of
# uncensored values.
data = data._uncensored
return fun(self, data, *args, **kwds)
return wrapper
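# Usage sketch (illustrative only, not an existing distribution): decorating
# a distribution's ``fit`` keeps the fast MLE-specific code path, while
# method-of-moments fits (method='mm') and censored data fall through to the
# generic superclass implementation:
#
#   class mydist_gen(rv_continuous):
#       @_call_super_mom
#       def fit(self, data, *args, **kwds):
#           ...  # explicit MLE formulas for uncensored data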
def _get_left_bracket(fun, rbrack, lbrack=None):
# find left bracket for `root_scalar`. A guess for lbrack may be provided.
lbrack = lbrack or rbrack - 1
diff = rbrack - lbrack
# if there is no sign change in `fun` between the brackets, expand
# rbrack - lbrack until a sign change occurs
def interval_contains_root(lbrack, rbrack):
# return true if the signs disagree.
return np.sign(fun(lbrack)) != np.sign(fun(rbrack))
while not interval_contains_root(lbrack, rbrack):
diff *= 2
lbrack = rbrack - diff
msg = ("The solver could not find a bracket containing a "
"root to an MLE first order condition.")
if np.isinf(lbrack):
raise FitSolverError(msg)
return lbrack
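# Illustrative sketch (not part of the public API): for fun(x) = x - 3.0,
# which has a root at x = 3, starting from rbrack = 10 the interval is
# doubled until the sign of fun changes, so
# _get_left_bracket(lambda x: x - 3.0, 10) returns 2, and (2, 10) brackets
# the root.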
class ksone_gen(rv_continuous):
r"""Kolmogorov-Smirnov one-sided test statistic distribution.
This is the distribution of the one-sided Kolmogorov-Smirnov (KS)
statistics :math:`D_n^+` and :math:`D_n^-`
for a finite sample size ``n >= 1`` (the shape parameter).
%(before_notes)s
See Also
--------
kstwobign, kstwo, kstest
Notes
-----
:math:`D_n^+` and :math:`D_n^-` are given by
.. math::
D_n^+ &= \text{sup}_x (F_n(x) - F(x)),\\
D_n^- &= \text{sup}_x (F(x) - F_n(x)),\\
where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
`ksone` describes the distribution under the null hypothesis of the KS test
that the empirical CDF corresponds to :math:`n` i.i.d. random variates
with CDF :math:`F`.
%(after_notes)s
References
----------
.. [1] Birnbaum, Z. W. and Tingey, F.H. "One-sided confidence contours
for probability distribution functions", The Annals of Mathematical
Statistics, 22(4), pp 592-596 (1951).
%(example)s
"""
def _argcheck(self, n):
return (n >= 1) & (n == np.round(n))
def _shape_info(self):
return [_ShapeInfo("n", True, (1, np.inf), (True, False))]
def _pdf(self, x, n):
return -scu._smirnovp(n, x)
def _cdf(self, x, n):
return scu._smirnovc(n, x)
def _sf(self, x, n):
return sc.smirnov(n, x)
def _ppf(self, q, n):
return scu._smirnovci(n, q)
def _isf(self, q, n):
return sc.smirnovi(n, q)
ksone = ksone_gen(a=0.0, b=1.0, name='ksone')
class kstwo_gen(rv_continuous):
r"""Kolmogorov-Smirnov two-sided test statistic distribution.
This is the distribution of the two-sided Kolmogorov-Smirnov (KS)
statistic :math:`D_n` for a finite sample size ``n >= 1``
(the shape parameter).
%(before_notes)s
See Also
--------
kstwobign, ksone, kstest
Notes
-----
:math:`D_n` is given by
.. math::
D_n = \text{sup}_x |F_n(x) - F(x)|
where :math:`F` is a (continuous) CDF and :math:`F_n` is an empirical CDF.
`kstwo` describes the distribution under the null hypothesis of the KS test
that the empirical CDF corresponds to :math:`n` i.i.d. random variates
with CDF :math:`F`.
%(after_notes)s
References
----------
.. [1] Simard, R., L'Ecuyer, P. "Computing the Two-Sided
Kolmogorov-Smirnov Distribution", Journal of Statistical Software,
Vol 39, 11, 1-18 (2011).
%(example)s
"""
def _argcheck(self, n):
return (n >= 1) & (n == np.round(n))
def _shape_info(self):
return [_ShapeInfo("n", True, (1, np.inf), (True, False))]
def _get_support(self, n):
return (0.5/(n if not isinstance(n, Iterable) else np.asanyarray(n)),
1.0)
def _pdf(self, x, n):
return kolmognp(n, x)
def _cdf(self, x, n):
return kolmogn(n, x)
def _sf(self, x, n):
return kolmogn(n, x, cdf=False)
def _ppf(self, q, n):
return kolmogni(n, q, cdf=True)
def _isf(self, q, n):
return kolmogni(n, q, cdf=False)
# Use the pdf (not the ppf) to compute moments
kstwo = kstwo_gen(momtype=0, a=0.0, b=1.0, name='kstwo')
class kstwobign_gen(rv_continuous):
r"""Limiting distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov
statistic :math:`\sqrt{n} D_n` that measures the maximum absolute
distance of the theoretical (continuous) CDF from the empirical CDF.
(see `kstest`).
%(before_notes)s
See Also
--------
ksone, kstwo, kstest
Notes
-----
:math:`\sqrt{n} D_n` is given by
.. math::
D_n = \text{sup}_x |F_n(x) - F(x)|
where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
`kstwobign` describes the asymptotic distribution (i.e. the limit of
:math:`\sqrt{n} D_n`) under the null hypothesis of the KS test that the
empirical CDF corresponds to i.i.d. random variates with CDF :math:`F`.
%(after_notes)s
References
----------
.. [1] Feller, W. "On the Kolmogorov-Smirnov Limit Theorems for Empirical
Distributions", Ann. Math. Statist. Vol 19, 177-189 (1948).
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
return -scu._kolmogp(x)
def _cdf(self, x):
return scu._kolmogc(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return scu._kolmogci(q)
def _isf(self, q):
return sc.kolmogi(q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
r"""A normal continuous random variable.
The location (``loc``) keyword specifies the mean.
The scale (``scale``) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is:
.. math::
f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.standard_normal(size)
def _pdf(self, x):
# norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@_call_super_mom
@replace_notes_in_docstring(rv_continuous, notes="""\
For the normal distribution, method of moments and maximum likelihood
estimation give identical fits, and explicit formulas for the estimates
are available.
This function uses these explicit formulas for the maximum likelihood
estimation of the normal distribution parameters, so the
`optimizer` and `method` arguments are ignored.\n\n""")
def fit(self, data, **kwds):
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise ValueError("The data contains non-finite values.")
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
def _munp(self, n):
"""
        Return the n-th raw moment of the standard normal distribution
        for integer n >= 0.
See eq. 16 of https://arxiv.org/abs/1209.4340v2
"""
if n % 2 == 0:
return sc.factorial2(n - 1)
else:
return 0.
norm = norm_gen(name='norm')
class alpha_gen(rv_continuous):
r"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` ([1]_, [2]_) is:
.. math::
f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
\exp(-\frac{1}{2} (a-1/x)^2)
where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
References
----------
.. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
Distributions, Volume 1", Second Edition, John Wiley and Sons,
p. 173 (1994).
.. [2] Anthony A. Salvia, "Reliability applications of the Alpha
Distribution", IEEE Transactions on Reliability, Vol. R-34,
No. 3, pp. 251-252 (1985).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
def _pdf(self, x, a):
# alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a - _norm_ppf(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
r"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is:
.. math::
f(x) = \sin(2x + \pi/2) = \cos(2x)
for :math:`-\pi/4 \le x \le \pi/4`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x)
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _sf(self, x):
return np.cos(x + np.pi / 4) ** 2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
r"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is:
.. math::
f(x) = \frac{1}{\pi \sqrt{x (1-x)}}
for :math:`0 < x < 1`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
with np.errstate(divide='ignore'):
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
"""Raised when input data is inconsistent with fixed parameters."""
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < "
"(x - loc)/scale < {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(FitError):
"""
Raised when a solver fails to converge while fitting a distribution.
"""
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
r"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is:
.. math::
f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
{\Gamma(a) \Gamma(b)}
for :math:`0 <= x <= 1`, :math:`a > 0`, :math:`b > 0`, where
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`beta` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ia, ib]
def _rvs(self, a, b, size=None, random_state=None):
return random_state.beta(a, b, size)
def _pdf(self, x, a, b):
        #                     gamma(a+b) * x**(a-1) * (1-x)**(b-1)
        # beta.pdf(x, a, b) = ------------------------------------
        #                     gamma(a)*gamma(b)
with np.errstate(over='ignore'):
return _boost._beta_pdf(x, a, b)
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return _boost._beta_cdf(x, a, b)
def _sf(self, x, a, b):
return _boost._beta_sf(x, a, b)
def _isf(self, x, a, b):
with np.errstate(over='ignore'): # see gh-17432
return _boost._beta_isf(x, a, b)
def _ppf(self, q, a, b):
with np.errstate(over='ignore'): # see gh-17432
return _boost._beta_ppf(q, a, b)
def _stats(self, a, b):
return (
_boost._beta_mean(a, b),
_boost._beta_variance(a, b),
_boost._beta_skewness(a, b),
_boost._beta_kurtosis_excess(a, b))
def _fitstart(self, data):
if isinstance(data, CensoredData):
data = data._uncensor()
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super()._fitstart(data, args=(a, b))
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
In the special case where `method="MLE"` and
both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.\n\n""")
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super().fit(data, *args, **kwds)
# We already got these from kwds, so just pop them.
kwds.pop('floc', None)
kwds.pop('fscale', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
f1 = _get_fixed_fit_value(kwds, ['f1', 'fb', 'fix_b'])
_remove_optimizer_parameters(kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
if not np.isfinite(data).all():
raise ValueError("The data contains non-finite values.")
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
def _entropy(self, a, b):
def regular(a, b):
return (sc.betaln(a, b) - (a - 1) * sc.psi(a) -
(b - 1) * sc.psi(b) + (a + b - 2) * sc.psi(a + b))
def asymptotic_ab_large(a, b):
sum_ab = a + b
log_term = 0.5 * (
np.log(2*np.pi) + np.log(a) + np.log(b) - 3*np.log(sum_ab) + 1
)
t1 = 110/sum_ab + 20*sum_ab**-2.0 + sum_ab**-3.0 - 2*sum_ab**-4.0
t2 = -50/a - 10*a**-2.0 - a**-3.0 + a**-4.0
t3 = -50/b - 10*b**-2.0 - b**-3.0 + b**-4.0
return log_term + (t1 + t2 + t3) / 120
if a >= 4.96e6 and b >= 4.96e6:
return asymptotic_ab_large(a, b)
else:
return regular(a, b)
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
r"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is:
.. math::
f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)}
for :math:`x >= 0`, :math:`a > 0`, :math:`b > 0`, where
:math:`\beta(a, b)` is the beta function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
The distribution is related to the `beta` distribution as follows:
If :math:`X` follows a beta distribution with parameters :math:`a, b`,
then :math:`Y = X/(1-X)` has a beta prime distribution with
parameters :math:`a, b` ([1]_).
The beta prime distribution is a reparametrized version of the
F distribution. The beta prime distribution with shape parameters
``a`` and ``b`` and ``scale = s`` is equivalent to the F distribution
with parameters ``d1 = 2*a``, ``d2 = 2*b`` and ``scale = (a/b)*s``.
For example,
>>> from scipy.stats import betaprime, f
>>> x = [1, 2, 5, 10]
>>> a = 12
>>> b = 5
>>> betaprime.pdf(x, a, b, scale=2)
array([0.00541179, 0.08331299, 0.14669185, 0.03150079])
>>> f.pdf(x, 2*a, 2*b, scale=(a/b)*2)
array([0.00541179, 0.08331299, 0.14669185, 0.03150079])
%(after_notes)s
References
----------
.. [1] Beta prime distribution, Wikipedia,
https://en.wikipedia.org/wiki/Beta_prime_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ia, ib]
def _rvs(self, a, b, size=None, random_state=None):
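        # If U1 ~ Gamma(a) and U2 ~ Gamma(b) are independent, the ratio
        # U1/U2 follows a beta prime distribution with shape parameters a, b.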
u1 = gamma.rvs(a, size=size, random_state=random_state)
u2 = gamma.rvs(b, size=size, random_state=random_state)
return u1 / u2
def _pdf(self, x, a, b):
# betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
# note: f2 is the direct way to compute the cdf if the relationship
# to the beta distribution is used.
# however, for very large x, x/(1+x) == 1. since the distribution
# has very fat tails if b is small, this can cause inaccurate results
# use the following relationship of the incomplete beta function:
# betainc(x, a, b) = 1 - betainc(1-x, b, a)
# see gh-17631
return _lazywhere(
x > 1, [x, a, b],
lambda x_, a_, b_: beta._sf(1/(1+x_), b_, a_),
f2=lambda x_, a_, b_: beta._cdf(x_/(1+x_), a_, b_))
def _sf(self, x, a, b):
return _lazywhere(
x > 1, [x, a, b],
lambda x_, a_, b_: beta._cdf(1/(1+x_), b_, a_),
f2=lambda x_, a_, b_: beta._sf(x_/(1+x_), a_, b_)
)
def _ppf(self, p, a, b):
p, a, b = np.broadcast_arrays(p, a, b)
        # By default, compute the ppf by solving the following:
        # p = beta._cdf(x/(1+x), a, b). This implies x = r/(1-r) with
        # r = beta._ppf(p, a, b). This can cause numerical issues if r is
        # very close to 1. In that case, invert the alternative expression of
        # the cdf: p = beta._sf(1/(1+x), b, a).
r = stats.beta._ppf(p, a, b)
with np.errstate(divide='ignore'):
out = r / (1 - r)
i = (r > 0.9999)
out[i] = 1/stats.beta._isf(p[i], b[i], a[i]) - 1
return out
def _munp(self, n, a, b):
return _lazywhere(
b > n, (a, b),
lambda a, b: np.prod([(a+i-1)/(b-i) for i in range(1, n+1)], axis=0),
fillvalue=np.inf)
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
r"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is:
.. math::
f(x, c) = \frac{c}{\log(1+c) (1+cx)}
for :math:`0 <= x <= 1` and :math:`c > 0`.
`bradford` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# bradford.pdf(x, c) = c / (k * (1+c*x))
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
r"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr12 : Burr Type XII distribution
mielke : Mielke Beta-Kappa / Dagum distribution
Notes
-----
The probability density function for `burr` is:
.. math::
f(x; c, d) = c d \frac{x^{-c - 1}}
{{(1 + x^{-c})}^{d + 1}}
for :math:`x >= 0` and :math:`c, d > 0`.
`burr` takes ``c`` and ``d`` as shape parameters for :math:`c` and
:math:`d`.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_. The distribution
is also commonly referred to as the Dagum distribution [2]_. If the
parameter :math:`c < 1`, then the mean of the distribution does not
exist, and if :math:`c < 2`, the variance does not exist [2]_.
The PDF is finite at the left endpoint :math:`x = 0` if :math:`c * d >= 1`.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://en.wikipedia.org/wiki/Dagum_distribution
.. [3] Kleiber, Christian. "A guide to the Dagum distributions."
Modeling Income Distributions and Lorenz Curves pp 97-117 (2008).
%(example)s
"""
# Do not set _support_mask to rv_continuous._open_support_mask
# Whether the left-hand endpoint is suitable for pdf evaluation is dependent
# on the values of c and d: if c*d >= 1, the pdf is finite, otherwise infinite.
def _shape_info(self):
ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
id = _ShapeInfo("d", False, (0, np.inf), (False, False))
return [ic, id]
def _pdf(self, x, c, d):
# burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
output = _lazywhere(
x == 0, [x, c, d],
lambda x_, c_, d_: c_ * d_ * (x_**(c_*d_-1)) / (1 + x_**c_),
f2=lambda x_, c_, d_: (c_ * d_ * (x_ ** (-c_ - 1.0)) /
((1 + x_ ** (-c_)) ** (d_ + 1.0))))
if output.ndim == 0:
return output[()]
return output
def _logpdf(self, x, c, d):
output = _lazywhere(
x == 0, [x, c, d],
lambda x_, c_, d_: (np.log(c_) + np.log(d_) + sc.xlogy(c_*d_ - 1, x_)
- (d_+1) * sc.log1p(x_**(c_))),
f2=lambda x_, c_, d_: (np.log(c_) + np.log(d_)
+ sc.xlogy(-c_ - 1, x_)
- sc.xlog1py(d_+1, x_**(-c_))))
if output.ndim == 0:
return output[()]
return output
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _logcdf(self, x, c, d):
return sc.log1p(x**(-c)) * (-d)
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return np.log1p(- (1 + x**(-c))**(-d))
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _isf(self, q, c, d):
_q = sc.xlog1py(-1.0 / d, -q)
return sc.expm1(_q) ** (-1.0 / c)
def _stats(self, c, d):
nc = np.arange(1, 5).reshape(4,1) / c
        # e_k is the k-th raw moment: e1 is the mean, e2 - e1**2 the variance, etc.
e1, e2, e3, e4 = sc.beta(d + nc, 1. - nc) * d
mu = np.where(c > 1.0, e1, np.nan)
mu2_if_c = e2 - mu**2
mu2 = np.where(c > 2.0, mu2_if_c, np.nan)
g1 = _lazywhere(
c > 3.0,
(c, e1, e2, e3, mu2_if_c),
lambda c, e1, e2, e3, mu2_if_c: (e3 - 3*e2*e1 + 2*e1**3) / np.sqrt((mu2_if_c)**3),
fillvalue=np.nan)
g2 = _lazywhere(
c > 4.0,
(c, e1, e2, e3, e4, mu2_if_c),
lambda c, e1, e2, e3, e4, mu2_if_c: (
((e4 - 4*e3*e1 + 6*e2*e1**2 - 3*e1**4) / mu2_if_c**2) - 3),
fillvalue=np.nan)
if np.ndim(c) == 0:
return mu.item(), mu2.item(), g1.item(), g2.item()
return mu, mu2, g1, g2
def _munp(self, n, c, d):
def __munp(n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
n, c, d = np.asarray(n), np.asarray(c), np.asarray(d)
return _lazywhere((c > n) & (n == n) & (d == d), (c, d, n),
lambda c, d, n: __munp(n, c, d),
np.nan)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
r"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr : Burr Type III distribution
Notes
-----
The probability density function for `burr12` is:
.. math::
f(x; c, d) = c d \frac{x^{c-1}}
{(1 + x^c)^{d + 1}}
for :math:`x >= 0` and :math:`c, d > 0`.
`burr12` takes ``c`` and ``d`` as shape parameters for :math:`c`
and :math:`d`.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
The Burr type 12 distribution is also sometimes referred to as
the Singh-Maddala distribution from NIST [2]_.
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
.. [3] "Burr distribution",
https://en.wikipedia.org/wiki/Burr_distribution
%(example)s
"""
def _shape_info(self):
ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
id = _ShapeInfo("d", False, (0, np.inf), (False, False))
return [ic, id]
def _pdf(self, x, c, d):
# burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
def moment_if_exists(n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
return _lazywhere(c * d > n, (n, c, d), moment_if_exists,
fillvalue=np.nan)
burr12 = burr12_gen(a=0.0, name='burr12')
class fisk_gen(burr_gen):
r"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution.
%(before_notes)s
See Also
--------
burr
Notes
-----
The probability density function for `fisk` is:
.. math::
f(x, c) = \frac{c x^{c-1}}
{(1 + x^c)^2}
for :math:`x >= 0` and :math:`c > 0`.
Please note that the above expression can be transformed into the following
one, which is also commonly used:
.. math::
f(x, c) = \frac{c x^{-c-1}}
{(1 + x^{-c})^2}
`fisk` takes ``c`` as a shape parameter for :math:`c`.
`fisk` is a special case of `burr` or `burr12` with ``d=1``.
Suppose ``X`` is a logistic random variable with location ``l``
and scale ``s``. Then ``Y = exp(X)`` is a Fisk (log-logistic)
random variable with ``scale = exp(l)`` and shape ``c = 1/s``.
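For example (a quick numerical check of this relationship):
>>> import numpy as np
>>> from scipy.stats import fisk, logistic
>>> l, s, x = 0.5, 2.0, 3.0
>>> np.allclose(fisk.cdf(x, c=1/s, scale=np.exp(l)),
...             logistic.cdf(np.log(x), loc=l, scale=s))
True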
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr._pdf(x, c, 1.0)
def _cdf(self, x, c):
return burr._cdf(x, c, 1.0)
def _sf(self, x, c):
return burr._sf(x, c, 1.0)
def _logpdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr._logpdf(x, c, 1.0)
def _logcdf(self, x, c):
return burr._logcdf(x, c, 1.0)
def _logsf(self, x, c):
return burr._logsf(x, c, 1.0)
def _ppf(self, x, c):
return burr._ppf(x, c, 1.0)
def _isf(self, q, c):
return burr._isf(q, c, 1.0)
def _munp(self, n, c):
return burr._munp(n, c, 1.0)
def _stats(self, c):
return burr._stats(c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
class cauchy_gen(rv_continuous):
r"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is
.. math::
f(x) = \frac{1}{\pi (1 + x^2)}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# cauchy.pdf(x) = 1 / (pi * (1 + x**2))
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
if isinstance(data, CensoredData):
data = data._uncensor()
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
r"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is:
.. math::
f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)}
x^{k-1} \exp \left( -x^2/2 \right)
for :math:`x >= 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation). :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
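For example, the ``rayleigh`` equivalence above can be checked numerically:
>>> import numpy as np
>>> from scipy.stats import chi, rayleigh
>>> x = np.linspace(0.1, 3.0, 5)
>>> np.allclose(chi.pdf(x, 2), rayleigh.pdf(x))
True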
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("df", False, (0, np.inf), (False, False))]
def _rvs(self, df, size=None, random_state=None):
return np.sqrt(chi2.rvs(df, size=size, random_state=random_state))
def _pdf(self, x, df):
        #                  x**(df-1) * exp(-x**2/2)
        # chi.pdf(x, df) = -------------------------
        #                  2**(df/2-1) * gamma(df/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _sf(self, x, df):
return sc.gammaincc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _isf(self, q, df):
return np.sqrt(2*sc.gammainccinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*np.exp(sc.gammaln(df/2.0+0.5)-sc.gammaln(df/2.0))
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
def _entropy(self, df):
def regular_formula(df):
return (sc.gammaln(.5 * df)
+ 0.5 * (df - np.log(2) - (df - 1) * sc.digamma(0.5 * df)))
def asymptotic_formula(df):
return (0.5 + np.log(np.pi)/2 - (df**-1)/6 - (df**-2)/6
- 4/45*(df**-3) + (df**-4)/15)
return _lazywhere(df < 3e2, (df, ), regular_formula,
f2=asymptotic_formula)
chi = chi_gen(a=0.0, name='chi')
class chi2_gen(rv_continuous):
r"""A chi-squared continuous random variable.
For the noncentral chi-square distribution, see `ncx2`.
%(before_notes)s
See Also
--------
ncx2
Notes
-----
The probability density function for `chi2` is:
.. math::
f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)}
x^{k/2-1} \exp \left( -x/2 \right)
for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation).
`chi2` takes ``df`` as a shape parameter.
The chi-squared distribution is a special case of the gamma
distribution, with gamma parameters ``a = df/2``, ``loc = 0`` and
``scale = 2``.
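For example:
>>> import numpy as np
>>> from scipy.stats import chi2, gamma
>>> x, df = np.linspace(0.5, 10, 5), 4
>>> np.allclose(chi2.pdf(x, df), gamma.pdf(x, df/2, scale=2))
True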
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("df", False, (0, np.inf), (False, False))]
def _rvs(self, df, size=None, random_state=None):
return random_state.chisquare(df, size)
def _pdf(self, x, df):
# chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return 2*sc.gammaincinv(df/2, p)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
def _entropy(self, df):
half_df = 0.5 * df
def regular_formula(half_df):
return (half_df + np.log(2) + sc.gammaln(half_df) +
(1 - half_df) * sc.psi(half_df))
def asymptotic_formula(half_df):
            # Plug the following asymptotic expansions into the formula above:
            # ln(gamma(a)) ~ (a - 0.5) * ln(a) - a + 0.5 * ln(2 * pi) +
            #                1/(12 * a) - 1/(360 * a**3)
            # psi(a) ~ ln(a) - 1/(2 * a) - 1/(12 * a**2) + 1/(120 * a**4)
c = np.log(2) + 0.5*(1 + np.log(2*np.pi))
h = 0.5/half_df
return (h*(-2/3 + h*(-1/3 + h*(-4/45 + h/7.5))) +
0.5*np.log(half_df) + c)
return _lazywhere(half_df < 125, (half_df, ),
regular_formula,
f2=asymptotic_formula)
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
r"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is:
.. math::
f(x) = \frac{1}{2\pi} (1+\cos(x))
for :math:`-\pi \le x \le \pi`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
return 1.0/2/np.pi*(1+np.cos(x))
def _logpdf(self, x):
c = np.cos(x)
return _lazywhere(c != -1, (c,),
lambda c: np.log1p(c) - np.log(2*np.pi),
fillvalue=-np.inf)
def _cdf(self, x):
return scu._cosine_cdf(x)
def _sf(self, x):
return scu._cosine_cdf(-x)
def _ppf(self, p):
return scu._cosine_invcdf(p)
def _isf(self, p):
return -scu._cosine_invcdf(p)
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
r"""A double gamma continuous random variable.
The double gamma distribution is also known as the reflected gamma
distribution [1]_.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is:
.. math::
f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`dgamma` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
References
----------
.. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
Distributions, Volume 1", Second Edition, John Wiley and Sons
(1994).
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
def _rvs(self, a, size=None, random_state=None):
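        # A double gamma variate is a gamma variate with a random sign:
        # draw the magnitude from gamma and flip a fair coin for the sign.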
u = random_state.uniform(size=size)
gm = gamma.rvs(a, size=size, random_state=random_state)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
# dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
return np.where(x > 0,
0.5 + 0.5*sc.gammainc(a, x),
0.5*sc.gammaincc(a, -x))
def _sf(self, x, a):
return np.where(x > 0,
0.5*sc.gammaincc(a, x),
0.5 + 0.5*sc.gammainc(a, -x))
def _entropy(self, a):
return stats.gamma._entropy(a) - np.log(0.5)
def _ppf(self, q, a):
return np.where(q > 0.5,
sc.gammaincinv(a, 2*q - 1),
-sc.gammainccinv(a, 2*q))
def _isf(self, q, a):
return np.where(q > 0.5,
-sc.gammaincinv(a, 2*q - 1),
sc.gammainccinv(a, 2*q))
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
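# Illustrative sketch (not part of the library): `dgamma` is the gamma
# density reflected about zero and halved, pdf(x, a) = gamma.pdf(|x|, a)/2,
# matching `_pdf` above. Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x, a = np.array([-1.5, 0.5, 2.0]), 2.5
#     >>> np.allclose(stats.dgamma.pdf(x, a), 0.5*stats.gamma.pdf(abs(x), a))
#     True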
class dweibull_gen(rv_continuous):
r"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is given by
.. math::
f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)
for a real number :math:`x` and :math:`c > 0`.
`dweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _rvs(self, c, size=None, random_state=None):
u = random_state.uniform(size=size)
w = weibull_min.rvs(c, size=size, random_state=random_state)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
# dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _sf(self, x, c):
half_weibull_min_sf = 0.5 * stats.weibull_min._sf(np.abs(x), c)
return np.where(x > 0, half_weibull_min_sf, 1 - half_weibull_min_sf)
def _isf(self, q, c):
double_q = 2. * np.where(q <= 0.5, q, 1. - q)
weibull_min_isf = stats.weibull_min._isf(double_q, c)
return np.where(q > 0.5, -weibull_min_isf, weibull_min_isf)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# Since we know that all odd moments are zero, return them at once.
# Returning Nones from _stats makes the public stats call _munp,
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
def _entropy(self, c):
h = stats.weibull_min._entropy(c) - np.log(0.5)
return h
dweibull = dweibull_gen(name='dweibull')
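# Illustrative sketch (not part of the library): by symmetry the odd moments
# of `dweibull` vanish, and the even moments reduce to the gamma-function
# expression in `_munp` above. Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats, special
#     >>> np.isclose(stats.dweibull.moment(1, 2.3), 0.0)
#     True
#     >>> np.isclose(stats.dweibull.moment(2, 2.3), special.gamma(1 + 2/2.3))
#     True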
class expon_gen(rv_continuous):
r"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is:
.. math::
f(x) = \exp(-x)
for :math:`x \ge 0`.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
The exponential distribution is a special case of the gamma
distributions, with gamma shape parameter ``a = 1``.
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.standard_exponential(size)
def _pdf(self, x):
# expon.pdf(x) = exp(-x)
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
@_call_super_mom
@replace_notes_in_docstring(rv_continuous, notes="""\
When `method='MLE'`,
this function uses explicit formulas for the maximum likelihood
estimation of the exponential distribution parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are
ignored.\n\n""")
def fit(self, data, *args, **kwds):
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise ValueError("The data contains non-finite values.")
data_min = data.min()
if floc is None:
# ML estimate of the location is the minimum of the data.
loc = data_min
else:
loc = floc
if data_min < loc:
# There are values that are less than the specified loc.
raise FitDataError("expon", lower=floc, upper=np.inf)
if fscale is None:
# ML estimate of the scale is the shifted mean.
scale = data.mean() - loc
else:
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
expon = expon_gen(a=0.0, name='expon')
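# Illustrative sketch (not part of the library): with both parameters free,
# `expon.fit` uses the closed-form MLEs implemented above, loc = min(data)
# and scale = mean(data) - min(data). Example data are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> data = np.array([1.5, 2.0, 3.5, 7.0])
#     >>> loc, scale = stats.expon.fit(data)
#     >>> loc == data.min(), np.isclose(scale, data.mean() - data.min())
#     (True, True)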
class exponnorm_gen(rv_continuous):
r"""An exponentially modified Normal continuous random variable.
Also known as the exponentially modified Gaussian distribution [1]_.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is:
.. math::
f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right)
\text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)
where :math:`x` is a real number and :math:`K > 0`.
It can be thought of as the sum of a standard normal random variable
and an independent exponentially distributed random variable with rate
``1/K``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
the Wikipedia article [1]_) involves three parameters, :math:`\mu`,
:math:`\lambda` and :math:`\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
shape parameter :math:`K = 1/(\sigma\lambda)`.
.. versionadded:: 0.16.0
References
----------
.. [1] Exponentially modified Gaussian distribution, Wikipedia,
https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("K", False, (0, np.inf), (False, False))]
def _rvs(self, K, size=None, random_state=None):
expval = random_state.standard_exponential(size) * K
gval = random_state.standard_normal(size)
return expval + gval
def _pdf(self, x, K):
return np.exp(self._logpdf(x, K))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = invK * (0.5 * invK - x)
return exparg + _norm_logcdf(x - invK) - np.log(K)
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
logprod = expval + _norm_logcdf(x - invK)
return _norm_cdf(x) - np.exp(logprod)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
logprod = expval + _norm_logcdf(x - invK)
return _norm_cdf(-x) + np.exp(logprod)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
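# Illustrative sketch (not part of the library): per `_stats` above, the
# standardized `exponnorm` has mean K and variance 1 + K**2. K = 2 is an
# arbitrary example value.
#
#     >>> from scipy import stats
#     >>> m, v = stats.exponnorm.stats(K=2.0)
#     >>> float(m), float(v)
#     (2.0, 5.0)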
def _pow1pm1(x, y):
"""
Compute (1 + x)**y - 1.
Uses expm1 and xlog1py to avoid loss of precision when
(1 + x)**y is close to 1.
Note that the inverse of this function with respect to x is
``_pow1pm1(x, 1/y)``. That is, if
t = _pow1pm1(x, y)
then
x = _pow1pm1(t, 1/y)
"""
return np.expm1(sc.xlog1py(y, x))
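# Illustrative sketch (not part of the library): the inverse property noted
# in the docstring of `_pow1pm1`. Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> t = _pow1pm1(0.25, 3.0)  # (1.25)**3 - 1
#     >>> np.isclose(_pow1pm1(t, 1/3.0), 0.25)
#     True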
class exponweib_gen(rv_continuous):
r"""An exponentiated Weibull continuous random variable.
%(before_notes)s
See Also
--------
weibull_min, numpy.random.Generator.weibull
Notes
-----
The probability density function for `exponweib` is:
.. math::
f(x, a, c) = a c [1-\exp(-x^c)]^{a-1} \exp(-x^c) x^{c-1}
and its cumulative distribution function is:
.. math::
F(x, a, c) = [1-\exp(-x^c)]^a
for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.
`exponweib` takes :math:`a` and :math:`c` as shape parameters:
* :math:`a` is the exponentiation parameter,
with the special case :math:`a=1` corresponding to the
(non-exponentiated) Weibull distribution `weibull_min`.
* :math:`c` is the shape parameter of the non-exponentiated Weibull law.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution
%(example)s
"""
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
return [ia, ic]
def _pdf(self, x, a, c):
# exponweib.pdf(x, a, c) =
# a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
def _sf(self, x, a, c):
return -_pow1pm1(-np.exp(-x**c), a)
def _isf(self, p, a, c):
return (-np.log(-_pow1pm1(-p, 1/a)))**(1/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
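# Illustrative sketch (not part of the library): as the docstring notes,
# a = 1 reduces `exponweib` to the plain `weibull_min` distribution.
# Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.array([0.1, 1.0, 4.0])
#     >>> np.allclose(stats.exponweib.cdf(x, 1, 2.5),
#     ...             stats.weibull_min.cdf(x, 2.5))
#     True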
class exponpow_gen(rv_continuous):
r"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is:
.. math::
f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different
distribution from the exponential power distribution that is also known
under the names "generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("b", False, (0, np.inf), (False, False))]
def _pdf(self, x, b):
# exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is:
.. math::
f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2})
for :math:`x \ge 0` and :math:`c > 0`.
`fatiguelife` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _rvs(self, c, size=None, random_state=None):
z = random_state.standard_normal(size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
# fatiguelife.pdf(x, c) =
# (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c * _norm_ppf(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _sf(self, x, c):
return _norm_sf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _isf(self, q, c):
tmp = -c * _norm_ppf(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
# NB: the formula for kurtosis in wikipedia seems to have an error:
# it's 40, not 41. At least it disagrees with the one from Wolfram
# Alpha. And the latter one, below, passes the tests, while the wiki
# one doesn't. So far I didn't have the guts to actually check the
# coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
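# Illustrative sketch (not part of the library): the CDF implemented above
# is Phi((sqrt(x) - 1/sqrt(x))/c), with Phi the standard normal CDF.
# Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x, c = 2.0, 0.75
#     >>> np.isclose(stats.fatiguelife.cdf(x, c),
#     ...            stats.norm.cdf((np.sqrt(x) - 1/np.sqrt(x))/c))
#     True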
class foldcauchy_gen(rv_continuous):
r"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is:
.. math::
f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
for :math:`x \ge 0` and :math:`c \ge 0`.
`foldcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (True, False))]
def _rvs(self, c, size=None, random_state=None):
return abs(cauchy.rvs(loc=c, size=size,
random_state=random_state))
def _pdf(self, x, c):
# foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _sf(self, x, c):
# 1 - CDF(x, c) = 1 - (atan(x - c) + atan(x + c))/pi
# = ((pi/2 - atan(x - c)) + (pi/2 - atan(x + c)))/pi
# = (acot(x - c) + acot(x + c))/pi
# = (atan2(1, x - c) + atan2(1, x + c))/pi
return (np.arctan2(1, x - c) + np.arctan2(1, x + c))/np.pi
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
r"""An F continuous random variable.
For the noncentral F distribution, see `ncf`.
%(before_notes)s
See Also
--------
ncf
Notes
-----
The probability density function for `f` is:
.. math::
f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
{(df_2+df_1 x)^{(df_1+df_2)/2}
B(df_1/2, df_2/2)}
for :math:`x > 0` and parameters :math:`df_1, df_2 > 0` .
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
idfn = _ShapeInfo("dfn", False, (0, np.inf), (False, False))
idfd = _ShapeInfo("dfd", False, (0, np.inf), (False, False))
return [idfn, idfd]
def _rvs(self, dfn, dfd, size=None, random_state=None):
return random_state.f(dfn, dfd, size)
def _pdf(self, x, dfn, dfd):
# df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
# F.pdf(x, df1, df2) = --------------------------------------------
# (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = (m/2 * np.log(m) + n/2 * np.log(n) + sc.xlogy(n/2 - 1, x)
- (((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)))
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
def _entropy(self, dfn, dfd):
# the formula found in the literature is incorrect. This one yields the
# same result as numerical integration using the generic entropy
# definition. This is also tested in tests/test_continuous_basic
half_dfn = 0.5 * dfn
half_dfd = 0.5 * dfd
half_sum = 0.5 * (dfn + dfd)
return (np.log(dfd) - np.log(dfn) + sc.betaln(half_dfn, half_dfd) +
(1 - half_dfn) * sc.psi(half_dfn) - (1 + half_dfd) *
sc.psi(half_dfd) + half_sum * sc.psi(half_sum))
f = f_gen(a=0.0, name='f')
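# Illustrative sketch (not part of the library): per `_stats` above, the
# mean of the F distribution is dfd/(dfd - 2) for dfd > 2. Example values
# are arbitrary.
#
#     >>> from scipy import stats
#     >>> float(stats.f.mean(dfn=5, dfd=10))  # 10 / (10 - 2)
#     1.25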
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the regress docs have the scale parameter correct, but the first
## parameter given is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
r"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is:
.. math::
f(x, c) = \sqrt{2/\pi} cosh(c x) \exp(-\frac{x^2+c^2}{2})
for :math:`x \ge 0` and :math:`c \ge 0`.
`foldnorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (True, False))]
def _rvs(self, c, size=None, random_state=None):
return abs(random_state.standard_normal(size) + c)
def _pdf(self, x, c):
# foldnorm.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
sqrt_two = np.sqrt(2)
return 0.5 * (sc.erf((x - c)/sqrt_two) + sc.erf((x + c)/sqrt_two))
def _sf(self, x, c):
return _norm_sf(x - c) + _norm_sf(x + c)
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# https://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
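# Illustrative sketch (not part of the library): as noted in the comments
# above, c = 0 reduces the folded normal to the half-normal distribution.
# Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(0, 3, 4)
#     >>> np.allclose(stats.foldnorm.pdf(x, 0.0), stats.halfnorm.pdf(x))
#     True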
class weibull_min_gen(rv_continuous):
r"""Weibull minimum continuous random variable.
The Weibull Minimum Extreme Value distribution, from extreme value theory
(Fisher-Gnedenko theorem), is also often simply called the Weibull
distribution. It arises as the limiting distribution of the rescaled
minimum of iid random variables.
%(before_notes)s
See Also
--------
weibull_max, numpy.random.Generator.weibull, exponweib
Notes
-----
The probability density function for `weibull_min` is:
.. math::
f(x, c) = c x^{c-1} \exp(-x^c)
for :math:`x > 0`, :math:`c > 0`.
`weibull_min` takes ``c`` as a shape parameter for :math:`c`.
(named :math:`k` in the Wikipedia article and :math:`a` in
``numpy.random.weibull``). Special shape values are :math:`c=1` and
:math:`c=2`, where the Weibull distribution reduces to the `expon` and
`rayleigh` distributions, respectively.
Suppose ``X`` is an exponentially distributed random variable with
scale ``s``. Then ``Y = X**k`` is `weibull_min` distributed with shape
``c = 1/k`` and scale ``s**k``.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Weibull_distribution
https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _sf(self, x, c):
return np.exp(self._logsf(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _isf(self, q, c):
return (-np.log(q))**(1/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
@extend_notes_in_docstring(rv_continuous, notes="""\
If ``method='mm'``, parameters fixed by the user are respected, and the
remaining parameters are used to match distribution and sample moments
where possible. For example, if the user fixes the location with
``floc``, the parameters will only match the distribution skewness and
variance to the sample skewness and variance; no attempt will be made
to match the means or minimize a norm of the errors.
\n\n""")
def fit(self, data, *args, **kwds):
if isinstance(data, CensoredData):
if data.num_censored() == 0:
data = data._uncensor()
else:
return super().fit(data, *args, **kwds)
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
# this extracts fixed shape, location, and scale however they
# are specified, and also leaves them in `kwds`
data, fc, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
method = kwds.get("method", "mle").lower()
# See https://en.wikipedia.org/wiki/Weibull_distribution#Moments for
# moment formulas.
def skew(c):
gamma1 = sc.gamma(1+1/c)
gamma2 = sc.gamma(1+2/c)
gamma3 = sc.gamma(1+3/c)
num = 2 * gamma1**3 - 3*gamma1*gamma2 + gamma3
den = (gamma2 - gamma1**2)**(3/2)
return num/den
# For c in [1e2, 3e4], population skewness appears to approach an
# asymptote near -1.139, but past c > 3e4, skewness begins to vary
# wildly, and MoM won't provide a good guess. Get out early.
s = stats.skew(data)
max_c = 1e4
s_min = skew(max_c)
if s < s_min and method != "mm" and fc is None and not args:
return super().fit(data, *args, **kwds)
# If method is method of moments, we don't need the user's guesses.
# Otherwise, extract the guesses from args and kwds.
if method == "mm":
c, loc, scale = None, None, None
else:
c = args[0] if len(args) else None
loc = kwds.pop('loc', None)
scale = kwds.pop('scale', None)
if fc is None and c is None: # not fixed and no guess: use MoM
# Solve for c that matches sample distribution skewness to sample
# skewness.
# we start having numerical issues with `weibull_min` with
# parameters outside this range - and not just in this method.
# We could probably improve the situation by doing everything
# in the log space, but that is for another time.
c = root_scalar(lambda c: skew(c) - s, bracket=[0.02, max_c],
method='bisect').root
elif fc is not None: # fixed: use it
c = fc
if fscale is None and scale is None:
v = np.var(data)
scale = np.sqrt(v / (sc.gamma(1+2/c) - sc.gamma(1+1/c)**2))
elif fscale is not None:
scale = fscale
if floc is None and loc is None:
m = np.mean(data)
loc = m - scale*sc.gamma(1 + 1/c)
elif floc is not None:
loc = floc
if method == 'mm':
return c, loc, scale
else:
# At this point, parameter "guesses" may equal the fixed parameters
# in kwds. No harm in passing them as guesses, too.
return super().fit(data, c, loc=loc, scale=scale, **kwds)
weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
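# Illustrative sketch (not part of the library): the power relation stated
# in the docstring -- if X ~ expon(scale=s), then X**k ~ weibull_min with
# c = 1/k and scale = s**k. Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> y, s, k = np.array([0.5, 1.0, 2.0]), 1.5, 3.0
#     >>> np.allclose(stats.weibull_min.cdf(y, c=1/k, scale=s**k),
#     ...             stats.expon.cdf(y**(1/k), scale=s))
#     True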
class truncweibull_min_gen(rv_continuous):
r"""A doubly truncated Weibull minimum continuous random variable.
%(before_notes)s
See Also
--------
weibull_min, truncexpon
Notes
-----
The probability density function for `truncweibull_min` is:
.. math::
f(x, a, b, c) = \frac{c x^{c-1} \exp(-x^c)}{\exp(-a^c) - \exp(-b^c)}
for :math:`a < x \le b`, :math:`0 \le a < b` and :math:`c > 0`.
`truncweibull_min` takes :math:`a`, :math:`b`, and :math:`c` as shape
parameters.
Notice that the truncation values, :math:`a` and :math:`b`, are defined in
standardized form:
.. math::
a = (u_l - loc)/scale
b = (u_r - loc)/scale
where :math:`u_l` and :math:`u_r` are the specific left and right
truncation values, respectively. In other words, the support of the
distribution becomes :math:`(a*scale + loc) < x \le (b*scale + loc)` when
:math:`loc` and/or :math:`scale` are provided.
%(after_notes)s
References
----------
.. [1] Rinne, H. "The Weibull Distribution: A Handbook". CRC Press (2009).
%(example)s
"""
def _argcheck(self, c, a, b):
return (a >= 0.) & (b > a) & (c > 0.)
def _shape_info(self):
ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
ia = _ShapeInfo("a", False, (0, np.inf), (True, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ic, ia, ib]
def _fitstart(self, data):
# Arbitrary, but default a=b=c=1 is not valid
return super()._fitstart(data, args=(1, 0, 1))
def _get_support(self, c, a, b):
return a, b
def _pdf(self, x, c, a, b):
denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
return (c * pow(x, c-1) * np.exp(-pow(x, c))) / denum
def _logpdf(self, x, c, a, b):
logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c) - logdenum
def _cdf(self, x, c, a, b):
num = (np.exp(-pow(a, c)) - np.exp(-pow(x, c)))
denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
return num / denum
def _logcdf(self, x, c, a, b):
lognum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(x, c)))
logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
return lognum - logdenum
def _sf(self, x, c, a, b):
num = (np.exp(-pow(x, c)) - np.exp(-pow(b, c)))
denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
return num / denum
def _logsf(self, x, c, a, b):
lognum = np.log(np.exp(-pow(x, c)) - np.exp(-pow(b, c)))
logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
return lognum - logdenum
def _isf(self, q, c, a, b):
return pow(
-np.log((1 - q) * np.exp(-pow(b, c)) + q * np.exp(-pow(a, c))), 1/c
)
def _ppf(self, q, c, a, b):
return pow(
-np.log((1 - q) * np.exp(-pow(a, c)) + q * np.exp(-pow(b, c))), 1/c
)
def _munp(self, n, c, a, b):
gamma_fun = sc.gamma(n/c + 1.) * (
sc.gammainc(n/c + 1., pow(b, c)) - sc.gammainc(n/c + 1., pow(a, c))
)
denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
return gamma_fun / denum
truncweibull_min = truncweibull_min_gen(name='truncweibull_min')
class weibull_max_gen(rv_continuous):
r"""Weibull maximum continuous random variable.
The Weibull Maximum Extreme Value distribution, from extreme value theory
(Fisher-Gnedenko theorem), is the limiting distribution of the rescaled
maximum of iid random variables. This is the distribution of -X
if X has a `weibull_min` distribution.
%(before_notes)s
See Also
--------
weibull_min
Notes
-----
The probability density function for `weibull_max` is:
.. math::
f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
for :math:`x < 0`, :math:`c > 0`.
`weibull_max` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Weibull_distribution
https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# weibull_max.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
r"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is:
.. math::
f(x, c) = c \frac{\exp(-x)}
{(1 + \exp(-x))^{c+1}}
for real :math:`x` and :math:`c > 0`. In the literature, different
generalizations of the logistic distribution can be found. This is the type 1
generalized logistic distribution according to [1]_. It is also referred to
as the skew-logistic distribution [2]_.
`genlogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
.. [1] Johnson et al. "Continuous Univariate Distributions", Volume 2,
Wiley. 1995.
.. [2] "Generalized Logistic Distribution", Wikipedia,
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
# Two mathematically equivalent expressions for log(pdf(x, c)):
# log(pdf(x, c)) = log(c) - x - (c + 1)*log(1 + exp(-x))
# = log(c) + c*x - (c + 1)*log(1 + exp(x))
mult = -(c - 1) * (x < 0) - 1
absx = np.abs(x)
return np.log(c) + mult*absx - (c+1) * sc.log1p(np.exp(-absx))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _logcdf(self, x, c):
return -c * np.log1p(np.exp(-x))
def _ppf(self, q, c):
return -np.log(sc.powm1(q, -1.0/c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _isf(self, q, c):
return self._ppf(1 - q, c)
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
def _entropy(self, c):
return _lazywhere(c < 8e6, (c, ),
lambda c: -np.log(c) + sc.psi(c + 1) + _EULER + 1,
# asymptotic expansion: psi(c) ~ log(c) - 1/(2 * c)
# a = -log(c) + psi(c + 1)
# = -log(c) + psi(c) + 1/c
# ~ -log(c) + log(c) - 1/(2 * c) + 1/c
# = 1/(2 * c)
f2=lambda c: 1/(2 * c) + _EULER + 1)
genlogistic = genlogistic_gen(name='genlogistic')
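# Illustrative sketch (not part of the library): c = 1 reduces
# `genlogistic` to the ordinary logistic distribution. Example values are
# arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.array([-2.0, 0.0, 3.0])
#     >>> np.allclose(stats.genlogistic.pdf(x, c=1), stats.logistic.pdf(x))
#     True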
class genpareto_gen(rv_continuous):
r"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is:
.. math::
f(x, c) = (1 + c x)^{-1 - 1/c}
defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
:math:`0 \le x \le -1/c` if :math:`c < 0`.
`genpareto` takes ``c`` as a shape parameter for :math:`c`.
For :math:`c=0`, `genpareto` reduces to the exponential
distribution, `expon`:
.. math::
f(x, 0) = \exp(-x)
For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
.. math::
f(x, -1) = 1
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.isfinite(c)
def _shape_info(self):
return [_ShapeInfo("c", False, (-np.inf, np.inf), (False, False))]
def _get_support(self, c):
c = np.asarray(c)
b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
a = np.where(c >= 0, self.a, self.a)  # broadcasts self.a to c's shape
return a, b
def _pdf(self, x, c):
# genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
# (x == x) is False for NaN, routing NaN inputs to the fill value
# (-x) so they propagate through the output.
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _stats(self, c, moments='mv'):
if 'm' not in moments:
m = None
else:
m = _lazywhere(c < 1, (c,),
lambda xi: 1/(1 - xi),
np.inf)
if 'v' not in moments:
v = None
else:
v = _lazywhere(c < 1/2, (c,),
lambda xi: 1 / (1 - xi)**2 / (1 - 2*xi),
np.nan)
if 's' not in moments:
s = None
else:
s = _lazywhere(c < 1/3, (c,),
lambda xi: (2 * (1 + xi) * np.sqrt(1 - 2*xi) /
(1 - 3*xi)),
np.nan)
if 'k' not in moments:
k = None
else:
k = _lazywhere(c < 1/4, (c,),
lambda xi: (3 * (1 - 2*xi) * (2*xi**2 + xi + 3) /
(1 - 3*xi) / (1 - 4*xi) - 3),
np.nan)
return m, v, s, k
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
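# Illustrative sketch (not part of the library): the special cases stated
# in the docstring -- c = 0 gives the exponential distribution and c = -1
# the uniform distribution on [0, 1]. Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.array([0.2, 0.5, 0.8])
#     >>> np.allclose(stats.genpareto.cdf(x, c=0), stats.expon.cdf(x))
#     True
#     >>> np.allclose(stats.genpareto.cdf(x, c=-1), stats.uniform.cdf(x))
#     True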
class genexpon_gen(rv_continuous):
r"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is:
.. math::
f(x, a, b, c) = (a + b (1 - \exp(-c x)))
\exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))
for :math:`x \ge 0`, :math:`a, b, c > 0`.
`genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, Asit P. Basu (editors), *The Exponential Distribution:
Theory, Methods and Applications*, Gordon and Breach, 1995.
ISBN 10: 2884491929
%(example)s
"""
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
return [ia, ib, ic]
def _pdf(self, x, a, b, c):
# genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
# exp(-a*x - b*x + b/c * (1-exp(-c*x)))
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _ppf(self, p, a, b, c):
s = a + b
t = (b - c*np.log1p(-p))/s
return (t + sc.lambertw(-b/s * np.exp(-t)).real)/c
def _sf(self, x, a, b, c):
return np.exp((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _isf(self, p, a, b, c):
s = a + b
t = (b - c*np.log(p))/s
return (t + sc.lambertw(-b/s * np.exp(-t)).real)/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
r"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For :math:`c=0`, `genextreme` is equal to `gumbel_r` with
probability density function
.. math::
f(x) = \exp(-\exp(-x)) \exp(-x),
where :math:`-\infty < x < \infty`.
For :math:`c \ne 0`, the probability density function for `genextreme` is:
.. math::
f(x, c) = \exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1},
where :math:`-\infty < x \le 1/c` if :math:`c > 0` and
:math:`1/c \le x < \infty` if :math:`c < 0`.
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter :math:`c`.
`genextreme` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.isfinite(c)
def _shape_info(self):
return [_ShapeInfo("c", False, (-np.inf, np.inf), (False, False))]
def _get_support(self, c):
_b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
_a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return _a, _b
def _loglogcdf(self, x, c):
# Returns log(-log(cdf(x, c)))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
# genextreme.pdf(x, c) =
# exp(-exp(-x))*exp(-x), for c==0
# exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x \le 1/c, c > 0
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = _lazywhere(~((cx == 1) | (cx == -np.inf)),
(pex2, logpex2, logex2),
lambda pex2, lpex2, lex2: -pex2 + lpex2 - lex2,
fillvalue=-np.inf)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
def g(n):
return sc.gamma(n * c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = _lazywhere(c >= -1./3,
(c, g1, g2, g3, g2mg12),
lambda c, g1, g2, g3, g2gm12:
np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
fillvalue=np.nan)
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = _lazywhere(c >= -1./4,
(g1, g2, g3, g4, g2mg12),
lambda g1, g2, g3, g4, g2mg12:
(g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
fillvalue=np.nan)
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
if isinstance(data, CensoredData):
data = data._uncensor()
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super()._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
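# Illustrative sketch (not part of the library): as the docstring notes,
# c = 0 makes `genextreme` equal to `gumbel_r`. Example values are
# arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.array([-1.0, 0.5, 2.0])
#     >>> np.allclose(stats.genextreme.cdf(x, c=0), stats.gumbel_r.cdf(x))
#     True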
def _digammainv(y):
"""Inverse of the digamma function (real positive arguments only).
This function is used in the `fit` method of `gamma_gen`.
The function uses either optimize.fsolve or optimize.newton
to solve `sc.digamma(x) - y = 0`. There is probably room for
improvement, but currently it works over a wide range of y:
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> y = 64*rng.standard_normal(1000000)
>>> y.min(), y.max()
(-311.43592651416662, 351.77388222276869)
>>> x = [_digammainv(t) for t in y]
>>> np.abs(sc.digamma(x) - y).max()
1.1368683772161603e-13
"""
_em = 0.5772156649015328606065120
def func(x):
return sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
# much faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
r"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is:
.. math::
f(x, a) = \frac{x^{a-1} e^{-x}}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
gamma function.
`gamma` takes ``a`` as a shape parameter for :math:`a`.
When :math:`a` is an integer, `gamma` reduces to the Erlang
distribution, and when :math:`a=1` to the exponential distribution.
Gamma distributions are sometimes parameterized with two variables,
with a probability density function of:
.. math::
f(x, \alpha, \beta) = \frac{\beta^\alpha x^{\alpha - 1} e^{-\beta x }}{\Gamma(\alpha)}
Note that this parameterization is equivalent to the above, with
``scale = 1 / beta``.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
def _rvs(self, a, size=None, random_state=None):
return random_state.standard_gamma(a, size)
def _pdf(self, x, a):
# gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _isf(self, q, a):
return sc.gammainccinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
def regular_formula(a):
return sc.psi(a) * (1-a) + a + sc.gammaln(a)
def asymptotic_formula(a):
# plug into the above formula the expansions:
# psi(a) ~ ln(a) - 1/(2a) - 1/(12a^2) + 1/(120a^4)
# gammaln(a) ~ a * ln(a) - a - 1/2 * ln(a) + 1/2 * ln(2 * pi) +
# 1/(12a) - 1/(360a^3)
return (0.5 * (1. + np.log(2*np.pi) + np.log(a)) - 1/(3 * a)
- (a**-2.)/12 - (a**-3.)/90 + (a**-4.)/120)
return _lazywhere(a < 250, (a, ), regular_formula,
f2=asymptotic_formula)
def _fitstart(self, data):
# The skewness of the gamma distribution is `2 / np.sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
if isinstance(data, CensoredData):
data = data._uncensor()
sk = _skew(data)
a = 4 / (1e-8 + sk**2)
return super()._fitstart(data, args=(a,))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location is fixed by using the argument `floc`
and `method='MLE'`, this
function uses explicit formulas or solves a simpler numerical
problem than the full ML optimization problem. So in that case,
the `optimizer`, `loc` and `scale` arguments are ignored.
\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
method = kwds.get('method', 'mle')
if (isinstance(data, CensoredData) or floc is None
or method.lower() == 'mm'):
# loc is not fixed or we're not doing standard MLE.
# Use the default fit method.
return super().fit(data, *args, **kwds)
# We already have this value, so just pop it from kwds.
kwds.pop('floc', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if not np.isfinite(data).all():
raise ValueError("The data contains non-finite values.")
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
# np.log(a) - sc.digamma(a) - np.log(xbar) +
# np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(lambda a: np.log(a) - sc.digamma(a) - s,
xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
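# Illustrative sketch (not part of the library): the rate parameterization
# noted in the docstring, f(x; alpha, beta) with scale = 1/beta. Example
# values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats, special
#     >>> a, beta, x = 3.0, 2.0, 1.5
#     >>> np.isclose(stats.gamma.pdf(x, a, scale=1/beta),
#     ...            beta**a * x**(a-1) * np.exp(-beta*x) / special.gamma(a))
#     True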
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value {!r}.'.format(a),
RuntimeWarning)
return a > 0
def _shape_info(self):
return [_ShapeInfo("a", True, (1, np.inf), (True, False))]
def _fitstart(self, data):
# Override gamma_gen_fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
if isinstance(data, CensoredData):
data = data._uncensor()
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
@extend_notes_in_docstring(rv_continuous, notes="""\
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.""")
def fit(self, data, *args, **kwds):
return super().fit(data, *args, **kwds)
erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
r"""A generalized gamma continuous random variable.
%(before_notes)s
See Also
--------
gamma, invgamma, weibull_min
Notes
-----
The probability density function for `gengamma` is ([1]_):
.. math::
f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gengamma` takes :math:`a` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
.. [1] E.W. Stacy, "A Generalization of the Gamma Distribution",
Annals of Mathematical Statistics, Vol 33(3), pp. 1187--1192.
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ic = _ShapeInfo("c", False, (-np.inf, np.inf), (False, False))
return [ia, ic]
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return _lazywhere((x != 0) | (c > 0), (x, c),
lambda x, c: (np.log(abs(c)) + sc.xlogy(c*a - 1, x)
- x**c - sc.gammaln(a)),
fillvalue=-np.inf)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _rvs(self, a, c, size=None, random_state=None):
r = random_state.standard_gamma(a, size=size)
return r**(1./c)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
# Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
def regular(a, c):
val = sc.psi(a)
A = a * (1 - val) + val / c
B = sc.gammaln(a) - np.log(abs(c))
h = A + B
return h
def asymptotic(a, c):
# using asymptotic expansions for gammaln and psi (see gh-18093)
return (norm._entropy() - np.log(a)/2
- np.log(np.abs(c)) + (a**-1.)/6 - (a**-3.)/90
+ (np.log(a) - (a**-1.)/2 - (a**-2.)/12 + (a**-4.)/120)/c)
h = _lazywhere(a >= 2e2, (a, c), f=asymptotic, f2=regular)
return h
gengamma = gengamma_gen(a=0.0, name='gengamma')
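# Illustrative sketch (not part of the library): `gengamma` contains both
# `gamma` (c = 1) and `weibull_min` (a = 1) as special cases. Example
# values are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.array([0.5, 1.0, 2.5])
#     >>> np.allclose(stats.gengamma.cdf(x, a=2.0, c=1.0),
#     ...             stats.gamma.cdf(x, a=2.0))
#     True
#     >>> np.allclose(stats.gengamma.cdf(x, a=1.0, c=1.7),
#     ...             stats.weibull_min.cdf(x, c=1.7))
#     True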
class genhalflogistic_gen(rv_continuous):
r"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is:
.. math::
f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}
for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
`genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _get_support(self, c):
return self.a, 1.0/c
def _pdf(self, x, c):
# genhalflogistic.pdf(x, c) =
# 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class genhyperbolic_gen(rv_continuous):
r"""A generalized hyperbolic continuous random variable.
%(before_notes)s
See Also
--------
t, norminvgauss, geninvgauss, laplace, cauchy
Notes
-----
The probability density function for `genhyperbolic` is:
.. math::
f(x, p, a, b) =
\frac{(a^2 - b^2)^{p/2}}
{\sqrt{2\pi}a^{p-1/2}
K_p\Big(\sqrt{a^2 - b^2}\Big)}
e^{bx} \times \frac{K_{p - 1/2}
(a \sqrt{1 + x^2})}
{(\sqrt{1 + x^2})^{1/2 - p}}
for :math:`x, p \in ( - \infty; \infty)`,
:math:`|b| < a` if :math:`p \ge 0`,
:math:`|b| \le a` if :math:`p < 0`.
:math:`K_{p}(.)` denotes the modified Bessel function of the second
kind and order :math:`p` (`scipy.special.kv`)
`genhyperbolic` takes ``p`` as a tail parameter,
``a`` as a shape parameter,
``b`` as a skewness parameter.
%(after_notes)s
The original parameterization of the Generalized Hyperbolic Distribution
is found in [1]_ as follows
.. math::
f(x, \lambda, \alpha, \beta, \delta, \mu) =
\frac{(\gamma/\delta)^\lambda}{\sqrt{2\pi}K_\lambda(\delta \gamma)}
e^{\beta (x - \mu)} \times \frac{K_{\lambda - 1/2}
(\alpha \sqrt{\delta^2 + (x - \mu)^2})}
{(\sqrt{\delta^2 + (x - \mu)^2} / \alpha)^{1/2 - \lambda}}
for :math:`x \in ( - \infty; \infty)`,
:math:`\gamma := \sqrt{\alpha^2 - \beta^2}`,
:math:`\lambda, \mu \in ( - \infty; \infty)`,
:math:`\delta \ge 0, |\beta| < \alpha` if :math:`\lambda \ge 0`,
:math:`\delta > 0, |\beta| \le \alpha` if :math:`\lambda < 0`.
The location-scale-based parameterization implemented in
SciPy is based on [2]_, where :math:`a = \alpha\delta`,
:math:`b = \beta\delta`, :math:`p = \lambda`,
:math:`scale=\delta` and :math:`loc=\mu`
Moments are implemented based on [3]_ and [4]_.
For distributions that arise as special cases, such as Student's t,
it is not recommended to rely on the implementation of genhyperbolic.
To avoid potential numerical problems and for performance reasons,
the methods of the specific distributions should be used.
References
----------
.. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions
on Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
pp. 151-157, 1978. https://www.jstor.org/stable/4615705
.. [2] Eberlein E., Prause K. (2002) The Generalized Hyperbolic Model:
Financial Derivatives and Risk Measures. In: Geman H., Madan D.,
Pliska S.R., Vorst T. (eds) Mathematical Finance - Bachelier
Congress 2000. Springer Finance. Springer, Berlin, Heidelberg.
:doi:`10.1007/978-3-662-12429-1_12`
.. [3] Scott, David J, Würtz, Diethelm, Dong, Christine and Tran,
Thanh Tam, (2009), Moments of the generalized hyperbolic
distribution, MPRA Paper, University Library of Munich, Germany,
https://EconPapers.repec.org/RePEc:pra:mprapa:19081.
.. [4] E. Eberlein and E. A. von Hammerstein. Generalized hyperbolic
and inverse Gaussian distributions: Limiting cases and approximation
of processes. FDM Preprint 80, April 2003. University of Freiburg.
https://freidok.uni-freiburg.de/fedora/objects/freidok:7974/datastreams/FILE1/content
%(example)s
"""
def _argcheck(self, p, a, b):
return (np.logical_and(np.abs(b) < a, p >= 0)
| np.logical_and(np.abs(b) <= a, p < 0))
def _shape_info(self):
ip = _ShapeInfo("p", False, (-np.inf, np.inf), (False, False))
ia = _ShapeInfo("a", False, (0, np.inf), (True, False))
ib = _ShapeInfo("b", False, (-np.inf, np.inf), (False, False))
return [ip, ia, ib]
def _fitstart(self, data):
# Arbitrary, but the default p = a = b = 1 is not valid; the
# distribution requires |b| < a if p >= 0.
return super()._fitstart(data, args=(1, 1, 0.5))
def _logpdf(self, x, p, a, b):
# kve instead of kv works better for large values of p
# and smaller values of sqrt(a^2 - b^2)
@np.vectorize
def _logpdf_single(x, p, a, b):
return _stats.genhyperbolic_logpdf(x, p, a, b)
return _logpdf_single(x, p, a, b)
def _pdf(self, x, p, a, b):
# kve instead of kv works better for large values of p
# and smaller values of sqrt(a^2 - b^2)
@np.vectorize
def _pdf_single(x, p, a, b):
return _stats.genhyperbolic_pdf(x, p, a, b)
return _pdf_single(x, p, a, b)
# np.vectorize isn't currently designed to be used as a decorator,
# so use a lambda instead. This allows us to decorate the function
# with `np.vectorize` and still provide the `otypes` parameter.
# The first argument to `vectorize` is `func.__get__(object)` for
# compatibility with Python 3.9. In Python 3.10, this can be
# simplified to just `func`.
@lambda func: np.vectorize(func.__get__(object), otypes=[np.float64])
@staticmethod
def _integrate_pdf(x0, x1, p, a, b):
"""
Integrate the pdf of the genhyperbolic distribution from x0 to x1.
This is a private function used by _cdf() and _sf() only; either x0
will be -inf or x1 will be inf.
"""
user_data = np.array([p, a, b], float).ctypes.data_as(ctypes.c_void_p)
llc = LowLevelCallable.from_cython(_stats, '_genhyperbolic_pdf',
user_data)
d = np.sqrt((a + b)*(a - b))
mean = b/d * sc.kv(p + 1, d) / sc.kv(p, d)
epsrel = 1e-10
epsabs = 0
if x0 < mean < x1:
# If the interval includes the mean, integrate over the two
# intervals [x0, mean] and [mean, x1] and add. If we try to do
# the integral in one call of quad and the non-infinite endpoint
# is far in the tail, quad might return an incorrect result
# because it does not "see" the peak of the PDF.
intgrl = (integrate.quad(llc, x0, mean,
epsrel=epsrel, epsabs=epsabs)[0]
+ integrate.quad(llc, mean, x1,
epsrel=epsrel, epsabs=epsabs)[0])
else:
intgrl = integrate.quad(llc, x0, x1,
epsrel=epsrel, epsabs=epsabs)[0]
if np.isnan(intgrl):
msg = ("Infinite values encountered in scipy.special.kve. "
"Values replaced by NaN to avoid incorrect results.")
warnings.warn(msg, RuntimeWarning)
return max(0.0, min(1.0, intgrl))
def _cdf(self, x, p, a, b):
return self._integrate_pdf(-np.inf, x, p, a, b)
def _sf(self, x, p, a, b):
return self._integrate_pdf(x, np.inf, p, a, b)
def _rvs(self, p, a, b, size=None, random_state=None):
# note: Y = b * V + sqrt(V) * X has a
# generalized hyperbolic distribution
# if X is standard normal and V is
# geninvgauss(p=p, b=t2, scale=t3)
t1 = np.float_power(a, 2) - np.float_power(b, 2)
# b in the GIG
t2 = np.float_power(t1, 0.5)
# scale in the GIG
t3 = np.float_power(t1, - 0.5)
gig = geninvgauss.rvs(
p=p,
b=t2,
scale=t3,
size=size,
random_state=random_state
)
normst = norm.rvs(size=size, random_state=random_state)
return b * gig + np.sqrt(gig) * normst
def _stats(self, p, a, b):
# https://mpra.ub.uni-muenchen.de/19081/1/MPRA_paper_19081.pdf
# https://freidok.uni-freiburg.de/fedora/objects/freidok:7974/datastreams/FILE1/content
# standardized moments
p, a, b = np.broadcast_arrays(p, a, b)
t1 = np.float_power(a, 2) - np.float_power(b, 2)
t1 = np.float_power(t1, 0.5)
t2 = np.float_power(1, 2) * np.float_power(t1, - 1)
integers = np.linspace(0, 4, 5)
# make integers perpendicular to existing dimensions
integers = integers.reshape(integers.shape + (1,) * p.ndim)
b0, b1, b2, b3, b4 = sc.kv(p + integers, t1)
r1, r2, r3, r4 = (b / b0 for b in (b1, b2, b3, b4))
m = b * t2 * r1
v = (
t2 * r1 + np.float_power(b, 2) * np.float_power(t2, 2) *
(r2 - np.float_power(r1, 2))
)
m3e = (
np.float_power(b, 3) * np.float_power(t2, 3) *
(r3 - 3 * b2 * b1 * np.float_power(b0, -2) +
2 * np.float_power(r1, 3)) +
3 * b * np.float_power(t2, 2) *
(r2 - np.float_power(r1, 2))
)
s = m3e * np.float_power(v, - 3 / 2)
m4e = (
np.float_power(b, 4) * np.float_power(t2, 4) *
(r4 - 4 * b3 * b1 * np.float_power(b0, - 2) +
6 * b2 * np.float_power(b1, 2) * np.float_power(b0, - 3) -
3 * np.float_power(r1, 4)) +
np.float_power(b, 2) * np.float_power(t2, 3) *
(6 * r3 - 12 * b2 * b1 * np.float_power(b0, - 2) +
6 * np.float_power(r1, 3)) +
3 * np.float_power(t2, 2) * r2
)
k = m4e * np.float_power(v, -2) - 3
return m, v, s, k
genhyperbolic = genhyperbolic_gen(name='genhyperbolic')
class gompertz_gen(rv_continuous):
r"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is:
.. math::
f(x, c) = c \exp(x) \exp(-c (e^x-1))
for :math:`x \ge 0`, :math:`c > 0`.
`gompertz` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _sf(self, x, c):
return np.exp(-c * sc.expm1(x))
def _isf(self, p, c):
return sc.log1p(-np.log(p)/c)
def _entropy(self, c):
return 1.0 - np.log(c) - sc._ufuncs._scaled_exp1(c)/c
gompertz = gompertz_gen(a=0.0, name='gompertz')
def _average_with_log_weights(x, logweights):
x = np.asarray(x)
logweights = np.asarray(logweights)
maxlogw = logweights.max()
weights = np.exp(logweights - maxlogw)
return np.average(x, weights=weights)
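# Illustrative sketch (not part of the library): shifting by the maximum log
# weight lets `_average_with_log_weights` handle weights whose direct
# exponentials would underflow to zero. Example values are arbitrary.
#
#     >>> import numpy as np
#     >>> x = np.array([1.0, 2.0, 3.0])
#     >>> logw = np.array([-1000.0, -1001.0, -1002.0])  # np.exp underflows
#     >>> w = np.exp(logw - logw.max())
#     >>> np.isclose(_average_with_log_weights(x, logw),
#     ...            np.average(x, weights=w))
#     True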
class gumbel_r_gen(rv_continuous):
r"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is:
.. math::
f(x) = \exp(-(x + e^{-x}))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# gumbel_r.pdf(x) = exp(-(x + exp(-x)))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _sf(self, x):
return -sc.expm1(-np.exp(-x))
def _isf(self, p):
return -np.log(-np.log1p(-p))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# https://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
# By the method of maximum likelihood, the estimators of the
# location and scale are the roots of the equations defined in
# `func` and the value of the expression for `loc` that follows.
        # The first `func` is the first-order derivative of the
        # log-likelihood; the second is from Statistical Distributions,
        # 3rd Edition. Evans, Hastings, and Peacock (2000), Page 101.
def get_loc_from_scale(scale):
return -scale * (sc.logsumexp(-data / scale) - np.log(len(data)))
if fscale is not None:
# if the scale is fixed, the location can be analytically
# determined.
scale = fscale
loc = get_loc_from_scale(scale)
else:
# A different function is solved depending on whether the location
# is fixed.
if floc is not None:
loc = floc
# equation to use if the location is fixed.
                # note that one cannot use the equation in Evans, Hastings,
                # and Peacock (2000) (since it assumes that the derivative
                # of the log-likelihood w.r.t. the location is zero).
                # however, it is easy to derive the MLE condition directly
                # if loc is fixed
def func(scale):
term1 = (loc - data) * np.exp((loc - data) / scale) + data
term2 = len(data) * (loc + scale)
return term1.sum() - term2
else:
# equation to use if both location and scale are free
def func(scale):
sdata = -data / scale
wavg = _average_with_log_weights(data, logweights=sdata)
return data.mean() - wavg - scale
# set brackets for `root_scalar` to use when optimizing over the
# scale such that a root is likely between them. Use user supplied
# guess or default 1.
brack_start = kwds.get('scale', 1)
lbrack, rbrack = brack_start / 2, brack_start * 2
# if a root is not between the brackets, iteratively expand them
# until they include a sign change, checking after each bracket is
# modified.
def interval_contains_root(lbrack, rbrack):
# return true if the signs disagree.
return (np.sign(func(lbrack)) !=
np.sign(func(rbrack)))
while (not interval_contains_root(lbrack, rbrack)
and (lbrack > 0 or rbrack < np.inf)):
lbrack /= 2
rbrack *= 2
res = optimize.root_scalar(func, bracket=(lbrack, rbrack),
rtol=1e-14, xtol=1e-14)
scale = res.root
loc = floc if floc is not None else get_loc_from_scale(scale)
return loc, scale
gumbel_r = gumbel_r_gen(name='gumbel_r')
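# With a fixed scale, `fit` uses the analytic location estimate from
# `get_loc_from_scale`; a small sanity check of that closed form
# (illustrative only, public API):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> from scipy.special import logsumexp
#     >>> rng = np.random.default_rng(1234)
#     >>> data = stats.gumbel_r.rvs(loc=3, scale=2, size=100, random_state=rng)
#     >>> loc, scale = stats.gumbel_r.fit(data, fscale=2)
#     >>> np.isclose(loc, -2 * (logsumexp(-data/2) - np.log(data.size)))
#     True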
class gumbel_l_gen(rv_continuous):
r"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is:
.. math::
f(x) = \exp(x - e^x)
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# gumbel_l.pdf(x) = exp(x - exp(x))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
# The fit method of `gumbel_r` can be used for this distribution with
# small modifications. The process to do this is
# 1. pass the sign negated data into `gumbel_r.fit`
# - if the location is fixed, it should also be negated.
# 2. negate the sign of the resulting location, leaving the scale
# unmodified.
# `gumbel_r.fit` holds necessary input checks.
if kwds.get('floc') is not None:
kwds['floc'] = -kwds['floc']
loc_r, scale_r, = gumbel_r.fit(-np.asarray(data), *args, **kwds)
return -loc_r, scale_r
gumbel_l = gumbel_l_gen(name='gumbel_l')
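# The left- and right-skewed Gumbel laws are mirror images (the same fact
# the `fit` override above exploits): if X is gumbel_r then -X is
# gumbel_l, so cdf_l(x) == sf_r(-x). Illustrative check:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = 0.7
#     >>> np.isclose(stats.gumbel_l.cdf(x), stats.gumbel_r.sf(-x))
#     True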
class halfcauchy_gen(rv_continuous):
r"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is:
.. math::
f(x) = \frac{2}{\pi (1 + x^2)}
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _sf(self, x):
return 2.0/np.pi * np.arctan2(1, x)
def _isf(self, p):
return 1.0/np.tan(np.pi*p/2)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
# location is independent from the scale
data_min = np.min(data)
if floc is not None:
if data_min < floc:
# There are values that are less than the specified loc.
raise FitDataError("halfcauchy", lower=floc, upper=np.inf)
loc = floc
else:
# if not provided, location MLE is the minimal data point
loc = data_min
# find scale
def find_scale(loc, data):
shifted_data = data - loc
n = data.size
shifted_data_squared = np.square(shifted_data)
def fun_to_solve(scale):
denominator = scale**2 + shifted_data_squared
return 2 * np.sum(shifted_data_squared/denominator) - n
small = np.finfo(1.0).tiny**0.5 # avoid underflow
res = root_scalar(fun_to_solve, bracket=(small, np.max(shifted_data)))
return res.root
if fscale is not None:
scale = fscale
else:
scale = find_scale(loc, data)
return loc, scale
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
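# At the fitted parameters the scale satisfies the likelihood equation
# solved in `find_scale`, i.e. 2 * sum(d**2/(scale**2 + d**2)) == n with
# d = data - loc. A quick check (illustrative, public API only):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> rng = np.random.default_rng(1234)
#     >>> data = stats.halfcauchy.rvs(scale=1.5, size=500, random_state=rng)
#     >>> loc, scale = stats.halfcauchy.fit(data)
#     >>> d2 = (data - loc)**2
#     >>> np.isclose(2 * np.sum(d2 / (scale**2 + d2)), data.size)
#     True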
class halflogistic_gen(rv_continuous):
r"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is:
.. math::
f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
= \frac{1}{2} \text{sech}(x/2)^2
for :math:`x \ge 0`.
%(after_notes)s
References
----------
.. [1] Asgharzadeh et al (2011). "Comparisons of Methods of Estimation for the
Half-Logistic Distribution". Selcuk J. Appl. Math. 93-108.
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
# = 1/2 * sech(x/2)**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _sf(self, x):
return 2 * sc.expit(-x)
def _isf(self, q):
return _lazywhere(q < 0.5, (q, ),
lambda q: -sc.logit(0.5 * q),
f2=lambda q: 2*np.arctanh(1 - q))
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
def find_scale(data, loc):
# scale is solution to a fix point problem ([1] 2.6)
# use approximate MLE as starting point ([1] 3.1)
n_observations = data.shape[0]
sorted_data = np.sort(data, axis=0)
p = np.arange(1, n_observations + 1)/(n_observations + 1)
q = 1 - p
pp1 = 1 + p
alpha = p - 0.5 * q * pp1 * np.log(pp1 / q)
beta = 0.5 * q * pp1
sorted_data = sorted_data - loc
B = 2 * np.sum(alpha[1:] * sorted_data[1:])
C = 2 * np.sum(beta[1:] * sorted_data[1:]**2)
# starting guess
scale = ((B + np.sqrt(B**2 + 8 * n_observations * C))
/(4 * n_observations))
# relative tolerance of fix point iterator
rtol = 1e-8
relative_residual = 1
shifted_mean = sorted_data.mean() # y_mean - y_min
# find fix point by repeated application of eq. (2.6)
# simplify as
# exp(-x) / (1 + exp(-x)) = 1 / (1 + exp(x))
# = expit(-x))
while relative_residual > rtol:
sum_term = sorted_data * sc.expit(-sorted_data/scale)
scale_new = shifted_mean - 2/n_observations * sum_term.sum()
relative_residual = abs((scale - scale_new)/scale)
scale = scale_new
return scale
# location is independent from the scale
data_min = np.min(data)
if floc is not None:
if data_min < floc:
# There are values that are less than the specified loc.
raise FitDataError("halflogistic", lower=floc, upper=np.inf)
loc = floc
else:
# if not provided, location MLE is the minimal data point
loc = data_min
# scale depends on location
scale = fscale if fscale is not None else find_scale(data, loc)
return loc, scale
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
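# `halflogistic` is the distribution of |Y| for Y logistic, so its CDF is
# the probability mass the logistic law places on (-x, x). Illustrative
# check (public API only):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = 1.2
#     >>> np.isclose(stats.halflogistic.cdf(x),
#     ...            stats.logistic.cdf(x) - stats.logistic.cdf(-x))
#     True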
class halfnorm_gen(rv_continuous):
r"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is:
.. math::
f(x) = \sqrt{2/\pi} \exp(-x^2 / 2)
for :math:`x >= 0`.
`halfnorm` is a special case of `chi` with ``df=1``.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return abs(random_state.standard_normal(size=size))
def _pdf(self, x):
# halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return sc.erf(x / np.sqrt(2))
def _ppf(self, q):
return _norm_ppf((1+q)/2.0)
def _sf(self, x):
return 2 * _norm_sf(x)
def _isf(self, p):
return _norm_isf(p/2)
def _stats(self):
return (np.sqrt(2.0/np.pi),
1-2.0/np.pi,
np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
data_min = np.min(data)
if floc is not None:
if data_min < floc:
# There are values that are less than the specified loc.
raise FitDataError("halfnorm", lower=floc, upper=np.inf)
loc = floc
else:
loc = data_min
if fscale is not None:
scale = fscale
else:
scale = stats.moment(data, moment=2, center=loc)**0.5
return loc, scale
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
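# As noted in the docstring, `halfnorm` is `chi` with one degree of
# freedom; a one-line illustrative check:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> np.isclose(stats.halfnorm.cdf(0.8), stats.chi.cdf(0.8, 1))
#     True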
class hypsecant_gen(rv_continuous):
r"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is:
.. math::
f(x) = \frac{1}{\pi} \text{sech}(x)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# hypsecant.pdf(x) = 1/pi * sech(x)
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _sf(self, x):
return 2.0/np.pi*np.arctan(np.exp(-x))
def _isf(self, q):
return -np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
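# A quick numerical check of the variance pi**2/4 returned by `_stats`
# (illustrative only):
#
#     >>> import numpy as np
#     >>> from scipy import integrate, stats
#     >>> v, _ = integrate.quad(lambda t: t*t*stats.hypsecant.pdf(t),
#     ...                       -np.inf, np.inf)
#     >>> np.isclose(v, np.pi**2 / 4)
#     True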
class gausshyper_gen(rv_continuous):
r"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is:
.. math::
f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}
for :math:`0 \le x \le 1`, :math:`a,b > 0`, :math:`c` a real number,
:math:`z > -1`, and :math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`.
:math:`F[2, 1]` is the Gauss hypergeometric function
`scipy.special.hyp2f1`.
`gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
parameters.
%(after_notes)s
References
----------
.. [1] Armero, C., and M. J. Bayarri. "Prior Assessments for Prediction in
Queues." *Journal of the Royal Statistical Society*. Series D (The
Statistician) 43, no. 1 (1994): 139-53. doi:10.2307/2348939
%(example)s
"""
def _argcheck(self, a, b, c, z):
        # z > -1 per gh-10134; (c == c) is a NaN check, as c may be any
        # real number
        return (a > 0) & (b > 0) & (c == c) & (z > -1)
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
ic = _ShapeInfo("c", False, (-np.inf, np.inf), (False, False))
iz = _ShapeInfo("z", False, (-1, np.inf), (False, False))
return [ia, ib, ic, iz]
def _pdf(self, x, a, b, c, z):
# gausshyper.pdf(x, a, b, c, z) =
# C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
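# The normalizing constant C involves hyp2f1; verifying that the density
# integrates to one over [0, 1] exercises that constant (illustrative):
#
#     >>> import numpy as np
#     >>> from scipy import integrate, stats
#     >>> a, b, c, z = 1.5, 2.5, 2.0, 0.5
#     >>> total, _ = integrate.quad(stats.gausshyper.pdf, 0, 1,
#     ...                           args=(a, b, c, z))
#     >>> np.isclose(total, 1.0)
#     True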
class invgamma_gen(rv_continuous):
r"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is:
.. math::
f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
for :math:`x >= 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
`invgamma` takes ``a`` as a shape parameter for :math:`a`.
`invgamma` is a special case of `gengamma` with ``c=-1``, and it is a
different parameterization of the scaled inverse chi-squared distribution.
Specifically, if the scaled inverse chi-squared distribution is
parameterized with degrees of freedom :math:`\nu` and scaling parameter
:math:`\tau^2`, then it can be modeled using `invgamma` with
``a=`` :math:`\nu/2` and ``scale=`` :math:`\nu \tau^2/2`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, a):
# invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
def regular(a):
h = a - (a + 1.0) * sc.psi(a) + sc.gammaln(a)
return h
def asymptotic(a):
# gammaln(a) ~ a * ln(a) - a - 0.5 * ln(a) + 0.5 * ln(2 * pi)
# psi(a) ~ ln(a) - 1 / (2 * a)
h = ((1 - 3*np.log(a) + np.log(2) + np.log(np.pi))/2
+ 2/3*a**-1. + a**-2./12 - a**-3./90 - a**-4./120)
return h
h = _lazywhere(a >= 2e2, (a,), f=asymptotic, f2=regular)
return h
invgamma = invgamma_gen(a=0.0, name='invgamma')
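# "Inverted" is meant literally: if X ~ gamma(a) then 1/X ~ invgamma(a),
# so P(1/X <= x) = P(X >= 1/x). Illustrative check (public API only):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x, a = 1.3, 2.5
#     >>> np.isclose(stats.invgamma.cdf(x, a), stats.gamma.sf(1/x, a))
#     True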
class invgauss_gen(rv_continuous):
r"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
\exp(-\frac{(x-\mu)^2}{2 x \mu^2})
for :math:`x >= 0` and :math:`\mu > 0`.
`invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return [_ShapeInfo("mu", False, (0, np.inf), (False, False))]
def _rvs(self, mu, size=None, random_state=None):
return random_state.wald(mu, 1.0, size=size)
def _pdf(self, x, mu):
# invgauss.pdf(x, mu) =
# 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
# approach adapted from equations in
# https://journal.r-project.org/archive/2016-1/giner-smyth.pdf,
# not R code. see gh-13616
def _logcdf(self, x, mu):
fac = 1 / np.sqrt(x)
a = _norm_logcdf(fac * ((x / mu) - 1))
b = 2 / mu + _norm_logcdf(-fac * ((x / mu) + 1))
return a + np.log1p(np.exp(b - a))
def _logsf(self, x, mu):
fac = 1 / np.sqrt(x)
a = _norm_logsf(fac * ((x / mu) - 1))
b = 2 / mu + _norm_logcdf(-fac * (x + mu) / mu)
return a + np.log1p(-np.exp(b - a))
def _sf(self, x, mu):
return np.exp(self._logsf(x, mu))
def _cdf(self, x, mu):
return np.exp(self._logcdf(x, mu))
def _ppf(self, x, mu):
with np.errstate(divide='ignore', over='ignore', invalid='ignore'):
x, mu = np.broadcast_arrays(x, mu)
ppf = _boost._invgauss_ppf(x, mu, 1)
i_wt = x > 0.5 # "wrong tail" - sometimes too inaccurate
ppf[i_wt] = _boost._invgauss_isf(1-x[i_wt], mu[i_wt], 1)
i_nan = np.isnan(ppf)
ppf[i_nan] = super()._ppf(x[i_nan], mu[i_nan])
return ppf
def _isf(self, x, mu):
with np.errstate(divide='ignore', over='ignore', invalid='ignore'):
x, mu = np.broadcast_arrays(x, mu)
isf = _boost._invgauss_isf(x, mu, 1)
i_wt = x > 0.5 # "wrong tail" - sometimes too inaccurate
isf[i_wt] = _boost._invgauss_ppf(1-x[i_wt], mu[i_wt], 1)
i_nan = np.isnan(isf)
isf[i_nan] = super()._isf(x[i_nan], mu[i_nan])
return isf
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
method = kwds.get('method', 'mle')
if (isinstance(data, CensoredData) or type(self) == wald_gen
or method.lower() == 'mm'):
return super().fit(data, *args, **kwds)
data, fshape_s, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
'''
Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
        and Peacock (2000), Page 121. Their shape parameter is equivalent to
        SciPy's with the conversion `fshape_s = fshape / scale`.
        MLE formulas are not used in 3 conditions:
        - `loc` is not fixed
        - `mu` is fixed
        These cases fall back on the superclass fit method.
        - `loc` is fixed, but translation results in negative data, which
          raises a `FitDataError`.
'''
if floc is None or fshape_s is not None:
return super().fit(data, *args, **kwds)
elif np.any(data - floc < 0):
raise FitDataError("invgauss", lower=0, upper=np.inf)
else:
data = data - floc
fshape_n = np.mean(data)
if fscale is None:
fscale = len(data) / (np.sum(data ** -1 - fshape_n ** -1))
fshape_s = fshape_n / fscale
return fshape_s, floc, fscale
def _entropy(self, mu):
"""
Ref.: https://moser-isi.ethz.ch/docs/papers/smos-2012-10.pdf (eq. 9)
"""
# a = log(2*pi*e*mu**3)
# = 1 + log(2*pi) + 3 * log(mu)
a = 1. + np.log(2 * np.pi) + 3 * np.log(mu)
# b = exp(2/mu) * exp1(2/mu)
# = _scaled_exp1(2/mu) / (2/mu)
r = 2/mu
b = sc._ufuncs._scaled_exp1(r)/r
return 0.5 * a - 1.5 * b
invgauss = invgauss_gen(a=0.0, name='invgauss')
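# With `floc=0`, the closed-form MLE above makes the fitted mean
# `mu * scale` match the sample mean; a quick check (illustrative):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> rng = np.random.default_rng(1234)
#     >>> data = stats.invgauss.rvs(0.5, size=1000, random_state=rng)
#     >>> mu, loc, scale = stats.invgauss.fit(data, floc=0)
#     >>> np.isclose(mu * scale, data.mean())
#     True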
class geninvgauss_gen(rv_continuous):
r"""A Generalized Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `geninvgauss` is:
.. math::
f(x, p, b) = x^{p-1} \exp(-b (x + 1/x) / 2) / (2 K_p(b))
    where `x > 0`, `p` is a real number and `b > 0` ([1]_).
:math:`K_p` is the modified Bessel function of second kind of order `p`
(`scipy.special.kv`).
%(after_notes)s
The inverse Gaussian distribution `stats.invgauss(mu)` is a special case of
`geninvgauss` with `p = -1/2`, `b = 1 / mu` and `scale = mu`.
Generating random variates is challenging for this distribution. The
implementation is based on [2]_.
References
----------
.. [1] O. Barndorff-Nielsen, P. Blaesild, C. Halgreen, "First hitting time
models for the generalized inverse gaussian distribution",
Stochastic Processes and their Applications 7, pp. 49--54, 1978.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
%(example)s
"""
    def _argcheck(self, p, b):
        # (p == p) is a NaN check; p may be any real number
        return (p == p) & (b > 0)
def _shape_info(self):
ip = _ShapeInfo("p", False, (-np.inf, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ip, ib]
def _logpdf(self, x, p, b):
# kve instead of kv works better for large values of b
# warn if kve produces infinite values and replace by nan
# otherwise c = -inf and the results are often incorrect
def logpdf_single(x, p, b):
return _stats.geninvgauss_logpdf(x, p, b)
logpdf_single = np.vectorize(logpdf_single, otypes=[np.float64])
z = logpdf_single(x, p, b)
if np.isnan(z).any():
msg = ("Infinite values encountered in scipy.special.kve(p, b). "
"Values replaced by NaN to avoid incorrect results.")
warnings.warn(msg, RuntimeWarning)
return z
def _pdf(self, x, p, b):
# relying on logpdf avoids overflow of x**(p-1) for large x and p
return np.exp(self._logpdf(x, p, b))
def _cdf(self, x, *args):
_a, _b = self._get_support(*args)
def _cdf_single(x, *args):
p, b = args
user_data = np.array([p, b], float).ctypes.data_as(ctypes.c_void_p)
llc = LowLevelCallable.from_cython(_stats, '_geninvgauss_pdf',
user_data)
return integrate.quad(llc, _a, x)[0]
_cdf_single = np.vectorize(_cdf_single, otypes=[np.float64])
return _cdf_single(x, *args)
def _logquasipdf(self, x, p, b):
# log of the quasi-density (w/o normalizing constant) used in _rvs
return _lazywhere(x > 0, (x, p, b),
lambda x, p, b: (p - 1)*np.log(x) - b*(x + 1/x)/2,
-np.inf)
def _rvs(self, p, b, size=None, random_state=None):
# if p and b are scalar, use _rvs_scalar, otherwise need to create
# output by iterating over parameters
if np.isscalar(p) and np.isscalar(b):
out = self._rvs_scalar(p, b, size, random_state)
elif p.size == 1 and b.size == 1:
out = self._rvs_scalar(p.item(), b.item(), size, random_state)
else:
# When this method is called, size will be a (possibly empty)
# tuple of integers. It will not be None; if `size=None` is passed
# to `rvs()`, size will be the empty tuple ().
p, b = np.broadcast_arrays(p, b)
# p and b now have the same shape.
# `shp` is the shape of the blocks of random variates that are
# generated for each combination of parameters associated with
# broadcasting p and b.
            # bc is a tuple the same length as size. The values
# in bc are bools. If bc[j] is True, it means that
# entire axis is filled in for a given combination of the
# broadcast arguments.
shp, bc = _check_shape(p.shape, size)
# `numsamples` is the total number of variates to be generated
# for each combination of the input arguments.
numsamples = int(np.prod(shp))
# `out` is the array to be returned. It is filled in the
# loop below.
out = np.empty(size)
it = np.nditer([p, b],
flags=['multi_index'],
op_flags=[['readonly'], ['readonly']])
while not it.finished:
# Convert the iterator's multi_index into an index into the
# `out` array where the call to _rvs_scalar() will be stored.
# Where bc is True, we use a full slice; otherwise we use the
# index value from it.multi_index. len(it.multi_index) might
# be less than len(bc), and in that case we want to align these
# two sequences to the right, so the loop variable j runs from
# -len(size) to 0. This doesn't cause an IndexError, as
# bc[j] will be True in those cases where it.multi_index[j]
# would cause an IndexError.
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
out[idx] = self._rvs_scalar(it[0], it[1], numsamples,
random_state).reshape(shp)
it.iternext()
if size == ():
out = out.item()
return out
def _rvs_scalar(self, p, b, numsamples, random_state):
# following [2], the quasi-pdf is used instead of the pdf for the
# generation of rvs
invert_res = False
if not numsamples:
numsamples = 1
if p < 0:
# note: if X is geninvgauss(p, b), then 1/X is geninvgauss(-p, b)
p = -p
invert_res = True
m = self._mode(p, b)
# determine method to be used following [2]
ratio_unif = True
if p >= 1 or b > 1:
# ratio of uniforms with mode shift below
mode_shift = True
elif b >= min(0.5, 2 * np.sqrt(1 - p) / 3):
# ratio of uniforms without mode shift below
mode_shift = False
else:
# new algorithm in [2]
ratio_unif = False
# prepare sampling of rvs
size1d = tuple(np.atleast_1d(numsamples))
N = np.prod(size1d) # number of rvs needed, reshape upon return
x = np.zeros(N)
simulated = 0
if ratio_unif:
# use ratio of uniforms method
if mode_shift:
a2 = -2 * (p + 1) / b - m
a1 = 2 * m * (p - 1) / b - 1
# find roots of x**3 + a2*x**2 + a1*x + m (Cardano's formula)
p1 = a1 - a2**2 / 3
q1 = 2 * a2**3 / 27 - a2 * a1 / 3 + m
phi = np.arccos(-q1 * np.sqrt(-27 / p1**3) / 2)
s1 = -np.sqrt(-4 * p1 / 3)
root1 = s1 * np.cos(phi / 3 + np.pi / 3) - a2 / 3
root2 = -s1 * np.cos(phi / 3) - a2 / 3
# root3 = s1 * np.cos(phi / 3 - np.pi / 3) - a2 / 3
# if g is the quasipdf, rescale: g(x) / g(m) which we can write
# as exp(log(g(x)) - log(g(m))). This is important
# since for large values of p and b, g cannot be evaluated.
# denote the rescaled quasipdf by h
lm = self._logquasipdf(m, p, b)
d1 = self._logquasipdf(root1, p, b) - lm
d2 = self._logquasipdf(root2, p, b) - lm
# compute the bounding rectangle w.r.t. h. Note that
# np.exp(0.5*d1) = np.sqrt(g(root1)/g(m)) = np.sqrt(h(root1))
vmin = (root1 - m) * np.exp(0.5 * d1)
vmax = (root2 - m) * np.exp(0.5 * d2)
umax = 1 # umax = sqrt(h(m)) = 1
def logqpdf(x):
return self._logquasipdf(x, p, b) - lm
c = m
else:
# ratio of uniforms without mode shift
# compute np.sqrt(quasipdf(m))
umax = np.exp(0.5*self._logquasipdf(m, p, b))
xplus = ((1 + p) + np.sqrt((1 + p)**2 + b**2))/b
vmin = 0
# compute xplus * np.sqrt(quasipdf(xplus))
vmax = xplus * np.exp(0.5 * self._logquasipdf(xplus, p, b))
c = 0
def logqpdf(x):
return self._logquasipdf(x, p, b)
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
i = 1
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u = umax * random_state.uniform(size=k)
v = random_state.uniform(size=k)
v = vmin + (vmax - vmin) * v
rvs = v / u + c
# rewrite acceptance condition u**2 <= pdf(rvs) by taking logs
accept = (2*np.log(u) <= logqpdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated "
"in {} attempts. Sampling does not appear to "
"work for the provided parameters.".format(i*N))
raise RuntimeError(msg)
i += 1
else:
# use new algorithm in [2]
x0 = b / (1 - p)
xs = np.max((x0, 2 / b))
k1 = np.exp(self._logquasipdf(m, p, b))
A1 = k1 * x0
if x0 < 2 / b:
k2 = np.exp(-b)
if p > 0:
A2 = k2 * ((2 / b)**p - x0**p) / p
else:
A2 = k2 * np.log(2 / b**2)
else:
k2, A2 = 0, 0
k3 = xs**(p - 1)
A3 = 2 * k3 * np.exp(-xs * b / 2) / b
A = A1 + A2 + A3
# [2]: rejection constant is < 2.73; so expected runtime is finite
while simulated < N:
k = N - simulated
h, rvs = np.zeros(k), np.zeros(k)
# simulate uniform rvs on [x1, x2] and [0, y2]
u = random_state.uniform(size=k)
v = A * random_state.uniform(size=k)
cond1 = v <= A1
cond2 = np.logical_not(cond1) & (v <= A1 + A2)
cond3 = np.logical_not(cond1 | cond2)
# subdomain (0, x0)
rvs[cond1] = x0 * v[cond1] / A1
h[cond1] = k1
# subdomain (x0, 2 / b)
if p > 0:
rvs[cond2] = (x0**p + (v[cond2] - A1) * p / k2)**(1 / p)
else:
rvs[cond2] = b * np.exp((v[cond2] - A1) * np.exp(b))
h[cond2] = k2 * rvs[cond2]**(p - 1)
# subdomain (xs, infinity)
z = np.exp(-xs * b / 2) - b * (v[cond3] - A1 - A2) / (2 * k3)
rvs[cond3] = -2 / b * np.log(z)
h[cond3] = k3 * np.exp(-rvs[cond3] * b / 2)
# apply rejection method
accept = (np.log(u * h) <= self._logquasipdf(rvs, p, b))
                num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
rvs = np.reshape(x, size1d)
if invert_res:
rvs = 1 / rvs
return rvs
def _mode(self, p, b):
# distinguish cases to avoid catastrophic cancellation (see [2])
if p < 1:
return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)
else:
return (np.sqrt((1 - p)**2 + b**2) - (1 - p)) / b
def _munp(self, n, p, b):
num = sc.kve(p + n, b)
denom = sc.kve(p, b)
inf_vals = np.isinf(num) | np.isinf(denom)
if inf_vals.any():
msg = ("Infinite values encountered in the moment calculation "
"involving scipy.special.kve. Values replaced by NaN to "
"avoid incorrect results.")
warnings.warn(msg, RuntimeWarning)
m = np.full_like(num, np.nan, dtype=np.double)
m[~inf_vals] = num[~inf_vals] / denom[~inf_vals]
else:
m = num / denom
return m
geninvgauss = geninvgauss_gen(a=0.0, name="geninvgauss")
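# Checking the special case stated in the docstring: invgauss(mu) is
# geninvgauss with p = -1/2, b = 1/mu and scale = mu (illustrative):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x, mu = 1.3, 0.5
#     >>> np.isclose(stats.geninvgauss.pdf(x, p=-0.5, b=1/mu, scale=mu),
#     ...            stats.invgauss.pdf(x, mu))
#     True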
class norminvgauss_gen(rv_continuous):
r"""A Normal Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `norminvgauss` is:
.. math::
f(x, a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}} \,
\exp(\sqrt{a^2 - b^2} + b x)
where :math:`x` is a real number, the parameter :math:`a` is the tail
heaviness and :math:`b` is the asymmetry parameter satisfying
:math:`a > 0` and :math:`|b| <= a`.
:math:`K_1` is the modified Bessel function of second kind
(`scipy.special.k1`).
%(after_notes)s
A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
can be expressed as a normal mean-variance mixture:
`Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is
`invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
to generate random variates.
Another common parametrization of the distribution (see Equation 2.1 in
[2]_) is given by the following expression of the pdf:
.. math::
g(x, \alpha, \beta, \delta, \mu) =
\frac{\alpha\delta K_1\left(\alpha\sqrt{\delta^2 + (x - \mu)^2}\right)}
{\pi \sqrt{\delta^2 + (x - \mu)^2}} \,
e^{\delta \sqrt{\alpha^2 - \beta^2} + \beta (x - \mu)}
In SciPy, this corresponds to
`a = alpha * delta, b = beta * delta, loc = mu, scale=delta`.
References
----------
.. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
pp. 151-157, 1978.
.. [2] O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and
Stochastic Volatility Modelling", Scandinavian Journal of
Statistics, Vol. 24, pp. 1-13, 1997.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (a > 0) & (np.absolute(b) < a)
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (-np.inf, np.inf), (False, False))
return [ia, ib]
def _fitstart(self, data):
# Arbitrary, but the default a = b = 1 is not valid; the distribution
# requires |b| < a.
return super()._fitstart(data, args=(1, 0.5))
def _pdf(self, x, a, b):
gamma = np.sqrt(a**2 - b**2)
fac1 = a / np.pi * np.exp(gamma)
sq = np.hypot(1, x) # reduce overflows
return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq
def _sf(self, x, a, b):
if np.isscalar(x):
# If x is a scalar, then so are a and b.
return integrate.quad(self._pdf, x, np.inf, args=(a, b))[0]
else:
a = np.atleast_1d(a)
b = np.atleast_1d(b)
result = []
for (x0, a0, b0) in zip(x, a, b):
result.append(integrate.quad(self._pdf, x0, np.inf,
args=(a0, b0))[0])
return np.array(result)
def _isf(self, q, a, b):
def _isf_scalar(q, a, b):
def eq(x, a, b, q):
# Solve eq(x, a, b, q) = 0 to obtain isf(x, a, b) = q.
return self._sf(x, a, b) - q
# Find a bracketing interval for the root.
# Start at the mean, and grow the length of the interval
# by 2 each iteration until there is a sign change in eq.
xm = self.mean(a, b)
em = eq(xm, a, b, q)
if em == 0:
# Unlikely, but might as well check.
return xm
if em > 0:
delta = 1
left = xm
right = xm + delta
while eq(right, a, b, q) > 0:
delta = 2*delta
right = xm + delta
else:
# em < 0
delta = 1
right = xm
left = xm - delta
while eq(left, a, b, q) < 0:
delta = 2*delta
left = xm - delta
result = optimize.brentq(eq, left, right, args=(a, b, q),
xtol=self.xtol)
return result
if np.isscalar(q):
return _isf_scalar(q, a, b)
else:
result = []
for (q0, a0, b0) in zip(q, a, b):
result.append(_isf_scalar(q0, a0, b0))
return np.array(result)
def _rvs(self, a, b, size=None, random_state=None):
        # note: Y = b * V + sqrt(V) * X is norminvgauss(a, b) if X is
        # standard normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
gamma = np.sqrt(a**2 - b**2)
ig = invgauss.rvs(mu=1/gamma, size=size, random_state=random_state)
return b * ig + np.sqrt(ig) * norm.rvs(size=size,
random_state=random_state)
def _stats(self, a, b):
gamma = np.sqrt(a**2 - b**2)
mean = b / gamma
variance = a**2 / gamma**3
skewness = 3.0 * b / (a * np.sqrt(gamma))
kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
return mean, variance, skewness, kurtosis
norminvgauss = norminvgauss_gen(name="norminvgauss")
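# Worked example of the parameter mapping given in the docstring: the
# (alpha, beta, delta, mu) density equals the SciPy pdf with
# a = alpha*delta, b = beta*delta, loc = mu, scale = delta (illustrative):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> from scipy.special import k1
#     >>> alpha, beta, delta, mu = 2.0, 0.5, 1.5, -1.0
#     >>> x = 0.3
#     >>> s = np.sqrt(delta**2 + (x - mu)**2)
#     >>> g = (alpha * delta * k1(alpha * s) / (np.pi * s)
#     ...      * np.exp(delta * np.sqrt(alpha**2 - beta**2) + beta * (x - mu)))
#     >>> np.isclose(g, stats.norminvgauss.pdf(x, alpha*delta, beta*delta,
#     ...                                      loc=mu, scale=delta))
#     True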
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
This distribution is also known as the Fréchet distribution or the
type II extreme value distribution.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is:
.. math::
f(x, c) = c x^{-c-1} \\exp(-x^{-c})
for :math:`x > 0`, :math:`c > 0`.
`invweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _sf(self, x, c):
return -np.expm1(-x**-c)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _isf(self, p, c):
return (-np.log1p(-p))**(-1/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
def _fitstart(self, data, args=None):
# invweibull requires c > 1 for the first moment to exist, so use 2.0
args = (2.0,) if args is None else args
return super()._fitstart(data, args=args)
invweibull = invweibull_gen(a=0, name='invweibull')
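# The "inverted" relation: if X ~ weibull_min(c) then 1/X ~ invweibull(c),
# so cdf(x) = P(X >= 1/x). Illustrative check (public API only):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x, c = 1.3, 2.5
#     >>> np.isclose(stats.invweibull.cdf(x, c), stats.weibull_min.sf(1/x, c))
#     True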
class johnsonsb_gen(rv_continuous):
r"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is:
.. math::
f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} )
where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`
and :math:`x \in [0,1]`. :math:`\phi` is the pdf of the normal
distribution.
`johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _shape_info(self):
ia = _ShapeInfo("a", False, (-np.inf, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ia, ib]
def _pdf(self, x, a, b):
# johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
trm = _norm_pdf(a + b*sc.logit(x))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*sc.logit(x))
def _ppf(self, q, a, b):
return sc.expit(1.0 / b * (_norm_ppf(q) - a))
def _sf(self, x, a, b):
return _norm_sf(a + b*sc.logit(x))
def _isf(self, q, a, b):
return sc.expit(1.0 / b * (_norm_isf(q) - a))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
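# The SB family is a logistic-type transform of a normal variate:
# X = expit((Z - a)/b) for Z standard normal, which the ppf makes
# explicit. Illustrative check:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> from scipy.special import expit
#     >>> a, b, q = 0.5, 1.5, 0.3
#     >>> np.isclose(stats.johnsonsb.ppf(q, a, b),
#     ...            expit((stats.norm.ppf(q) - a) / b))
#     True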
class johnsonsu_gen(rv_continuous):
r"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is:
.. math::
f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
\phi(a + b \log(x + \sqrt{x^2 + 1}))
where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`.
:math:`\phi` is the pdf of the normal distribution.
`johnsonsu` takes :math:`a` and :math:`b` as shape parameters.
The first four central moments are calculated according to the formulas
in [1]_.
%(after_notes)s
References
----------
.. [1] Taylor Enterprises. "Johnson Family of Distributions".
https://variation.com/wp-content/distribution_analyzer_help/hs126.htm
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _shape_info(self):
ia = _ShapeInfo("a", False, (-np.inf, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ia, ib]
def _pdf(self, x, a, b):
# johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
# phi(a + b * log(x + sqrt(x**2 + 1)))
x2 = x*x
trm = _norm_pdf(a + b * np.arcsinh(x))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.arcsinh(x))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
def _sf(self, x, a, b):
return _norm_sf(a + b * np.arcsinh(x))
def _isf(self, x, a, b):
return np.sinh((_norm_isf(x) - a) / b)
def _stats(self, a, b, moments='mv'):
# Naive implementation of first and second moment to address gh-18071.
# https://variation.com/wp-content/distribution_analyzer_help/hs126.htm
# Numerical improvements left to future enhancements.
mu, mu2, g1, g2 = None, None, None, None
bn2 = b**-2.
expbn2 = np.exp(bn2)
a_b = a / b
if 'm' in moments:
mu = -expbn2**0.5 * np.sinh(a_b)
if 'v' in moments:
mu2 = 0.5*sc.expm1(bn2)*(expbn2*np.cosh(2*a_b) + 1)
if 's' in moments:
t1 = expbn2**.5 * sc.expm1(bn2)**0.5
t2 = 3*np.sinh(a_b)
t3 = expbn2 * (expbn2 + 2) * np.sinh(3*a_b)
denom = np.sqrt(2) * (1 + expbn2 * np.cosh(2*a_b))**(3/2)
g1 = -t1 * (t2 + t3) / denom
if 'k' in moments:
t1 = 3 + 6*expbn2
t2 = 4*expbn2**2 * (expbn2 + 2) * np.cosh(2*a_b)
t3 = expbn2**2 * np.cosh(4*a_b)
t4 = -3 + 3*expbn2**2 + 2*expbn2**3 + expbn2**4
denom = 2*(1 + expbn2*np.cosh(2*a_b))**2
g2 = (t1 + t2 + t3*t4) / denom - 3
return mu, mu2, g1, g2
johnsonsu = johnsonsu_gen(name='johnsonsu')
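# Cross-checking the closed-form mean from [1] against numerical
# integration (illustrative only):
#
#     >>> import numpy as np
#     >>> from scipy import integrate, stats
#     >>> a, b = 1.0, 2.0
#     >>> m_num, _ = integrate.quad(lambda t: t * stats.johnsonsu.pdf(t, a, b),
#     ...                           -np.inf, np.inf)
#     >>> np.isclose(m_num, stats.johnsonsu.mean(a, b))
#     True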
class laplace_gen(rv_continuous):
r"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is
.. math::
f(x) = \frac{1}{2} \exp(-|x|)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.laplace(0, 1, size=size)
def _pdf(self, x):
# laplace.pdf(x) = 1/2 * exp(-abs(x))
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
with np.errstate(over='ignore'):
return np.where(x > 0, 1.0 - 0.5*np.exp(-x), 0.5*np.exp(x))
def _sf(self, x):
# By symmetry...
return self._cdf(-x)
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _isf(self, q):
# By symmetry...
return -self._ppf(q)
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
@_call_super_mom
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the Laplace distribution parameters, so the keyword
arguments `loc`, `scale`, and `optimizer` are ignored.\n\n""")
def fit(self, data, *args, **kwds):
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
# and Peacock (2000), Page 124
if floc is None:
floc = np.median(data)
if fscale is None:
fscale = (np.sum(np.abs(data - floc))) / len(data)
return floc, fscale
laplace = laplace_gen(name='laplace')
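# The explicit MLE above: location is the sample median and scale the mean
# absolute deviation about it. Tiny worked example (illustrative): for
# data [1, 2, 4, 8], the median is 3 and the MAD is (2+1+1+5)/4 = 2.25.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> data = np.array([1., 2., 4., 8.])
#     >>> loc, scale = stats.laplace.fit(data)
#     >>> bool(np.isclose(loc, 3.0) and np.isclose(scale, 2.25))
#     True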
class laplace_asymmetric_gen(rv_continuous):
r"""An asymmetric Laplace continuous random variable.
%(before_notes)s
See Also
--------
laplace : Laplace distribution
Notes
-----
The probability density function for `laplace_asymmetric` is
.. math::
f(x, \kappa) &= \frac{1}{\kappa+\kappa^{-1}}\exp(-x\kappa),\quad x\ge0\\
&= \frac{1}{\kappa+\kappa^{-1}}\exp(x/\kappa),\quad x<0\\
for :math:`-\infty < x < \infty`, :math:`\kappa > 0`.
`laplace_asymmetric` takes ``kappa`` as a shape parameter for
:math:`\kappa`. For :math:`\kappa = 1`, it is identical to a
Laplace distribution.
%(after_notes)s
Note that the scale parameter of some references is the reciprocal of
SciPy's ``scale``. For example, :math:`\lambda = 1/2` in the
parameterization of [1]_ is equivalent to ``scale = 2`` with
`laplace_asymmetric`.
References
----------
.. [1] "Asymmetric Laplace distribution", Wikipedia
https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution
.. [2] Kozubowski TJ and Podgórski K. A Multivariate and
Asymmetric Generalization of Laplace Distribution,
Computational Statistics 15, 531--540 (2000).
:doi:`10.1007/PL00022717`
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("kappa", False, (0, np.inf), (False, False))]
def _pdf(self, x, kappa):
return np.exp(self._logpdf(x, kappa))
def _logpdf(self, x, kappa):
kapinv = 1/kappa
lPx = x * np.where(x >= 0, -kappa, kapinv)
lPx -= np.log(kappa+kapinv)
return lPx
def _cdf(self, x, kappa):
kapinv = 1/kappa
kappkapinv = kappa+kapinv
return np.where(x >= 0,
1 - np.exp(-x*kappa)*(kapinv/kappkapinv),
np.exp(x*kapinv)*(kappa/kappkapinv))
def _sf(self, x, kappa):
kapinv = 1/kappa
kappkapinv = kappa+kapinv
return np.where(x >= 0,
np.exp(-x*kappa)*(kapinv/kappkapinv),
1 - np.exp(x*kapinv)*(kappa/kappkapinv))
def _ppf(self, q, kappa):
kapinv = 1/kappa
kappkapinv = kappa+kapinv
return np.where(q >= kappa/kappkapinv,
-np.log((1 - q)*kappkapinv*kappa)*kapinv,
np.log(q*kappkapinv/kappa)*kappa)
def _isf(self, q, kappa):
kapinv = 1/kappa
kappkapinv = kappa+kapinv
return np.where(q <= kapinv/kappkapinv,
-np.log(q*kappkapinv*kappa)*kapinv,
np.log((1 - q)*kappkapinv/kappa)*kappa)
def _stats(self, kappa):
kapinv = 1/kappa
mn = kapinv - kappa
var = kapinv*kapinv + kappa*kappa
g1 = 2.0*(1-np.power(kappa, 6))/np.power(1+np.power(kappa, 4), 1.5)
g2 = 6.0*(1+np.power(kappa, 8))/np.power(1+np.power(kappa, 4), 2)
return mn, var, g1, g2
def _entropy(self, kappa):
return 1 + np.log(kappa+1/kappa)
laplace_asymmetric = laplace_asymmetric_gen(name='laplace_asymmetric')
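# As stated in the docstring, kappa = 1 recovers the symmetric Laplace
# law; illustrative check:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = 0.7
#     >>> np.isclose(stats.laplace_asymmetric.pdf(x, 1.0), stats.laplace.pdf(x))
#     True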
def _check_fit_input_parameters(dist, data, args, kwds):
if not isinstance(data, CensoredData):
data = np.asarray(data)
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
num_shapes = len(dist.shapes.split(",")) if dist.shapes else 0
fshape_keys = []
fshapes = []
# user has many options for fixing the shape, so here we standardize it
# into 'f' + the number of the shape.
# Adapted from `_reduce_func` in `_distn_infrastructure.py`:
if dist.shapes:
shapes = dist.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
fshape_keys.append(key)
fshapes.append(val)
if val is not None:
kwds[key] = val
# determine if there are any unknown arguments in kwds
known_keys = {'loc', 'scale', 'optimizer', 'method',
'floc', 'fscale', *fshape_keys}
unknown_keys = set(kwds).difference(known_keys)
if unknown_keys:
raise TypeError(f"Unknown keyword arguments: {unknown_keys}.")
if len(args) > num_shapes:
raise TypeError("Too many positional arguments.")
if None not in {floc, fscale, *fshapes}:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise RuntimeError("All parameters fixed. There is nothing to "
"optimize.")
uncensored = data._uncensor() if isinstance(data, CensoredData) else data
if not np.isfinite(uncensored).all():
raise ValueError("The data contains non-finite values.")
return (data, *fshapes, floc, fscale)
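# The alias handling above means that, for a one-shape distribution such
# as `invgauss` (shape "mu"), the keywords ``f0``, ``fmu`` and ``fix_mu``
# all fix the same parameter; an illustrative check:
#
#     >>> from scipy import stats
#     >>> data = stats.invgauss.rvs(0.5, size=100, random_state=1234)
#     >>> (stats.invgauss.fit(data, floc=0, fmu=0.5)
#     ...  == stats.invgauss.fit(data, floc=0, fix_mu=0.5))
#     True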
class levy_gen(rv_continuous):
r"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right)
for :math:`x > 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=1`.
%(after_notes)s
Examples
--------
>>> import numpy as np
>>> from scipy.stats import levy
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
>>> mean, var, skew, kurt = levy.stats(moments='mvsk')
Display the probability density function (``pdf``):
>>> # `levy` is very heavy-tailed.
>>> # To show a nice plot, let's cut off the upper 40 percent.
>>> a, b = levy.ppf(0), levy.ppf(0.6)
>>> x = np.linspace(a, b, 100)
>>> ax.plot(x, levy.pdf(x),
... 'r-', lw=5, alpha=0.6, label='levy pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = levy()
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = levy.ppf([0.001, 0.5, 0.999])
>>> np.allclose([0.001, 0.5, 0.999], levy.cdf(vals))
True
Generate random numbers:
>>> r = levy.rvs(size=1000)
And compare the histogram:
>>> # manual binning to ignore the tail
>>> bins = np.concatenate((np.linspace(a, b, 20), [np.max(r)]))
>>> ax.hist(r, bins=bins, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.set_xlim([x[0], x[-1]])
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return []
def _pdf(self, x):
# levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _sf(self, x):
return sc.erf(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = _norm_isf(q/2)
return 1.0 / (val * val)
def _isf(self, p):
return 1/(2*sc.erfinv(p)**2)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
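# Besides the stable-law connection noted in the docstring, `levy` is an
# inverse gamma law with shape 1/2 and scale 1/2; illustrative check:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = 1.3
#     >>> np.isclose(stats.levy.pdf(x), stats.invgamma.pdf(x, 0.5, scale=0.5))
#     True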
class levy_l_gen(rv_continuous):
r"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is:
.. math::
        f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp\left(-\frac{1}{2|x|}\right)
for :math:`x < 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=-1`.
%(after_notes)s
Examples
--------
>>> import numpy as np
>>> from scipy.stats import levy_l
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
>>> mean, var, skew, kurt = levy_l.stats(moments='mvsk')
Display the probability density function (``pdf``):
>>> # `levy_l` is very heavy-tailed.
>>> # To show a nice plot, let's cut off the lower 40 percent.
>>> a, b = levy_l.ppf(0.4), levy_l.ppf(1)
>>> x = np.linspace(a, b, 100)
>>> ax.plot(x, levy_l.pdf(x),
... 'r-', lw=5, alpha=0.6, label='levy_l pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = levy_l()
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = levy_l.ppf([0.001, 0.5, 0.999])
>>> np.allclose([0.001, 0.5, 0.999], levy_l.cdf(vals))
True
Generate random numbers:
>>> r = levy_l.rvs(size=1000)
And compare the histogram:
>>> # manual binning to ignore the tail
>>> bins = np.concatenate(([np.min(r)], np.linspace(a, b, 20)))
>>> ax.hist(r, bins=bins, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.set_xlim([x[0], x[-1]])
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return []
def _pdf(self, x):
# levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _sf(self, x):
ax = abs(x)
return 2 * _norm_sf(1 / np.sqrt(ax))
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _isf(self, p):
return -1/_norm_isf(p/2)**2
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
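# `levy_l` is the mirror image of `levy`: if X ~ levy then -X ~ levy_l,
# so cdf_l(x) == sf(-x) for x < 0. Illustrative check:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = -1.3
#     >>> np.isclose(stats.levy_l.cdf(x), stats.levy.sf(-x))
#     True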
class logistic_gen(rv_continuous):
r"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is:
.. math::
f(x) = \frac{\exp(-x)}
{(1+\exp(-x))^2}
`logistic` is a special case of `genlogistic` with ``c=1``.
Remark that the survival function (``logistic.sf``) is equal to the
Fermi-Dirac distribution describing fermionic statistics.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.logistic(size=size)
def _pdf(self, x):
# logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
y = -np.abs(x)
return y - 2. * sc.log1p(np.exp(y))
def _cdf(self, x):
return sc.expit(x)
def _logcdf(self, x):
return sc.log_expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _logsf(self, x):
return sc.log_expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# https://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
n = len(data)
# rv_continuous provided guesses
loc, scale = self._fitstart(data)
# these are trumped by user-provided guesses
loc, scale = kwds.get('loc', loc), kwds.get('scale', scale)
# the maximum likelihood estimators `a` and `b` of the location and
# scale parameters are roots of the two equations described in `func`.
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings, and
# Peacock (2000), Page 130
def dl_dloc(loc, scale=fscale):
c = (data - loc) / scale
return np.sum(sc.expit(c)) - n/2
def dl_dscale(scale, loc=floc):
c = (data - loc) / scale
return np.sum(c*np.tanh(c/2)) - n
def func(params):
loc, scale = params
return dl_dloc(loc, scale), dl_dscale(scale, loc)
if fscale is not None and floc is None:
res = optimize.root(dl_dloc, (loc,))
loc = res.x[0]
scale = fscale
elif floc is not None and fscale is None:
res = optimize.root(dl_dscale, (scale,))
scale = res.x[0]
loc = floc
else:
res = optimize.root(func, (loc, scale))
loc, scale = res.x
# Note: gh-18176 reported data for which the reported MLE had
# `scale < 0`. To fix the bug, we return abs(scale). This is OK because
# `dl_dscale` and `dl_dloc` are even and odd functions of `scale`,
# respectively, so if `-scale` is a solution, so is `scale`.
scale = abs(scale)
return ((loc, scale) if res.success
else super().fit(data, *args, **kwds))
logistic = logistic_gen(name='logistic')
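# At the fitted parameters, the score equations solved above should be
# (numerically) satisfied; e.g. dl/dloc = 0 reads sum(expit(c)) == n/2
# with c = (data - loc)/scale. Illustrative check:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> from scipy.special import expit
#     >>> rng = np.random.default_rng(1234)
#     >>> data = stats.logistic.rvs(loc=1, scale=2, size=200, random_state=rng)
#     >>> loc, scale = stats.logistic.fit(data)
#     >>> c = (data - loc) / scale
#     >>> np.isclose(np.sum(expit(c)), data.size / 2)
#     True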
class loggamma_gen(rv_continuous):
r"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is:
.. math::
f(x, c) = \frac{\exp(c x - \exp(x))}
{\Gamma(c)}
for all :math:`x, c > 0`. Here, :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`loggamma` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _rvs(self, c, size=None, random_state=None):
# Use the property of the gamma distribution Gamma(c)
# Gamma(c) ~ Gamma(c + 1)*U**(1/c),
# where U is uniform on [0, 1]. (See, e.g.,
# G. Marsaglia and W.W. Tsang, "A simple method for generating gamma
# variables", https://doi.org/10.1145/358407.358414)
# So
# log(Gamma(c)) ~ log(Gamma(c + 1)) + log(U)/c
# Generating a sample with this formulation is a bit slower
# than the more obvious log(Gamma(c)), but it avoids loss
# of precision when c << 1.
return (np.log(random_state.gamma(c + 1, size=size))
+ np.log(random_state.uniform(size=size))/c)
def _pdf(self, x, c):
# loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _logpdf(self, x, c):
return c*x - np.exp(x) - sc.gammaln(c)
def _cdf(self, x, c):
# This function is gammainc(c, exp(x)), where gammainc(c, z) is
# the regularized incomplete gamma function.
        # The first term in a series expansion of gammainc(c, z) is
# z**c/Gamma(c+1); see 6.5.29 of Abramowitz & Stegun (and refer
# back to 6.5.1, 6.5.2 and 6.5.4 for the relevant notation).
# This can also be found in the wikipedia article
# https://en.wikipedia.org/wiki/Incomplete_gamma_function.
# Here we use that formula when x is sufficiently negative that
# exp(x) will result in subnormal numbers and lose precision.
# We evaluate the log of the expression first to allow the possible
# cancellation of the terms in the division, and then exponentiate.
# That is,
# exp(x)**c/Gamma(c+1) = exp(log(exp(x)**c/Gamma(c+1)))
# = exp(c*x - gammaln(c+1))
return _lazywhere(x < _LOGXMIN, (x, c),
lambda x, c: np.exp(c*x - sc.gammaln(c+1)),
f2=lambda x, c: sc.gammainc(c, np.exp(x)))
def _ppf(self, q, c):
# The expression used when g < _XMIN inverts the one term expansion
# given in the comments of _cdf().
g = sc.gammaincinv(c, q)
return _lazywhere(g < _XMIN, (g, q, c),
lambda g, q, c: (np.log(q) + sc.gammaln(c+1))/c,
f2=lambda g, q, c: np.log(g))
def _sf(self, x, c):
# See the comments for _cdf() for how x < _LOGXMIN is handled.
return _lazywhere(x < _LOGXMIN, (x, c),
lambda x, c: -np.expm1(c*x - sc.gammaln(c+1)),
f2=lambda x, c: sc.gammaincc(c, np.exp(x)))
def _isf(self, q, c):
# The expression used when g < _XMIN inverts the complement of
# the one term expansion given in the comments of _cdf().
g = sc.gammainccinv(c, q)
return _lazywhere(g < _XMIN, (g, q, c),
lambda g, q, c: (np.log1p(-q) + sc.gammaln(c+1))/c,
f2=lambda g, q, c: np.log(g))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
def _entropy(self, c):
def regular(c):
h = sc.gammaln(c) - c * sc.digamma(c) + c
return h
def asymptotic(c):
# using asymptotic expansions for gammaln and psi (see gh-18093)
term = -0.5*np.log(c) + c**-1./6 - c**-3./90 + c**-5./210
h = norm._entropy() + term
return h
h = _lazywhere(c >= 45, (c, ), f=asymptotic, f2=regular)
return h
loggamma = loggamma_gen(name='loggamma')
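# Exercising the small-x branch of `_cdf`: for x well below _LOGXMIN the
# one-term expansion gives log(cdf) = c*x - gammaln(c + 1) (illustrative):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> from scipy.special import gammaln
#     >>> c, x = 0.5, -800.0            # exp(x) underflows to 0
#     >>> np.isclose(np.log(stats.loggamma.cdf(x, c)), c*x - gammaln(c + 1))
#     True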
class loglaplace_gen(rv_continuous):
r"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is:
.. math::
f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\
\frac{c}{2} x^{-c-1} &\text{for } x \ge 1
\end{cases}
for :math:`c > 0`.
`loglaplace` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
# = c / 2 * x**(-c-1), for x >= 1
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _sf(self, x, c):
return np.where(x < 1, 1 - 0.5*x**c, 0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _isf(self, q, c):
return np.where(q > 0.5, (2.0*(1.0 - q))**(1.0/c), (2*q)**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
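# `loglaplace` is the law of exp(Y/c) for Y Laplace, i.e. a monotone
# reparametrization of `laplace` on the log scale; illustrative check:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x, c = 1.3, 2.0
#     >>> np.isclose(stats.loglaplace.cdf(x, c), stats.laplace.cdf(c*np.log(x)))
#     True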
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
r"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is:
.. math::
f(x, s) = \frac{1}{s x \sqrt{2\pi}}
\exp\left(-\frac{\log^2(x)}{2s^2}\right)
for :math:`x > 0`, :math:`s > 0`.
`lognorm` takes ``s`` as a shape parameter for :math:`s`.
%(after_notes)s
Suppose a normally distributed random variable ``X`` has mean ``mu`` and
standard deviation ``sigma``. Then ``Y = exp(X)`` is lognormally
distributed with ``s = sigma`` and ``scale = exp(mu)``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return [_ShapeInfo("s", False, (0, np.inf), (False, False))]
def _rvs(self, s, size=None, random_state=None):
return np.exp(s * random_state.standard_normal(size))
def _pdf(self, x, s):
# lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _isf(self, q, s):
return np.exp(s * _norm_isf(q))
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt(p-1)*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
When `method='MLE'` and
the location parameter is fixed by using the `floc` argument,
this function uses explicit formulas for the maximum likelihood
estimation of the log-normal shape and scale parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.
If the location is free, a likelihood maximum is found by
        setting its partial derivative with respect to location to 0, and
solving by substituting the analytical expressions of shape
and scale (or provided parameters).
See, e.g., equation 3.1 in
A. Clifford Cohen & Betty Jones Whitten (1980)
Estimation in the Three-Parameter Lognormal Distribution,
Journal of the American Statistical Association, 75:370, 399-404
https://doi.org/10.2307/2287466
\n\n""")
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
parameters = _check_fit_input_parameters(self, data, args, kwds)
data, fshape, floc, fscale = parameters
data_min = np.min(data)
def get_shape_scale(loc):
# Calculate maximum likelihood scale and shape with analytical
# formulas unless provided by the user
if fshape is None or fscale is None:
lndata = np.log(data - loc)
scale = fscale or np.exp(lndata.mean())
shape = fshape or np.sqrt(np.mean((lndata - np.log(scale))**2))
return shape, scale
def dL_dLoc(loc):
# Derivative of (positive) LL w.r.t. loc
shape, scale = get_shape_scale(loc)
shifted = data - loc
return np.sum((1 + np.log(shifted/scale)/shape**2)/shifted)
def ll(loc):
# (Positive) log-likelihood
shape, scale = get_shape_scale(loc)
return -self.nnlf((shape, loc, scale), data)
if floc is None:
# The location must be less than the minimum of the data.
# Back off a bit to avoid numerical issues.
spacing = np.spacing(data_min)
rbrack = data_min - spacing
# Find the right end of the bracket by successive doubling of the
# distance to data_min. We're interested in a maximum LL, so the
# slope dL_dLoc_rbrack should be negative at the right end.
# optimization for later: share shape, scale
dL_dLoc_rbrack = dL_dLoc(rbrack)
ll_rbrack = ll(rbrack)
delta = 2 * spacing # 2 * (data_min - rbrack)
while dL_dLoc_rbrack >= -1e-6:
rbrack = data_min - delta
dL_dLoc_rbrack = dL_dLoc(rbrack)
delta *= 2
if not np.isfinite(rbrack) or not np.isfinite(dL_dLoc_rbrack):
# If we never find a negative slope, either we missed it or the
# slope is always positive. It's usually the latter,
# which means
# loc = data_min - spacing
# But sometimes when shape and/or scale are fixed there are
# other issues, so be cautious.
return super().fit(data, *args, **kwds)
# Now find the left end of the bracket. Guess is `rbrack-1`
# unless that is too small of a difference to resolve. Double
# the size of the interval until the left end is found.
lbrack = np.minimum(np.nextafter(rbrack, -np.inf), rbrack-1)
dL_dLoc_lbrack = dL_dLoc(lbrack)
delta = 2 * (rbrack - lbrack)
while (np.isfinite(lbrack) and np.isfinite(dL_dLoc_lbrack)
and np.sign(dL_dLoc_lbrack) == np.sign(dL_dLoc_rbrack)):
lbrack = rbrack - delta
dL_dLoc_lbrack = dL_dLoc(lbrack)
delta *= 2
# I don't recall observing this, but just in case...
if not np.isfinite(lbrack) or not np.isfinite(dL_dLoc_lbrack):
return super().fit(data, *args, **kwds)
# If we have a valid bracket, find the root
res = root_scalar(dL_dLoc, bracket=(lbrack, rbrack))
if not res.converged:
return super().fit(data, *args, **kwds)
# If the slope was positive near the minimum of the data,
# the maximum LL could be there instead of at the root. Compare
# the LL of the two points to decide.
ll_root = ll(res.root)
loc = res.root if ll_root > ll_rbrack else data_min-spacing
else:
if floc >= data_min:
raise FitDataError("lognorm", lower=0., upper=np.inf)
loc = floc
shape, scale = get_shape_scale(loc)
if not (self._argcheck(shape) and scale > 0):
return super().fit(data, *args, **kwds)
return shape, loc, scale
lognorm = lognorm_gen(a=0.0, name='lognorm')
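# A minimal sketch of the relationship stated in the class notes (the
# helper is hypothetical, not part of scipy): if X ~ Normal(mu, sigma),
# the change of variables Y = exp(X) gives f_Y(y) = f_X(log y) / y, which
# should match ``lognorm(s=sigma, scale=exp(mu))``.
def _sketch_lognorm_from_normal(mu=0.3, sigma=0.8):
    x = np.linspace(0.1, 4, 7)
    direct = lognorm.pdf(x, s=sigma, scale=np.exp(mu))
    via_norm = norm.pdf(np.log(x), loc=mu, scale=sigma) / x
    return np.allclose(direct, via_norm)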
class gibrat_gen(rv_continuous):
r"""A Gibrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gibrat` is:
.. math::
f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)
    for :math:`x > 0`.
    `gibrat` is a special case of `lognorm` with ``s=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return np.exp(random_state.standard_normal(size))
def _pdf(self, x):
# gibrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _sf(self, x):
return _norm_sf(np.log(x))
def _isf(self, p):
return np.exp(_norm_isf(p))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt(p - 1) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gibrat = gibrat_gen(a=0.0, name='gibrat')
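# Illustrative check of the note above (hypothetical helper, not library
# code): `gibrat` should agree pointwise with `lognorm` frozen at ``s=1``.
def _sketch_gibrat_is_lognorm_s1():
    x = np.linspace(0.1, 5, 9)
    return np.allclose(gibrat.pdf(x), lognorm.pdf(x, 1.0))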
class maxwell_gen(rv_continuous):
r"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df=3``, ``loc=0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is:
.. math::
f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)
    for :math:`x \ge 0`.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return chi.rvs(3.0, size=size, random_state=random_state)
def _pdf(self, x):
# maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
return _SQRT_2_OVER_PI*x*x*np.exp(-x*x/2.0)
def _logpdf(self, x):
# Allow x=0 without 'divide by zero' warnings
with np.errstate(divide='ignore'):
return _LOG_SQRT_2_OVER_PI + 2*np.log(x) - 0.5*x*x
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _sf(self, x):
return sc.gammaincc(1.5, x*x/2.0)
def _isf(self, q):
return np.sqrt(2*sc.gammainccinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
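# Illustrative check of the chi-distribution connection mentioned in the
# notes (hypothetical helper, not library code): `maxwell` should agree
# pointwise with `chi` at ``df=3``.
def _sketch_maxwell_is_chi3():
    x = np.linspace(0.1, 3, 6)
    return np.allclose(maxwell.pdf(x), chi.pdf(x, 3))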
class mielke_gen(rv_continuous):
r"""A Mielke Beta-Kappa / Dagum continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is:
.. math::
f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}
    for :math:`x > 0` and :math:`k, s > 0`. The distribution is sometimes
    called the Dagum distribution ([2]_). It was already defined in [3]_ as
    the Burr Type III distribution (`burr` with parameters ``c=s`` and
    ``d=k/s``).
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
References
----------
.. [1] Mielke, P.W., 1973 "Another Family of Distributions for Describing
and Analyzing Precipitation Data." J. Appl. Meteor., 12, 275-280
.. [2] Dagum, C., 1977 "A new model for personal income distribution."
Economie Appliquee, 33, 327-367.
.. [3] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
def _shape_info(self):
ik = _ShapeInfo("k", False, (0, np.inf), (False, False))
i_s = _ShapeInfo("s", False, (0, np.inf), (False, False))
return [ik, i_s]
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _logpdf(self, x, k, s):
# Allow x=0 without 'divide by zero' warnings.
with np.errstate(divide='ignore'):
return np.log(k) + np.log(x)*(k - 1) - np.log1p(x**s)*(1 + k/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
def _munp(self, n, k, s):
def nth_moment(n, k, s):
# n-th moment is defined for -k < n < s
return sc.gamma((k+n)/s)*sc.gamma(1-n/s)/sc.gamma(k/s)
return _lazywhere(n < s, (n, k, s), nth_moment, np.inf)
mielke = mielke_gen(a=0.0, name='mielke')
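# Illustrative check of the Burr Type III connection stated in the notes
# (hypothetical helper, not library code): ``mielke(k, s)`` should match
# ``burr`` with ``c=s`` and ``d=k/s``.
def _sketch_mielke_is_burr3(k=3.0, s=2.0):
    x = np.linspace(0.1, 4, 8)
    return np.allclose(mielke.pdf(x, k, s), burr.pdf(x, c=s, d=k/s))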
class kappa4_gen(rv_continuous):
r"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is:
.. math::
f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
if :math:`h` and :math:`k` are not equal to 0.
If :math:`h` or :math:`k` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes :math:`h` and :math:`k` as shape parameters.
The kappa4 distribution returns other distributions when certain
:math:`h` and :math:`k` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
https://digitalcommons.lsu.edu/gradschool_dissertations/3672
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
:doi:`10.4236/jwarp.2012.410101`
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
shape = np.broadcast_arrays(h, k)[0].shape
return np.full(shape, fill_value=True)
def _shape_info(self):
ih = _ShapeInfo("h", False, (-np.inf, np.inf), (False, False))
ik = _ShapeInfo("k", False, (-np.inf, np.inf), (False, False))
return [ih, ik]
def _get_support(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - np.float_power(h, -k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
_a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
_b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return _a, _b
def _pdf(self, x, h, k):
# kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
# (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _get_stats_info(self, h, k):
condlist = [
np.logical_and(h < 0, k >= 0),
k < 0,
]
def f0(h, k):
return (-1.0/h*k).astype(int)
def f1(h, k):
return (-1.0/k).astype(int)
return _lazyselect(condlist, [f0, f1], [h, k], default=5)
def _stats(self, h, k):
maxr = self._get_stats_info(h, k)
outputs = [None if np.any(r < maxr) else np.nan for r in range(1, 5)]
return outputs[:]
def _mom1_sc(self, m, *args):
maxr = self._get_stats_info(args[0], args[1])
if m >= maxr:
return np.nan
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
kappa4 = kappa4_gen(name='kappa4')
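# Illustrative spot-check of two rows of the special-case table in the
# docstring above (hypothetical helper, not library code): ``h=0, k=0``
# recovers `gumbel_r`, and ``h=1, k=0`` recovers `expon`.
def _sketch_kappa4_special_cases():
    x = np.linspace(0.1, 3, 6)
    gumbel_case = np.allclose(kappa4.pdf(x, 0.0, 0.0), gumbel_r.pdf(x))
    expon_case = np.allclose(kappa4.pdf(x, 1.0, 0.0), expon.pdf(x))
    return gumbel_case and expon_case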
class kappa3_gen(rv_continuous):
r"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for `kappa3` is:
.. math::
f(x, a) = a (a + x^a)^{-(a + 1)/a}
for :math:`x > 0` and :math:`a > 0`.
`kappa3` takes ``a`` as a shape parameter for :math:`a`.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
701-707, (September, 1973),
:doi:`10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2`
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012), :doi:`10.4236/ojs.2012.24050`
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
def _pdf(self, x, a):
# kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a), for x > 0
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _sf(self, x, a):
x, a = np.broadcast_arrays(x, a) # some code paths pass scalars
sf = super()._sf(x, a)
# When the SF is small, another formulation is typically more accurate.
# However, it blows up for large `a`, so use it only if it also returns
# a small value of the SF.
cutoff = 0.01
i = sf < cutoff
sf2 = -sc.expm1(sc.xlog1py(-1.0 / a[i], a[i] * x[i]**-a[i]))
i2 = sf2 > cutoff
sf2[i2] = sf[i][i2] # replace bad values with original values
sf[i] = sf2
return sf
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _isf(self, q, a):
lg = sc.xlog1py(-a, -q)
denom = sc.expm1(lg)
return (a / denom)**(1.0 / a)
def _stats(self, a):
outputs = [None if np.any(i < a) else np.nan for i in range(1, 5)]
return outputs[:]
def _mom1_sc(self, m, *args):
if np.any(m >= args[0]):
return np.nan
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
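# Quick round-trip sketch (illustrative only; the helper is hypothetical):
# ``_isf`` should invert ``_sf``, including in the small-SF regime where
# ``_sf`` switches to the expm1-based formulation.
def _sketch_kappa3_isf_roundtrip(a=2.0):
    q = np.array([1e-6, 0.005, 0.3])
    return np.allclose(kappa3.sf(kappa3.isf(q, a), a), q)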
class moyal_gen(rv_continuous):
r"""A Moyal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `moyal` is:
.. math::
f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}
for a real number :math:`x`.
%(after_notes)s
This distribution has utility in high-energy physics and radiation
detection. It describes the energy loss of a charged relativistic
particle due to ionization of the medium [1]_. It also provides an
    approximation for the Landau distribution. For an in-depth description,
    see [2]_; for additional background, see [3]_.
References
----------
.. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
The London, Edinburgh, and Dublin Philosophical Magazine
and Journal of Science, vol 46, 263-280, (1955).
:doi:`10.1080/14786440308521076` (gated)
.. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
International Journal of Research and Reviews in Applied Sciences,
vol 10, 171-192, (2012).
http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
.. [3] C. Walck, "Handbook on Statistical Distributions for
Experimentalists; International Report SUF-PFY/96-01", Chapter 26,
University of Stockholm: Stockholm, Sweden, (2007).
http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
.. versionadded:: 1.1.0
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
u1 = gamma.rvs(a=0.5, scale=2, size=size,
random_state=random_state)
return -np.log(u1)
def _pdf(self, x):
return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)
def _cdf(self, x):
return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))
def _sf(self, x):
return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))
def _ppf(self, x):
return -np.log(2 * sc.erfcinv(x)**2)
def _stats(self):
mu = np.log(2) + np.euler_gamma
mu2 = np.pi**2 / 2
g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
g2 = 4.
return mu, mu2, g1, g2
def _munp(self, n):
if n == 1.0:
return np.log(2) + np.euler_gamma
elif n == 2.0:
return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2
elif n == 3.0:
tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma)
tmp2 = (np.log(2)+np.euler_gamma)**3
tmp3 = 14 * sc.zeta(3)
return tmp1 + tmp2 + tmp3
elif n == 4.0:
tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma)
tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2
tmp3 = (np.log(2) + np.euler_gamma)**4
tmp4 = 7 * np.pi**4 / 4
return tmp1 + tmp2 + tmp3 + tmp4
else:
            # fall back to generic numerical integration for higher moments
            return self._mom1_sc(n)
moyal = moyal_gen(name="moyal")
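# Illustrative consistency check (hypothetical helper, not library code):
# the closed-form first and second moments in ``_munp`` should reproduce
# the mean and variance returned by ``_stats``.
def _sketch_moyal_moments():
    m1 = moyal.moment(1)
    var = moyal.moment(2) - m1**2
    return np.allclose([m1, var], [np.log(2) + np.euler_gamma, np.pi**2 / 2])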
class nakagami_gen(rv_continuous):
r"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is:
.. math::
f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)
    for :math:`x \ge 0`, :math:`\nu > 0`. The distribution was introduced in
[2]_, see also [1]_ for further information.
`nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.
%(after_notes)s
References
----------
.. [1] "Nakagami distribution", Wikipedia
https://en.wikipedia.org/wiki/Nakagami_distribution
.. [2] M. Nakagami, "The m-distribution - A general formula of intensity
distribution of rapid fading", Statistical methods in radio wave
propagation, Pergamon Press, 1960, 3-36.
:doi:`10.1016/B978-0-08-009306-2.50005-4`
%(example)s
"""
def _argcheck(self, nu):
return nu > 0
def _shape_info(self):
return [_ShapeInfo("nu", False, (0, np.inf), (False, False))]
def _pdf(self, x, nu):
return np.exp(self._logpdf(x, nu))
def _logpdf(self, x, nu):
# nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
# x**(2*nu-1) * exp(-nu*x**2)
return (np.log(2) + sc.xlogy(nu, nu) - sc.gammaln(nu) +
sc.xlogy(2*nu - 1, x) - nu*x**2)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _sf(self, x, nu):
return sc.gammaincc(nu, nu*x*x)
def _isf(self, p, nu):
return np.sqrt(1/nu * sc.gammainccinv(nu, p))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
def _entropy(self, nu):
shape = np.shape(nu)
        # make `nu` at least 1-d so the masked assignment below also works
        # for scalar input; the original shape is restored before returning
nu = np.atleast_1d(nu)
A = sc.gammaln(nu)
B = nu - (nu - 0.5) * sc.digamma(nu)
C = -0.5 * np.log(nu) - np.log(2)
h = A + B + C
# This is the asymptotic sum of A and B (see gh-17868)
norm_entropy = stats.norm._entropy()
# Above, this is lost to rounding error for large nu, so use the
# asymptotic sum when the approximation becomes accurate
i = nu > 5e4 # roundoff error ~ approximation error
# -1 / (12 * nu) is the O(1/nu) term; see gh-17929
h[i] = C[i] + norm_entropy - 1/(12*nu[i])
return h.reshape(shape)[()]
def _rvs(self, nu, size=None, random_state=None):
# this relationship can be found in [1] or by a direct calculation
return np.sqrt(random_state.standard_gamma(nu, size=size) / nu)
def _fitstart(self, data, args=None):
if isinstance(data, CensoredData):
data = data._uncensor()
if args is None:
args = (1.0,) * self.numargs
# Analytical justified estimates
# see: https://docs.scipy.org/doc/scipy/reference/tutorial/stats/continuous_nakagami.html
loc = np.min(data)
scale = np.sqrt(np.sum((data - loc)**2) / len(data))
return args + (loc, scale)
nakagami = nakagami_gen(a=0.0, name="nakagami")
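# Illustrative sketch of the gamma connection used throughout this class
# (hypothetical helper, not library code): if X ~ nakagami(nu), then
# nu*X**2 ~ gamma(nu), so the CDFs should agree under that transformation.
def _sketch_nakagami_gamma_link(nu=1.5):
    x = np.linspace(0.1, 3, 6)
    return np.allclose(nakagami.cdf(x, nu), gamma.cdf(nu * x**2, nu))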
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = sc.xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = sc.ive(df2, xs*ns) / 2.0
# Return res + np.log(corr) avoiding np.log(0)
return _lazywhere(
corr > 0,
(res, corr),
f=lambda r, c: r + np.log(c),
fillvalue=-np.inf)
class ncx2_gen(rv_continuous):
r"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is:
.. math::
f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
(x/\lambda)^{(k-2)/4} I_{(k-2)/2}(\sqrt{\lambda x})
    for :math:`x \ge 0`, :math:`k > 0` and :math:`\lambda \ge 0`.
:math:`k` specifies the degrees of freedom (denoted ``df`` in the
implementation) and :math:`\lambda` is the non-centrality parameter
(denoted ``nc`` in the implementation). :math:`I_\nu` denotes the
    modified Bessel function of the first kind of order :math:`\nu`
(`scipy.special.iv`).
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & np.isfinite(df) & (nc >= 0)
def _shape_info(self):
idf = _ShapeInfo("df", False, (0, np.inf), (False, False))
inc = _ShapeInfo("nc", False, (0, np.inf), (True, False))
return [idf, inc]
def _rvs(self, df, nc, size=None, random_state=None):
return random_state.noncentral_chisquare(df, nc, size)
def _logpdf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_log_pdf,
f2=lambda x, df, _: chi2._logpdf(x, df))
def _pdf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
with np.errstate(over='ignore'): # see gh-17432
return _lazywhere(cond, (x, df, nc), f=_boost._ncx2_pdf,
f2=lambda x, df, _: chi2._pdf(x, df))
def _cdf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
with np.errstate(over='ignore'): # see gh-17432
return _lazywhere(cond, (x, df, nc), f=_boost._ncx2_cdf,
f2=lambda x, df, _: chi2._cdf(x, df))
def _ppf(self, q, df, nc):
cond = np.ones_like(q, dtype=bool) & (nc != 0)
with np.errstate(over='ignore'): # see gh-17432
return _lazywhere(cond, (q, df, nc), f=_boost._ncx2_ppf,
f2=lambda x, df, _: chi2._ppf(x, df))
def _sf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
with np.errstate(over='ignore'): # see gh-17432
return _lazywhere(cond, (x, df, nc), f=_boost._ncx2_sf,
f2=lambda x, df, _: chi2._sf(x, df))
def _isf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
with np.errstate(over='ignore'): # see gh-17432
return _lazywhere(cond, (x, df, nc), f=_boost._ncx2_isf,
f2=lambda x, df, _: chi2._isf(x, df))
def _stats(self, df, nc):
return (
_boost._ncx2_mean(df, nc),
_boost._ncx2_variance(df, nc),
_boost._ncx2_skewness(df, nc),
_boost._ncx2_kurtosis_excess(df, nc),
)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
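# Illustrative check of the ``nc == 0`` fallback wired into every method
# above (hypothetical helper, not library code): with zero noncentrality,
# `ncx2` should reduce exactly to `chi2`.
def _sketch_ncx2_reduces_to_chi2(df=4.0):
    x = np.linspace(0.1, 10, 6)
    return np.allclose(ncx2.pdf(x, df, 0.0), chi2.pdf(x, df))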
class ncf_gen(rv_continuous):
r"""A non-central F distribution continuous random variable.
%(before_notes)s
See Also
--------
scipy.stats.f : Fisher distribution
Notes
-----
The probability density function for `ncf` is:
.. math::
f(x, n_1, n_2, \lambda) =
\exp\left(\frac{\lambda}{2} +
\lambda n_1 \frac{x}{2(n_1 x + n_2)}
\right)
n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
(n_2 + n_1 x)^{-(n_1 + n_2)/2}
\gamma(n_1/2) \gamma(1 + n_2/2) \\
\frac{L^{\frac{n_1}{2}-1}_{n_2/2}
\left(-\lambda n_1 \frac{x}{2(n_1 x + n_2)}\right)}
{B(n_1/2, n_2/2)
\gamma\left(\frac{n_1 + n_2}{2}\right)}
for :math:`n_1, n_2 > 0`, :math:`\lambda \ge 0`. Here :math:`n_1` is the
degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
the denominator, :math:`\lambda` the non-centrality parameter,
:math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
generalized Laguerre polynomial and :math:`B` is the beta function.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters. If ``nc=0``,
the distribution becomes equivalent to the Fisher distribution.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df1, df2, nc):
return (df1 > 0) & (df2 > 0) & (nc >= 0)
def _shape_info(self):
idf1 = _ShapeInfo("df1", False, (0, np.inf), (False, False))
idf2 = _ShapeInfo("df2", False, (0, np.inf), (False, False))
inc = _ShapeInfo("nc", False, (0, np.inf), (True, False))
return [idf1, idf2, inc]
def _rvs(self, dfn, dfd, nc, size=None, random_state=None):
return random_state.noncentral_f(dfn, dfd, nc, size)
def _pdf(self, x, dfn, dfd, nc):
# ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
# df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
# (df2+df1*x)**(-(df1+df2)/2) *
# gamma(df1/2)*gamma(1+df2/2) *
# L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
# (B(v1/2, v2/2) * gamma((v1+v2)/2))
return _boost._ncf_pdf(x, dfn, dfd, nc)
def _cdf(self, x, dfn, dfd, nc):
return _boost._ncf_cdf(x, dfn, dfd, nc)
def _ppf(self, q, dfn, dfd, nc):
with np.errstate(over='ignore'): # see gh-17432
return _boost._ncf_ppf(q, dfn, dfd, nc)
def _sf(self, x, dfn, dfd, nc):
return _boost._ncf_sf(x, dfn, dfd, nc)
def _isf(self, x, dfn, dfd, nc):
with np.errstate(over='ignore'): # see gh-17432
return _boost._ncf_isf(x, dfn, dfd, nc)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc, moments='mv'):
mu = _boost._ncf_mean(dfn, dfd, nc)
mu2 = _boost._ncf_variance(dfn, dfd, nc)
g1 = _boost._ncf_skewness(dfn, dfd, nc) if 's' in moments else None
g2 = _boost._ncf_kurtosis_excess(
dfn, dfd, nc) if 'k' in moments else None
return mu, mu2, g1, g2
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
r"""A Student's t continuous random variable.
For the noncentral t distribution, see `nct`.
%(before_notes)s
See Also
--------
nct
Notes
-----
The probability density function for `t` is:
.. math::
f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
{\sqrt{\pi \nu} \Gamma(\nu/2)}
(1+x^2/\nu)^{-(\nu+1)/2}
where :math:`x` is a real number and the degrees of freedom parameter
:math:`\nu` (denoted ``df`` in the implementation) satisfies
:math:`\nu > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("df", False, (0, np.inf), (False, False))]
def _rvs(self, df, size=None, random_state=None):
return random_state.standard_t(df, size=size)
def _pdf(self, x, df):
return _lazywhere(
df == np.inf, (x, df),
f=lambda x, df: norm._pdf(x),
f2=lambda x, df: (
np.exp(self._logpdf(x, df))
)
)
def _logpdf(self, x, df):
def regular_formula(x, df):
return (sc.gammaln((df + 1)/2) - sc.gammaln(df/2)
- (0.5 * np.log(df*np.pi))
- (df + 1)/2*np.log1p(x * x/df))
def asymptotic_formula(x, df):
return (- 0.5 * (1 + np.log(2 * np.pi)) + df/2 * np.log1p(1/df)
+ 1/6 * (df + 1)**-1. - 1/45*(df + 1)**-3.
- 1/6 * df**-1. + 1/45*df**-3.
- (df + 1)/2 * np.log1p(x*x/df))
def norm_logpdf(x, df):
return norm._logpdf(x)
return _lazyselect(
((df == np.inf),
(df >= 200) & np.isfinite(df),
(df < 200)),
(norm_logpdf,
asymptotic_formula,
regular_formula),
(x, df, )
)
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
# infinite df -> normal distribution (0.0, 1.0, 0.0, 0.0)
infinite_df = np.isposinf(df)
mu = np.where(df > 1, 0.0, np.inf)
condlist = ((df > 1) & (df <= 2),
(df > 2) & np.isfinite(df),
infinite_df)
choicelist = (lambda df: np.broadcast_to(np.inf, df.shape),
lambda df: df / (df-2.0),
lambda df: np.broadcast_to(1, df.shape))
mu2 = _lazyselect(condlist, choicelist, (df,), np.nan)
g1 = np.where(df > 3, 0.0, np.nan)
condlist = ((df > 2) & (df <= 4),
(df > 4) & np.isfinite(df),
infinite_df)
choicelist = (lambda df: np.broadcast_to(np.inf, df.shape),
lambda df: 6.0 / (df-4.0),
lambda df: np.broadcast_to(0, df.shape))
g2 = _lazyselect(condlist, choicelist, (df,), np.nan)
return mu, mu2, g1, g2
def _entropy(self, df):
if df == np.inf:
return norm._entropy()
def regular(df):
half = df/2
half1 = (df + 1)/2
return (half1*(sc.digamma(half1) - sc.digamma(half))
+ np.log(np.sqrt(df)*sc.beta(half, 0.5)))
def asymptotic(df):
# Formula from Wolfram Alpha:
# "asymptotic expansion (d+1)/2 * (digamma((d+1)/2) - digamma(d/2))
# + log(sqrt(d) * beta(d/2, 1/2))"
h = (norm._entropy() + 1/df + (df**-2.)/4 - (df**-3.)/6
- (df**-4.)/8 + 3/10*(df**-5.) + (df**-6.)/4)
return h
h = _lazywhere(df >= 100, (df, ), f=asymptotic, f2=regular)
return h
t = t_gen(name='t')
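# Illustrative check of the infinite-df branches above (hypothetical
# helper, not library code): Student's t converges to the standard normal,
# and ``df=np.inf`` is handled explicitly in ``_pdf`` and ``_stats``.
def _sketch_t_normal_limit():
    x = np.linspace(-3, 3, 7)
    return np.allclose(t.pdf(x, np.inf), norm.pdf(x))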
class nct_gen(rv_continuous):
r"""A non-central Student's t continuous random variable.
%(before_notes)s
Notes
-----
If :math:`Y` is a standard normal random variable and :math:`V` is
an independent chi-square random variable (`chi2`) with :math:`k` degrees
of freedom, then
.. math::
X = \frac{Y + c}{\sqrt{V/k}}
has a non-central Student's t distribution on the real line.
The degrees of freedom parameter :math:`k` (denoted ``df`` in the
implementation) satisfies :math:`k > 0` and the noncentrality parameter
:math:`c` (denoted ``nc`` in the implementation) is a real number.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
        # `nc == nc` is False for NaN, so this rejects NaN noncentrality
        return (df > 0) & (nc == nc)
def _shape_info(self):
idf = _ShapeInfo("df", False, (0, np.inf), (False, False))
inc = _ShapeInfo("nc", False, (-np.inf, np.inf), (False, False))
return [idf, inc]
def _rvs(self, df, nc, size=None, random_state=None):
n = norm.rvs(loc=nc, size=size, random_state=random_state)
c2 = chi2.rvs(df, size=size, random_state=random_state)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
# Boost version has accuracy issues in left tail; see gh-16591
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = (n/2.*np.log(n) + sc.gammaln(n+1)
- (n*np.log(2) + nc*nc/2 + (n/2)*np.log(fac1)
+ sc.gammaln(n/2)))
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = (np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
/ np.asarray(fac1*sc.gamma((n+1)/2)))
trm2 = (sc.hyp1f1((n+1)/2, 0.5, valF)
/ np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1)))
Px *= trm1+trm2
return np.clip(Px, 0, None)
def _cdf(self, x, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return np.clip(_boost._nct_cdf(x, df, nc), 0, 1)
def _ppf(self, q, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return _boost._nct_ppf(q, df, nc)
def _sf(self, x, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return np.clip(_boost._nct_sf(x, df, nc), 0, 1)
def _isf(self, x, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return _boost._nct_isf(x, df, nc)
def _stats(self, df, nc, moments='mv'):
mu = _boost._nct_mean(df, nc)
mu2 = _boost._nct_variance(df, nc)
g1 = _boost._nct_skewness(df, nc) if 's' in moments else None
g2 = _boost._nct_kurtosis_excess(df, nc) if 'k' in moments else None
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
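# Illustrative sanity check (hypothetical helper, not library code): with
# zero noncentrality the hypergeometric series in ``_pdf`` collapses and
# `nct` should agree with the central `t` distribution.
def _sketch_nct_reduces_to_t(df=7.0):
    x = np.linspace(-3, 3, 7)
    return np.allclose(nct.pdf(x, df, 0.0), t.pdf(x, df))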
class pareto_gen(rv_continuous):
r"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is:
.. math::
f(x, b) = \frac{b}{x^{b+1}}
for :math:`x \ge 1`, :math:`b > 0`.
`pareto` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("b", False, (0, np.inf), (False, False))]
def _pdf(self, x, b):
# pareto.pdf(x, b) = b / x**(b+1)
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _sf(self, x, b):
return x**(-b)
def _isf(self, q, b):
return np.power(q, -1.0 / b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = np.full(np.shape(b), fill_value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = np.full(np.shape(b), fill_value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = np.full(np.shape(b), fill_value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = np.full(np.shape(b), fill_value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
    def _entropy(self, b):
        return 1 + 1.0/b - np.log(b)
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
parameters = _check_fit_input_parameters(self, data, args, kwds)
data, fshape, floc, fscale = parameters
# ensure that any fixed parameters don't violate constraints of the
# distribution before continuing.
if floc is not None and np.min(data) - floc < (fscale or 0):
raise FitDataError("pareto", lower=1, upper=np.inf)
ndata = data.shape[0]
def get_shape(scale, location):
# The first-order necessary condition on `shape` can be solved in
# closed form
return ndata / np.sum(np.log((data - location) / scale))
if floc is fscale is None:
            # The support of the distribution is `(x - loc)/scale >= 1`.
# The method of Lagrange multipliers turns this constraint
# into an equation that can be solved numerically.
# See gh-12545 for details.
def dL_dScale(shape, scale):
# The partial derivative of the log-likelihood function w.r.t.
# the scale.
return ndata * shape / scale
def dL_dLocation(shape, location):
# The partial derivative of the log-likelihood function w.r.t.
# the location.
return (shape + 1) * np.sum(1 / (data - location))
def fun_to_solve(scale):
# optimize the scale by setting the partial derivatives
                # w.r.t. location and scale equal and solving.
location = np.min(data) - scale
shape = fshape or get_shape(scale, location)
return dL_dLocation(shape, location) - dL_dScale(shape, scale)
def interval_contains_root(lbrack, rbrack):
# return true if the signs disagree.
return (np.sign(fun_to_solve(lbrack)) !=
np.sign(fun_to_solve(rbrack)))
# set brackets for `root_scalar` to use when optimizing over the
# scale such that a root is likely between them. Use user supplied
# guess or default 1.
brack_start = float(kwds.get('scale', 1))
lbrack, rbrack = brack_start / 2, brack_start * 2
# if a root is not between the brackets, iteratively expand them
# until they include a sign change, checking after each bracket is
# modified.
while (not interval_contains_root(lbrack, rbrack)
and (lbrack > 0 or rbrack < np.inf)):
lbrack /= 2
rbrack *= 2
res = root_scalar(fun_to_solve, bracket=[lbrack, rbrack])
if res.converged:
scale = res.root
loc = np.min(data) - scale
shape = fshape or get_shape(scale, loc)
# The Pareto distribution requires that its parameters satisfy
# the condition `fscale + floc <= min(data)`. However, to
# avoid numerical issues, we require that `fscale + floc`
# is strictly less than `min(data)`. If this condition
# is not satisfied, reduce the scale with `np.nextafter` to
# ensure that data does not fall outside of the support.
if not (scale + loc) < np.min(data):
scale = np.min(data) - loc
scale = np.nextafter(scale, 0)
return shape, loc, scale
else:
return super().fit(data, **kwds)
elif floc is None:
loc = np.min(data) - fscale
else:
loc = floc
# Source: Evans, Hastings, and Peacock (2000), Statistical
# Distributions, 3rd. Ed., John Wiley and Sons. Page 149.
scale = fscale or np.min(data) - loc
shape = fshape or get_shape(scale, loc)
return shape, loc, scale
pareto = pareto_gen(a=1.0, name="pareto")
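# Illustrative sketch of the closed-form MLE used by ``pareto.fit`` when
# location and scale are both fixed (hypothetical helper, not library
# code): the returned shape equals ``n / sum(log((data - loc)/scale))``.
def _sketch_pareto_fixed_fit(seed=1234):
    rng = np.random.default_rng(seed)
    sample = pareto.rvs(3.0, size=1000, random_state=rng)
    b_hat, loc_hat, scale_hat = pareto.fit(sample, floc=0, fscale=1)
    manual = len(sample) / np.sum(np.log(sample))
    return np.isclose(b_hat, manual) and (loc_hat, scale_hat) == (0, 1)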
class lomax_gen(rv_continuous):
r"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lomax` is:
.. math::
f(x, c) = \frac{c}{(1+x)^{c+1}}
for :math:`x \ge 0`, :math:`c > 0`.
`lomax` takes ``c`` as a shape parameter for :math:`c`.
`lomax` is a special case of `pareto` with ``loc=-1.0``.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# lomax.pdf(x, c) = c / (1+x)**(c+1)
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _isf(self, q, c):
return q**(-1.0 / c) - 1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
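# Illustrative check of the note above (hypothetical helper, not library
# code): `lomax` is `pareto` shifted left by one, i.e. ``loc=-1.0``.
def _sketch_lomax_is_shifted_pareto(c=2.0):
    x = np.linspace(0.0, 5, 6)
    return np.allclose(lomax.pdf(x, c), pareto.pdf(x, c, loc=-1.0))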
class pearson3_gen(rv_continuous):
r"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is:
.. math::
f(x, \kappa) = \frac{|\beta|}{\Gamma(\alpha)}
(\beta (x - \zeta))^{\alpha - 1}
\exp(-\beta (x - \zeta))
where:
.. math::
\beta = \frac{2}{\kappa}
\alpha = \beta^2 = \frac{4}{\kappa^2}
\zeta = -\frac{\alpha}{\beta} = -\beta
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
Pass the skew :math:`\kappa` into `pearson3` as the shape parameter
``skew``.
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays(1.0, x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.isfinite(skew)
def _shape_info(self):
return [_ShapeInfo("skew", False, (-np.inf, np.inf), (False, False))]
def _stats(self, skew):
m = 0.0
v = 1.0
s = skew
k = 1.5*skew**2
return m, v, s, k
def _pdf(self, x, skew):
# pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
# (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
        # Do the calculation in _logpdf since it helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
        #   - sc.gammaln(alpha)                   - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
# use logpdf instead of _logpdf to fix issue mentioned in gh-12640
# (_logpdf does not return correct result for alpha = 1)
ans[invmask] = np.log(abs(beta)) + gamma.logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
skew = np.broadcast_to(skew, invmask.shape)
invmask1a = np.logical_and(invmask, skew > 0)
invmask1b = skew[invmask] > 0
# use cdf instead of _cdf to fix issue mentioned in gh-12640
# (_cdf produces NaNs for inputs outside support)
ans[invmask1a] = gamma.cdf(transx[invmask1b], alpha[invmask1b])
# The gamma._cdf approach wasn't working with negative skew.
# Note that multiplying the skew by -1 reflects about x=0.
# So instead of evaluating the CDF with negative skew at x,
# evaluate the SF with positive skew at -x.
invmask2a = np.logical_and(invmask, skew < 0)
invmask2b = skew[invmask] < 0
# gamma._sf produces NaNs when transx < 0, so use gamma.sf
ans[invmask2a] = gamma.sf(transx[invmask2b], alpha[invmask2b])
return ans
def _sf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_sf(x[mask])
skew = np.broadcast_to(skew, invmask.shape)
invmask1a = np.logical_and(invmask, skew > 0)
invmask1b = skew[invmask] > 0
ans[invmask1a] = gamma.sf(transx[invmask1b], alpha[invmask1b])
invmask2a = np.logical_and(invmask, skew < 0)
invmask2b = skew[invmask] < 0
ans[invmask2a] = gamma.cdf(transx[invmask2b], alpha[invmask2b])
return ans
def _rvs(self, skew, size=None, random_state=None):
skew = np.broadcast_to(skew, size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = random_state.standard_normal(nsmall)
ans[invmask] = random_state.standard_gamma(alpha, nbig)/beta + zeta
if size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
q = q[invmask]
q[beta < 0] = 1 - q[beta < 0] # for negative skew; see gh-17050
ans[invmask] = sc.gammaincinv(alpha, q)/beta + zeta
return ans
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
Note that method of moments (`method='MM'`) is not
available for this distribution.\n\n""")
def fit(self, data, *args, **kwds):
if kwds.get("method", None) == 'MM':
raise NotImplementedError("Fit `method='MM'` is not available for "
"the Pearson3 distribution. Please try "
"the default `method='MLE'`.")
else:
            return super().fit(data, *args, **kwds)
pearson3 = pearson3_gen(name="pearson3")
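# Illustrative check of the small-skew handling in ``_preprocess``
# (hypothetical helper, not library code): at ``skew=0`` the distribution
# falls back to the standard normal exactly.
def _sketch_pearson3_zero_skew():
    x = np.linspace(-2, 2, 5)
    return np.allclose(pearson3.pdf(x, skew=0.0), norm.pdf(x))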
class powerlaw_gen(rv_continuous):
r"""A power-function continuous random variable.
%(before_notes)s
See Also
--------
pareto
Notes
-----
The probability density function for `powerlaw` is:
.. math::
f(x, a) = a x^{a-1}
for :math:`0 \le x \le 1`, :math:`a > 0`.
`powerlaw` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
For example, the support of `powerlaw` can be adjusted from the default
interval ``[0, 1]`` to the interval ``[c, c+d]`` by setting ``loc=c`` and
``scale=d``. For a power-law distribution with infinite support, see
`pareto`.
`powerlaw` is a special case of `beta` with ``b=1``.
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
def _pdf(self, x, a):
# powerlaw.pdf(x, a) = a * x**(a-1)
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _sf(self, p, a):
return -sc.powm1(p, a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
def _support_mask(self, x, a):
return (super()._support_mask(x, a)
& ((x != 0) | (a >= 1)))
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
Notes specifically for ``powerlaw.fit``: If the location is a free
parameter and the value returned for the shape parameter is less than
        one, the true maximum of the likelihood approaches infinity. This causes
numerical difficulties, and the resulting estimates are approximate.
\n\n""")
def fit(self, data, *args, **kwds):
# Summary of the strategy:
#
# 1) If the scale and location are fixed, return the shape according
# to a formula.
#
# 2) If the scale is fixed, there are two possibilities for the other
# parameters - one corresponding with shape less than one, and
# another with shape greater than one. Calculate both, and return
# whichever has the better log-likelihood.
#
# At this point, the scale is known to be free.
#
# 3) If the location is fixed, return the scale and shape according to
# formulas (or, if the shape is fixed, the fixed shape).
#
# At this point, the location and scale are both free. There are
# separate equations depending on whether the shape is less than one or
# greater than one.
#
# 4a) If the shape is less than one, there are formulas for shape,
# location, and scale.
# 4b) If the shape is greater than one, there are formulas for shape
# and scale, but there is a condition for location to be solved
# numerically.
#
# If the shape is fixed and less than one, we use 4a.
# If the shape is fixed and greater than one, we use 4b.
# If the shape is also free, we calculate fits using both 4a and 4b
        # and choose the one that results in the better log-likelihood.
#
        # In many cases, `np.nextafter` is used to avoid numerical issues.
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
if len(np.unique(data)) == 1:
return super().fit(data, *args, **kwds)
data, fshape, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
penalized_nllf_args = [data, (self._fitstart(data),)]
penalized_nllf = self._reduce_func(penalized_nllf_args, {})[1]
# ensure that any fixed parameters don't violate constraints of the
# distribution before continuing. The support of the distribution
# is `0 < (x - loc)/scale < 1`.
if floc is not None:
if not data.min() > floc:
raise FitDataError('powerlaw', 0, 1)
if fscale is not None and not data.max() <= floc + fscale:
raise FitDataError('powerlaw', 0, 1)
if fscale is not None:
if fscale <= 0:
raise ValueError("Negative or zero `fscale` is outside the "
"range allowed by the distribution.")
if fscale <= np.ptp(data):
msg = "`fscale` must be greater than the range of data."
raise ValueError(msg)
def get_shape(data, loc, scale):
# The first-order necessary condition on `shape` can be solved in
# closed form. It can be used no matter the assumption of the
# value of the shape.
N = len(data)
return - N / (np.sum(np.log(data - loc)) - N*np.log(scale))
def get_scale(data, loc):
# analytical solution for `scale` based on the location.
# It can be used no matter the assumption of the value of the
# shape.
return data.max() - loc
# 1) The location and scale are both fixed. Analytically determine the
# shape.
if fscale is not None and floc is not None:
return get_shape(data, floc, fscale), floc, fscale
# 2) The scale is fixed. There are two possibilities for the other
# parameters. Choose the option with better log-likelihood.
if fscale is not None:
# using `data.min()` as the optimal location
loc_lt1 = np.nextafter(data.min(), -np.inf)
shape_lt1 = fshape or get_shape(data, loc_lt1, fscale)
ll_lt1 = penalized_nllf((shape_lt1, loc_lt1, fscale), data)
# using `data.max() - scale` as the optimal location
loc_gt1 = np.nextafter(data.max() - fscale, np.inf)
shape_gt1 = fshape or get_shape(data, loc_gt1, fscale)
ll_gt1 = penalized_nllf((shape_gt1, loc_gt1, fscale), data)
if ll_lt1 < ll_gt1:
return shape_lt1, loc_lt1, fscale
else:
return shape_gt1, loc_gt1, fscale
# 3) The location is fixed. Return the analytical scale and the
# analytical (or fixed) shape.
if floc is not None:
scale = get_scale(data, floc)
shape = fshape or get_shape(data, floc, scale)
return shape, floc, scale
# 4) Location and scale are both free
# 4a) Use formulas that assume `shape <= 1`.
def fit_loc_scale_w_shape_lt_1():
loc = np.nextafter(data.min(), -np.inf)
if np.abs(loc) < np.finfo(loc.dtype).tiny:
loc = np.sign(loc) * np.finfo(loc.dtype).tiny
scale = np.nextafter(get_scale(data, loc), np.inf)
shape = fshape or get_shape(data, loc, scale)
return shape, loc, scale
# 4b) Fit under the assumption that `shape > 1`. The support
# of the distribution is `(x - loc)/scale <= 1`. The method of Lagrange
# multipliers turns this constraint into the condition that
# dL_dScale - dL_dLocation must be zero, which is solved numerically.
# (Alternatively, substitute the constraint into the objective
# function before deriving the likelihood equation for location.)
def dL_dScale(data, shape, scale):
# The partial derivative of the log-likelihood function w.r.t.
# the scale.
return -data.shape[0] * shape / scale
def dL_dLocation(data, shape, loc):
# The partial derivative of the log-likelihood function w.r.t.
# the location.
return (shape - 1) * np.sum(1 / (loc - data)) # -1/(data-loc)
def dL_dLocation_star(loc):
# The derivative of the log-likelihood function w.r.t.
# the location, given optimal shape and scale
scale = np.nextafter(get_scale(data, loc), -np.inf)
shape = fshape or get_shape(data, loc, scale)
return dL_dLocation(data, shape, loc)
def fun_to_solve(loc):
# optimize the location by setting the partial derivatives
            # w.r.t. location and scale equal and solving.
scale = np.nextafter(get_scale(data, loc), -np.inf)
shape = fshape or get_shape(data, loc, scale)
return (dL_dScale(data, shape, scale)
- dL_dLocation(data, shape, loc))
def fit_loc_scale_w_shape_gt_1():
# set brackets for `root_scalar` to use when optimizing over the
# location such that a root is likely between them.
rbrack = np.nextafter(data.min(), -np.inf)
# if the sign of `dL_dLocation_star` is positive at rbrack,
# we're not going to find the root we're looking for
delta = (data.min() - rbrack)
while dL_dLocation_star(rbrack) > 0:
rbrack = data.min() - delta
delta *= 2
def interval_contains_root(lbrack, rbrack):
# Check if the interval (lbrack, rbrack) contains the root.
return (np.sign(fun_to_solve(lbrack))
!= np.sign(fun_to_solve(rbrack)))
lbrack = rbrack - 1
# if the sign doesn't change between the brackets, move the left
# bracket until it does. (The right bracket remains fixed at the
# maximum permissible value.)
i = 1.0
while (not interval_contains_root(lbrack, rbrack)
and lbrack != -np.inf):
lbrack = (data.min() - i)
i *= 2
root = optimize.root_scalar(fun_to_solve, bracket=(lbrack, rbrack))
loc = np.nextafter(root.root, -np.inf)
scale = np.nextafter(get_scale(data, loc), np.inf)
shape = fshape or get_shape(data, loc, scale)
return shape, loc, scale
# Shape is fixed - choose 4a or 4b accordingly.
if fshape is not None and fshape <= 1:
return fit_loc_scale_w_shape_lt_1()
elif fshape is not None and fshape > 1:
return fit_loc_scale_w_shape_gt_1()
# Shape is free
fit_shape_lt1 = fit_loc_scale_w_shape_lt_1()
ll_lt1 = self.nnlf(fit_shape_lt1, data)
fit_shape_gt1 = fit_loc_scale_w_shape_gt_1()
ll_gt1 = self.nnlf(fit_shape_gt1, data)
if ll_lt1 <= ll_gt1 and fit_shape_lt1[0] <= 1:
return fit_shape_lt1
elif ll_lt1 > ll_gt1 and fit_shape_gt1[0] > 1:
return fit_shape_gt1
else:
return super().fit(data, *args, **kwds)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
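# Illustrative check of the beta connection stated in the notes
# (hypothetical helper, not library code): `powerlaw` with shape ``a``
# should match `beta` with parameters ``(a, 1)``.
def _sketch_powerlaw_is_beta(a=2.5):
    x = np.linspace(0.01, 0.99, 7)
    return np.allclose(powerlaw.pdf(x, a), beta.pdf(x, a, 1.0))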
class powerlognorm_gen(rv_continuous):
r"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is:
.. math::
f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
(\Phi(-\log(x)/s))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`s, c > 0`.
`powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
i_s = _ShapeInfo("s", False, (0, np.inf), (False, False))
return [ic, i_s]
def _pdf(self, x, c, s):
return np.exp(self._logpdf(x, c, s))
def _logpdf(self, x, c, s):
return (np.log(c) - np.log(x) - np.log(s) +
_norm_logpdf(np.log(x) / s) +
_norm_logcdf(-np.log(x) / s) * (c - 1.))
def _cdf(self, x, c, s):
return -sc.expm1(self._logsf(x, c, s))
def _ppf(self, q, c, s):
return self._isf(1 - q, c, s)
def _sf(self, x, c, s):
return np.exp(self._logsf(x, c, s))
def _logsf(self, x, c, s):
return _norm_logcdf(-np.log(x) / s) * c
def _isf(self, q, c, s):
return np.exp(-_norm_ppf(q**(1/c)) * s)
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
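# Illustrative sanity check (hypothetical helper, not library code): at
# ``c=1`` the power factor drops out and `powerlognorm` reduces to
# `lognorm` with the same ``s``.
def _sketch_powerlognorm_reduces_to_lognorm(s=0.7):
    x = np.linspace(0.1, 4, 6)
    return np.allclose(powerlognorm.pdf(x, 1.0, s), lognorm.pdf(x, s))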
class powernorm_gen(rv_continuous):
r"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is:
.. math::
f(x, c) = c \phi(x) (\Phi(-x))^{c-1}
where :math:`\phi` is the normal pdf, :math:`\Phi` is the normal cdf,
:math:`x` is any real, and :math:`c > 0` [1]_.
`powernorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
.. [1] NIST Engineering Statistics Handbook, Section 1.3.6.6.13,
https://www.itl.nist.gov/div898/handbook//eda/section3/eda366d.htm
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return -sc.expm1(self._logsf(x, c))
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
def _sf(self, x, c):
return np.exp(self._logsf(x, c))
def _logsf(self, x, c):
return c * _norm_logcdf(-x)
def _isf(self, q, c):
return -_norm_ppf(np.exp(np.log(q) / c))
powernorm = powernorm_gen(name='powernorm')
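# Illustrative sanity check (hypothetical helper, not library code): at
# ``c=1`` the ``(Phi(-x))**(c-1)`` factor equals 1 and `powernorm`
# reduces to the standard normal.
def _sketch_powernorm_reduces_to_norm():
    x = np.linspace(-3, 3, 7)
    return np.allclose(powernorm.pdf(x, 1.0), norm.pdf(x))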
class rdist_gen(rv_continuous):
r"""An R-distributed (symmetric beta) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is:
.. math::
f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}
for :math:`-1 \le x \le 1`, :math:`c > 0`. `rdist` is also called the
symmetric beta distribution: if B has a `beta` distribution with
    parameters (c/2, c/2), then X = 2*B - 1 follows an R-distribution with
parameter c.
`rdist` takes ``c`` as a shape parameter for :math:`c`.
This distribution includes the following distribution kernels as
special cases::
c = 2: uniform
c = 3: `semicircular`
c = 4: Epanechnikov (parabolic)
c = 6: quartic (biweight)
c = 8: triweight
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
# use relation to the beta distribution for pdf, cdf, etc
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return -np.log(2) + beta._logpdf((x + 1)/2, c/2, c/2)
def _cdf(self, x, c):
return beta._cdf((x + 1)/2, c/2, c/2)
def _sf(self, x, c):
return beta._sf((x + 1)/2, c/2, c/2)
def _ppf(self, q, c):
return 2*beta._ppf(q, c/2, c/2) - 1
def _rvs(self, c, size=None, random_state=None):
return 2 * random_state.beta(c/2, c/2, size) - 1
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
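# Illustrative sketch of the beta relation used by the methods above: if
# B ~ beta(c/2, c/2), then X = 2*B - 1 is R-distributed with parameter c.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x, c = 0.3, 4.0
# >>> np.allclose(stats.rdist.cdf(x, c), stats.beta.cdf((x + 1)/2, c/2, c/2))
# True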
class rayleigh_gen(rv_continuous):
r"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is:
.. math::
f(x) = x \exp(-x^2/2)
for :math:`x \ge 0`.
`rayleigh` is a special case of `chi` with ``df=2``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return chi.rvs(2, size=size, random_state=random_state)
def _pdf(self, r):
# rayleigh.pdf(r) = r * exp(-r**2/2)
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
Notes specifically for ``rayleigh.fit``: If the location is fixed with
the `floc` parameter, this method uses an analytical formula to find
the scale. Otherwise, this function uses a numerical root finder on
the first order conditions of the log-likelihood function to find the
MLE. Only the (optional) `loc` parameter is used as the initial guess
for the root finder; the `scale` parameter and any other parameters
for the optimizer are ignored.\n\n""")
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
def scale_mle(loc):
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
# and Peacock (2000), Page 175
return (np.sum((data - loc) ** 2) / (2 * len(data))) ** .5
def loc_mle(loc):
# This implicit equation for `loc` is used when
# both `loc` and `scale` are free.
xm = data - loc
s1 = xm.sum()
s2 = (xm**2).sum()
s3 = (1/xm).sum()
return s1 - s2/(2*len(data))*s3
def loc_mle_scale_fixed(loc, scale=fscale):
# This implicit equation for `loc` is used when
# `scale` is fixed but `loc` is not.
xm = data - loc
return xm.sum() - scale**2 * (1/xm).sum()
if floc is not None:
# `loc` is fixed, analytically determine `scale`.
if np.any(data - floc <= 0):
raise FitDataError("rayleigh", lower=1, upper=np.inf)
else:
return floc, scale_mle(floc)
# Account for user provided guess of `loc`.
loc0 = kwds.get('loc')
if loc0 is None:
# Use _fitstart to estimate loc; ignore the returned scale.
loc0 = self._fitstart(data)[0]
fun = loc_mle if fscale is None else loc_mle_scale_fixed
rbrack = np.nextafter(np.min(data), -np.inf)
lbrack = _get_left_bracket(fun, rbrack)
res = optimize.root_scalar(fun, bracket=(lbrack, rbrack))
if not res.converged:
raise FitSolverError(res.flag)
loc = res.root
scale = fscale or scale_mle(loc)
return loc, scale
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
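# Illustrative sketch of the analytical scale formula used by
# ``rayleigh.fit`` above when the location is fixed with `floc`.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> rng = np.random.default_rng(1234)
# >>> data = stats.rayleigh.rvs(loc=5, scale=2, size=1000, random_state=rng)
# >>> loc, scale = stats.rayleigh.fit(data, floc=5)
# >>> np.allclose(scale, np.sqrt(np.sum((data - 5)**2) / (2*len(data))))
# True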
class reciprocal_gen(rv_continuous):
r"""A loguniform or reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for this class is:
.. math::
f(x, a, b) = \frac{1}{x \log(b/a)}
for :math:`a \le x \le b`, :math:`b > a > 0`. This class takes
:math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
This doesn't show the equal probability of ``0.01``, ``0.1`` and
``1``. This is best when the x-axis is log-scaled:
>>> import numpy as np
>>> fig, ax = plt.subplots(1, 1)
>>> ax.hist(np.log10(r))
>>> ax.set_ylabel("Frequency")
>>> ax.set_xlabel("Value of random variable")
>>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
>>> ticks = ["$10^{{ {} }}$".format(i) for i in [-2, -1, 0]]
>>> ax.set_xticklabels(ticks) # doctest: +SKIP
>>> plt.show()
This random variable will be log-uniform regardless of the base chosen for
``a`` and ``b``. Let's specify with base ``2`` instead:
>>> rvs = %(name)s(2**-2, 2**0).rvs(size=1000)
Values of ``1/4``, ``1/2`` and ``1`` are equally likely with this random
variable. Here's the histogram:
>>> fig, ax = plt.subplots(1, 1)
>>> ax.hist(np.log2(rvs))
>>> ax.set_ylabel("Frequency")
>>> ax.set_xlabel("Value of random variable")
>>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
>>> ticks = ["$2^{{ {} }}$".format(i) for i in [-2, -1, 0]]
>>> ax.set_xticklabels(ticks) # doctest: +SKIP
>>> plt.show()
"""
def _argcheck(self, a, b):
return (a > 0) & (b > a)
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ia, ib]
def _fitstart(self, data):
if isinstance(data, CensoredData):
data = data._uncensor()
# Reasonable, since support is [a, b]
return super()._fitstart(data, args=(np.min(data), np.max(data)))
def _get_support(self, a, b):
return a, b
def _pdf(self, x, a, b):
# reciprocal.pdf(x, a, b) = 1 / (x*(log(b) - log(a)))
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(np.log(b) - np.log(a))
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / (np.log(b) - np.log(a))
def _ppf(self, q, a, b):
return np.exp(np.log(a) + q*(np.log(b) - np.log(a)))
def _munp(self, n, a, b):
t1 = 1 / (np.log(b) - np.log(a)) / n
t2 = np.real(np.exp(_log_diff(n * np.log(b), n*np.log(a))))
return t1 * t2
def _entropy(self, a, b):
return 0.5*(np.log(a) + np.log(b)) + np.log(np.log(b) - np.log(a))
fit_note = """\
`loguniform`/`reciprocal` is over-parameterized. `fit` automatically
fixes `scale` to 1 unless `fscale` is provided by the user.\n\n"""
@extend_notes_in_docstring(rv_continuous, notes=fit_note)
def fit(self, data, *args, **kwds):
fscale = kwds.pop('fscale', 1)
return super().fit(data, *args, fscale=fscale, **kwds)
# Details related to the decision of not defining
# the survival function for this distribution can be
# found in the PR: https://github.com/scipy/scipy/pull/18614
loguniform = reciprocal_gen(name="loguniform")
reciprocal = reciprocal_gen(name="reciprocal")
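# Illustrative sketch of the defining property discussed in the docstring:
# under `loguniform`, intervals spanning equal ratios carry equal mass
# (here, one decade each on [0.01, 1]).
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> p1 = stats.loguniform.cdf(0.1, 0.01, 1) - stats.loguniform.cdf(0.01, 0.01, 1)
# >>> p2 = stats.loguniform.cdf(1, 0.01, 1) - stats.loguniform.cdf(0.1, 0.01, 1)
# >>> np.allclose(p1, p2)
# True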
class rice_gen(rv_continuous):
r"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is:
.. math::
f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b)
    for :math:`x \ge 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel
function of order zero (`scipy.special.i0`).
`rice` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
The Rice distribution describes the length, :math:`r`, of a 2-D vector with
components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u,
v` are independent Gaussian random variables with standard deviation
:math:`s`. Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _shape_info(self):
return [_ShapeInfo("b", False, (0, np.inf), (True, False))]
def _rvs(self, b, size=None, random_state=None):
# https://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + random_state.standard_normal(size=(2,) + size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
#
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
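# Illustrative sketch checking the documented density against the Bessel
# formula ``x * exp(-(x**2 + b**2)/2) * I0(x*b)``.
#
# >>> import numpy as np
# >>> from scipy import stats, special
# >>> x, b = 2.0, 1.5
# >>> np.allclose(stats.rice.pdf(x, b),
# ...             x * np.exp(-(x**2 + b**2)/2) * special.i0(x*b))
# True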
class recipinvgauss_gen(rv_continuous):
r"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
\exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right)
for :math:`x \ge 0`.
`recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("mu", False, (0, np.inf), (False, False))]
def _pdf(self, x, mu):
# recipinvgauss.pdf(x, mu) =
# 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
return np.exp(self._logpdf(x, mu))
def _logpdf(self, x, mu):
return _lazywhere(x > 0, (x, mu),
lambda x, mu: (-(1 - mu*x)**2.0 / (2*x*mu**2.0)
- 0.5*np.log(2*np.pi*x)),
fillvalue=-np.inf)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return _norm_cdf(-isqx*trm1) - np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _sf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return _norm_cdf(isqx*trm1) + np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu, size=None, random_state=None):
return 1.0/random_state.wald(mu, 1.0, size=size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
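# Illustrative sketch of the reciprocal relation used by `_rvs` above: if
# Y ~ invgauss(mu), then X = 1/Y has this distribution, so the densities are
# related by the change-of-variables factor 1/x**2.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x, mu = 0.5, 2.0
# >>> np.allclose(stats.recipinvgauss.pdf(x, mu),
# ...             stats.invgauss.pdf(1/x, mu) / x**2)
# True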
class semicircular_gen(rv_continuous):
r"""A semicircular continuous random variable.
%(before_notes)s
See Also
--------
rdist
Notes
-----
The probability density function for `semicircular` is:
.. math::
f(x) = \frac{2}{\pi} \sqrt{1-x^2}
for :math:`-1 \le x \le 1`.
    The distribution is a special case of `rdist` with ``c = 3``.
%(after_notes)s
References
----------
.. [1] "Wigner semicircle distribution",
https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
return 2.0/np.pi*np.sqrt(1-x*x)
def _logpdf(self, x):
return np.log(2/np.pi) + 0.5*sc.log1p(-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _ppf(self, q):
return rdist._ppf(q, 3)
def _rvs(self, size=None, random_state=None):
# generate values uniformly distributed on the area under the pdf
# (semi-circle) by randomly generating the radius and angle
r = np.sqrt(random_state.uniform(size=size))
a = np.cos(np.pi * random_state.uniform(size=size))
return r * a
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
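# Illustrative sketch of the special case noted in the docstring:
# `semicircular` is `rdist` with ``c = 3``.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(-0.9, 0.9, 5)
# >>> np.allclose(stats.semicircular.pdf(x), stats.rdist.pdf(x, 3))
# True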
class skewcauchy_gen(rv_continuous):
r"""A skewed Cauchy random variable.
%(before_notes)s
See Also
--------
cauchy : Cauchy distribution
Notes
-----
The probability density function for `skewcauchy` is:
.. math::
f(x) = \frac{1}{\pi \left(\frac{x^2}{\left(a\, \text{sign}(x) + 1
\right)^2} + 1 \right)}
for a real number :math:`x` and skewness parameter :math:`-1 < a < 1`.
When :math:`a=0`, the distribution reduces to the usual Cauchy
distribution.
%(after_notes)s
References
----------
.. [1] "Skewed generalized *t* distribution", Wikipedia
https://en.wikipedia.org/wiki/Skewed_generalized_t_distribution#Skewed_Cauchy_distribution
%(example)s
"""
def _argcheck(self, a):
return np.abs(a) < 1
def _shape_info(self):
return [_ShapeInfo("a", False, (-1.0, 1.0), (False, False))]
def _pdf(self, x, a):
return 1 / (np.pi * (x**2 / (a * np.sign(x) + 1)**2 + 1))
def _cdf(self, x, a):
return np.where(x <= 0,
(1 - a) / 2 + (1 - a) / np.pi * np.arctan(x / (1 - a)),
(1 - a) / 2 + (1 + a) / np.pi * np.arctan(x / (1 + a)))
def _ppf(self, x, a):
i = x < self._cdf(0, a)
return np.where(i,
np.tan(np.pi / (1 - a) * (x - (1 - a) / 2)) * (1 - a),
np.tan(np.pi / (1 + a) * (x - (1 - a) / 2)) * (1 + a))
def _stats(self, a, moments='mvsk'):
return np.nan, np.nan, np.nan, np.nan
def _fitstart(self, data):
# Use 0 as the initial guess of the skewness shape parameter.
# For the location and scale, estimate using the median and
# quartiles.
if isinstance(data, CensoredData):
data = data._uncensor()
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return 0.0, p50, (p75 - p25)/2
skewcauchy = skewcauchy_gen(name='skewcauchy')
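# Illustrative sketch of the docstring's claim that ``a = 0`` recovers the
# ordinary Cauchy distribution.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(-5, 5, 7)
# >>> np.allclose(stats.skewcauchy.pdf(x, 0), stats.cauchy.pdf(x))
# True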
class skewnorm_gen(rv_continuous):
r"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
    `skewnorm` takes a real number :math:`a` as a skewness parameter.
When ``a = 0`` the distribution is identical to a normal distribution
(`norm`). `rvs` implements the method of [1]_.
%(after_notes)s
%(example)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of
the multivariate skew-normal distribution. J. Roy. Statist. Soc.,
B 61, 579-602. :arxiv:`0911.2093`
"""
def _argcheck(self, a):
return np.isfinite(a)
def _shape_info(self):
return [_ShapeInfo("a", False, (-np.inf, np.inf), (False, False))]
def _pdf(self, x, a):
return _lazywhere(
a == 0, (x, a), lambda x, a: _norm_pdf(x),
f2=lambda x, a: 2.*_norm_pdf(x)*_norm_cdf(a*x)
)
def _cdf(self, x, a):
a = np.atleast_1d(a)
cdf = _boost._skewnorm_cdf(x, 0, 1, a)
# for some reason, a isn't broadcasted if some of x are invalid
a = np.broadcast_to(a, cdf.shape)
# Boost is not accurate in left tail when a > 0
i_small_cdf = (cdf < 1e-6) & (a > 0)
cdf[i_small_cdf] = super()._cdf(x[i_small_cdf], a[i_small_cdf])
return np.clip(cdf, 0, 1)
def _ppf(self, x, a):
return _boost._skewnorm_ppf(x, 0, 1, a)
def _sf(self, x, a):
# Boost's SF is implemented this way. Use whatever customizations
# we made in the _cdf.
return self._cdf(-x, -a)
def _isf(self, x, a):
return _boost._skewnorm_isf(x, 0, 1, a)
def _rvs(self, a, size=None, random_state=None):
u0 = random_state.normal(size=size)
v = random_state.normal(size=size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
    # For odd order, each noncentral moment of the skew-normal distribution
# with location 0 and scale 1 can be expressed as a polynomial in delta,
# where delta = a/sqrt(1 + a**2) and `a` is the skew-normal shape
# parameter. The dictionary _skewnorm_odd_moments defines those
# polynomials for orders up to 19. The dict is implemented as a cached
# property to reduce the impact of the creation of the dict on import time.
@cached_property
def _skewnorm_odd_moments(self):
skewnorm_odd_moments = {
1: Polynomial([1]),
3: Polynomial([3, -1]),
5: Polynomial([15, -10, 3]),
7: Polynomial([105, -105, 63, -15]),
9: Polynomial([945, -1260, 1134, -540, 105]),
11: Polynomial([10395, -17325, 20790, -14850, 5775, -945]),
13: Polynomial([135135, -270270, 405405, -386100, 225225, -73710,
10395]),
15: Polynomial([2027025, -4729725, 8513505, -10135125, 7882875,
-3869775, 1091475, -135135]),
17: Polynomial([34459425, -91891800, 192972780, -275675400,
268017750, -175429800, 74220300, -18378360,
2027025]),
19: Polynomial([654729075, -1964187225, 4714049340, -7856748900,
9166207050, -7499623950, 4230557100, -1571349780,
346621275, -34459425]),
}
return skewnorm_odd_moments
def _munp(self, order, a):
if order & 1:
if order > 19:
raise NotImplementedError("skewnorm noncentral moments not "
"implemented for odd orders greater "
"than 19.")
# Use the precomputed polynomials that were derived from the
# moment generating function.
delta = a/np.sqrt(1 + a**2)
return (delta * self._skewnorm_odd_moments[order](delta**2)
* _SQRT_2_OVER_PI)
else:
# For even order, the moment is just (order-1)!!, where !! is the
# notation for the double factorial; for an odd integer m, m!! is
# m*(m-2)*...*3*1.
# We could use special.factorial2, but we know the argument is odd,
# so avoid the overhead of that function and compute the result
# directly here.
return sc.gamma((order + 1)/2) * 2**(order/2) / _SQRT_PI
@extend_notes_in_docstring(rv_continuous, notes="""\
If ``method='mm'``, parameters fixed by the user are respected, and the
remaining parameters are used to match distribution and sample moments
where possible. For example, if the user fixes the location with
``floc``, the parameters will only match the distribution skewness and
variance to the sample skewness and variance; no attempt will be made
to match the means or minimize a norm of the errors.
Note that the maximum possible skewness magnitude of a
`scipy.stats.skewnorm` distribution is approximately 0.9952717; if the
magnitude of the data's sample skewness exceeds this, the returned
shape parameter ``a`` will be infinite.
\n\n""")
def fit(self, data, *args, **kwds):
if isinstance(data, CensoredData):
if data.num_censored() == 0:
data = data._uncensor()
else:
return super().fit(data, *args, **kwds)
# this extracts fixed shape, location, and scale however they
# are specified, and also leaves them in `kwds`
data, fa, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
method = kwds.get("method", "mle").lower()
# See https://en.wikipedia.org/wiki/Skew_normal_distribution for
# moment formulas.
def skew_d(d): # skewness in terms of delta
return (4-np.pi)/2 * ((d * np.sqrt(2 / np.pi))**3
/ (1 - 2*d**2 / np.pi)**(3/2))
def d_skew(skew): # delta in terms of skewness
s_23 = np.abs(skew)**(2/3)
return np.sign(skew) * np.sqrt(
np.pi/2 * s_23 / (s_23 + ((4 - np.pi)/2)**(2/3))
)
# If skewness of data is greater than max possible population skewness,
# MoM won't provide a good guess. Get out early.
s = stats.skew(data)
s_max = skew_d(1)
if abs(s) >= s_max and method != "mm" and fa is None and not args:
return super().fit(data, *args, **kwds)
# If method is method of moments, we don't need the user's guesses.
# Otherwise, extract the guesses from args and kwds.
if method == "mm":
a, loc, scale = None, None, None
else:
a = args[0] if len(args) else None
loc = kwds.pop('loc', None)
scale = kwds.pop('scale', None)
if fa is None and a is None: # not fixed and no guess: use MoM
# Solve for a that matches sample distribution skewness to sample
# skewness.
s = np.clip(s, -s_max, s_max)
d = d_skew(s)
with np.errstate(divide='ignore'):
a = np.sqrt(np.divide(d**2, (1-d**2)))*np.sign(s)
else:
a = fa if fa is not None else a
d = a / np.sqrt(1 + a**2)
if fscale is None and scale is None:
v = np.var(data)
scale = np.sqrt(v / (1 - 2*d**2/np.pi))
elif fscale is not None:
scale = fscale
if floc is None and loc is None:
m = np.mean(data)
loc = m - scale*d*np.sqrt(2/np.pi)
elif floc is not None:
loc = floc
if method == 'mm':
return a, loc, scale
else:
# At this point, parameter "guesses" may equal the fixed parameters
# in kwds. No harm in passing them as guesses, too.
return super().fit(data, a, loc=loc, scale=scale, **kwds)
skewnorm = skewnorm_gen(name='skewnorm')
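# Illustrative sketch of the pdf identity quoted in the docstring:
# ``skewnorm.pdf(x, a) == 2 * norm.pdf(x) * norm.cdf(a*x)``.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x, a = 0.5, 3.0
# >>> np.allclose(stats.skewnorm.pdf(x, a),
# ...             2 * stats.norm.pdf(x) * stats.norm.cdf(a*x))
# True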
class trapezoid_gen(rv_continuous):
r"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``. This
defines the trapezoid base from ``loc`` to ``(loc+scale)`` and the flat
top from ``c`` to ``d`` proportional to the position along the base
with ``0 <= c <= d <= 1``. When ``c=d``, this is equivalent to `triang`
with the same values for `loc`, `scale` and `c`.
The method of [1]_ is used for computing moments.
`trapezoid` takes :math:`c` and :math:`d` as shape parameters.
%(after_notes)s
    The standard form is in the range [0, 1] with the flat top between ``c`` and ``d``.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
References
----------
.. [1] Kacker, R.N. and Lawrence, J.F. (2007). Trapezoidal and triangular
distributions for Type B evaluation of standard uncertainty.
Metrologia 44, 117-127. :doi:`10.1088/0026-1394/44/2/003`
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _shape_info(self):
ic = _ShapeInfo("c", False, (0, 1.0), (True, True))
id = _ShapeInfo("d", False, (0, 1.0), (True, True))
return [ic, id]
def _pdf(self, x, c, d):
u = 2 / (d-c+1)
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d, u: u * x / c,
lambda x, c, d, u: u,
lambda x, c, d, u: u * (1-x) / (1-d)],
(x, c, d, u))
def _cdf(self, x, c, d):
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d: x**2 / c / (d-c+1),
lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
lambda x, c, d: 1-((1-x) ** 2
/ (d-c+1) / (1-d))],
(x, c, d))
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
def _munp(self, n, c, d):
# Using the parameterization from Kacker, 2007, with
# a=bottom left, c=top left, d=top right, b=bottom right, then
        # E[X^n] = h/(n+1)/(n+2) [(b^{n+2}-d^{n+2})/(b-d)
        #                         - (c^{n+2} - a^{n+2})/(c-a)]
# with h = 2/((b-a) - (d-c)). The corresponding parameterization
# in scipy, has a'=loc, c'=loc+c*scale, d'=loc+d*scale, b'=loc+scale,
# which for standard form reduces to a'=0, b'=1, c'=c, d'=d.
# Substituting into E[X^n] gives the bd' term as (1 - d^{n+2})/(1 - d)
# and the ac' term as c^{n-1} for the standard form. The bd' term has
# numerical difficulties near d=1, so replace (1 - d^{n+2})/(1-d)
# with expm1((n+2)*log(d))/(d-1).
# Testing with n=18 for c=(1e-30,1-eps) shows that this is stable.
# We still require an explicit test for d=1 to prevent divide by zero,
# and now a test for d=0 to prevent log(0).
ab_term = c**(n+1)
dc_term = _lazyselect(
[d == 0.0, (0.0 < d) & (d < 1.0), d == 1.0],
[lambda d: 1.0,
lambda d: np.expm1((n+2) * np.log(d)) / (d-1.0),
lambda d: n+2],
[d])
val = 2.0 / (1.0+d-c) * (dc_term - ab_term) / ((n+1) * (n+2))
return val
def _entropy(self, c, d):
# Using the parameterization from Wikipedia (van Dorp, 2003)
# with a=bottom left, c=top left, d=top right, b=bottom right
# gives a'=loc, b'=loc+c*scale, c'=loc+d*scale, d'=loc+scale,
# which for loc=0, scale=1 is a'=0, b'=c, c'=d, d'=1.
# Substituting into the entropy formula from Wikipedia gives
# the following result.
return 0.5 * (1.0-d+c) / (1.0+d-c) + np.log(0.5 * (1.0+d-c))
trapezoid = trapezoid_gen(a=0.0, b=1.0, name="trapezoid")
# Note: alias kept for backwards compatibility. Rename was done
# because trapz is a slur in colloquial English (see gh-12924).
trapz = trapezoid_gen(a=0.0, b=1.0, name="trapz")
if trapz.__doc__:
trapz.__doc__ = "trapz is an alias for `trapezoid`"
class triang_gen(rv_continuous):
r"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc + scale)``.
`triang` takes ``c`` as a shape parameter for :math:`0 \le c \le 1`.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
return random_state.triangular(0, c, 1, size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _shape_info(self):
return [_ShapeInfo("c", False, (0, 1.0), (True, True))]
def _pdf(self, x, c):
# 0: edge case where c=0
# 1: generalised case for x < c, don't use x <= c, as it doesn't cope
# with c = 0.
# 2: generalised case for x >= c, but doesn't cope with c = 1
# 3: edge case where c=1
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2 - 2 * x,
lambda x, c: 2 * x / c,
lambda x, c: 2 * (1 - x) / (1 - c),
lambda x, c: 2 * x],
(x, c))
return r
def _cdf(self, x, c):
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2*x - x*x,
lambda x, c: x * x / c,
lambda x, c: (x*x - 2*x + c) / (c-1),
lambda x, c: x * x],
(x, c))
return r
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
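# Illustrative sketch of the equivalence noted in the `trapezoid` docstring:
# when ``c == d``, the trapezoid degenerates to `triang` with the same mode.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(0.05, 0.95, 5)
# >>> np.allclose(stats.trapezoid.pdf(x, 0.3, 0.3), stats.triang.pdf(x, 0.3))
# True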
class truncexpon_gen(rv_continuous):
r"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is:
.. math::
f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}
    for :math:`0 \le x \le b`.
`truncexpon` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("b", False, (0, np.inf), (False, False))]
def _get_support(self, b):
return self.a, b
def _pdf(self, x, b):
# truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _sf(self, x, b):
return (np.exp(-b) - np.exp(-x))/sc.expm1(-b)
def _isf(self, q, b):
return -np.log(np.exp(-b) - q * sc.expm1(-b))
def _munp(self, n, b):
        # The closed-form expression gives the wrong answer (same issue as
        # in continuous.pdf):
        # return sc.gamma(n+1) - sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
return super()._munp(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
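# Illustrative sketch of the documented cdf,
# ``(1 - exp(-x)) / (1 - exp(-b))`` on [0, b].
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x, b = 1.0, 3.0
# >>> np.allclose(stats.truncexpon.cdf(x, b),
# ...             (1 - np.exp(-x)) / (1 - np.exp(-b)))
# True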
# logsumexp trick for log(p + q) with only log(p) and log(q)
def _log_sum(log_p, log_q):
return sc.logsumexp([log_p, log_q], axis=0)
# same as above, but using -exp(x) = exp(x + πi)
def _log_diff(log_p, log_q):
return sc.logsumexp([log_p, log_q+np.pi*1j], axis=0)
def _log_gauss_mass(a, b):
"""Log of Gaussian probability mass within an interval"""
a, b = np.broadcast_arrays(a, b)
# Calculations in right tail are inaccurate, so we'll exploit the
# symmetry and work only in the left tail
case_left = b <= 0
case_right = a > 0
case_central = ~(case_left | case_right)
def mass_case_left(a, b):
return _log_diff(_norm_logcdf(b), _norm_logcdf(a))
def mass_case_right(a, b):
return mass_case_left(-b, -a)
def mass_case_central(a, b):
# Previously, this was implemented as:
# left_mass = mass_case_left(a, 0)
# right_mass = mass_case_right(0, b)
# return _log_sum(left_mass, right_mass)
# Catastrophic cancellation occurs as np.exp(log_mass) approaches 1.
# Correct for this with an alternative formulation.
# We're not concerned with underflow here: if only one term
# underflows, it was insignificant; if both terms underflow,
# the result can't accurately be represented in logspace anyway
# because sc.log1p(x) ~ x for small x.
return sc.log1p(-_norm_cdf(a) - _norm_cdf(-b))
# _lazyselect not working; don't care to debug it
out = np.full_like(a, fill_value=np.nan, dtype=np.complex128)
if a[case_left].size:
out[case_left] = mass_case_left(a[case_left], b[case_left])
if a[case_right].size:
out[case_right] = mass_case_right(a[case_right], b[case_right])
if a[case_central].size:
out[case_central] = mass_case_central(a[case_central], b[case_central])
return np.real(out) # discard ~0j
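# A hedged illustration of the cancellation problem motivating the left-tail
# reflection above, using only the public `norm` API: in the far right tail,
# differencing CDFs loses all precision, while the reflected (survival
# function) form retains it.
#
# >>> from scipy import stats
# >>> stats.norm.cdf(11.0) - stats.norm.cdf(10.0)  # both round to 1.0
# 0.0
# >>> stats.norm.sf(10.0) - stats.norm.sf(11.0) > 0
# True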
class truncnorm_gen(rv_continuous):
r"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
This distribution is the normal distribution centered on ``loc`` (default
0), with standard deviation ``scale`` (default 1), and truncated at ``a``
and ``b`` *standard deviations* from ``loc``. For arbitrary ``loc`` and
``scale``, ``a`` and ``b`` are *not* the abscissae at which the shifted
and scaled distribution is truncated.
.. note::
If ``a_trunc`` and ``b_trunc`` are the abscissae at which we wish
to truncate the distribution (as opposed to the number of standard
deviations from ``loc``), then we can calculate the distribution
parameters ``a`` and ``b`` as follows::
a, b = (a_trunc - loc) / scale, (b_trunc - loc) / scale
This is a common point of confusion. For additional clarification,
please see the example below.
%(example)s
In the examples above, ``loc=0`` and ``scale=1``, so the plot is truncated
at ``a`` on the left and ``b`` on the right. However, suppose we were to
produce the same histogram with ``loc = 1`` and ``scale=0.5``.
>>> loc, scale = 1, 0.5
>>> rv = truncnorm(a, b, loc=loc, scale=scale)
>>> x = np.linspace(truncnorm.ppf(0.01, a, b),
... truncnorm.ppf(0.99, a, b), 100)
>>> r = rv.rvs(size=1000)
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
>>> ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2)
>>> ax.set_xlim(a, b)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
    Note that the distribution no longer appears to be truncated at
abscissae ``a`` and ``b``. That is because the *standard* normal
distribution is first truncated at ``a`` and ``b``, *then* the resulting
distribution is scaled by ``scale`` and shifted by ``loc``. If we instead
want the shifted and scaled distribution to be truncated at ``a`` and
``b``, we need to transform these values before passing them as the
distribution parameters.
>>> a_transformed, b_transformed = (a - loc) / scale, (b - loc) / scale
>>> rv = truncnorm(a_transformed, b_transformed, loc=loc, scale=scale)
>>> x = np.linspace(truncnorm.ppf(0.01, a, b),
... truncnorm.ppf(0.99, a, b), 100)
>>> r = rv.rvs(size=10000)
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
>>> ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2)
>>> ax.set_xlim(a-0.1, b+0.1)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
def _argcheck(self, a, b):
return a < b
def _shape_info(self):
ia = _ShapeInfo("a", False, (-np.inf, np.inf), (True, False))
ib = _ShapeInfo("b", False, (-np.inf, np.inf), (False, True))
return [ia, ib]
def _fitstart(self, data):
# Reasonable, since support is [a, b]
if isinstance(data, CensoredData):
data = data._uncensor()
return super()._fitstart(data, args=(np.min(data), np.max(data)))
def _get_support(self, a, b):
return a, b
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - _log_gauss_mass(a, b)
def _cdf(self, x, a, b):
return np.exp(self._logcdf(x, a, b))
def _logcdf(self, x, a, b):
x, a, b = np.broadcast_arrays(x, a, b)
logcdf = np.asarray(_log_gauss_mass(a, x) - _log_gauss_mass(a, b))
i = logcdf > -0.1 # avoid catastrophic cancellation
if np.any(i):
logcdf[i] = np.log1p(-np.exp(self._logsf(x[i], a[i], b[i])))
return logcdf
def _sf(self, x, a, b):
return np.exp(self._logsf(x, a, b))
def _logsf(self, x, a, b):
x, a, b = np.broadcast_arrays(x, a, b)
logsf = np.asarray(_log_gauss_mass(x, b) - _log_gauss_mass(a, b))
i = logsf > -0.1 # avoid catastrophic cancellation
if np.any(i):
logsf[i] = np.log1p(-np.exp(self._logcdf(x[i], a[i], b[i])))
return logsf
def _entropy(self, a, b):
A = _norm_cdf(a)
B = _norm_cdf(b)
Z = B - A
C = np.log(np.sqrt(2 * np.pi * np.e) * Z)
D = (a * _norm_pdf(a) - b * _norm_pdf(b)) / (2 * Z)
h = C + D
return h
def _ppf(self, q, a, b):
q, a, b = np.broadcast_arrays(q, a, b)
case_left = a < 0
case_right = ~case_left
def ppf_left(q, a, b):
log_Phi_x = _log_sum(_norm_logcdf(a),
np.log(q) + _log_gauss_mass(a, b))
return sc.ndtri_exp(log_Phi_x)
def ppf_right(q, a, b):
log_Phi_x = _log_sum(_norm_logcdf(-b),
np.log1p(-q) + _log_gauss_mass(a, b))
return -sc.ndtri_exp(log_Phi_x)
out = np.empty_like(q)
q_left = q[case_left]
q_right = q[case_right]
if q_left.size:
out[case_left] = ppf_left(q_left, a[case_left], b[case_left])
if q_right.size:
out[case_right] = ppf_right(q_right, a[case_right], b[case_right])
return out
def _isf(self, q, a, b):
# Mostly copy-paste of _ppf, but I think this is simpler than combining
q, a, b = np.broadcast_arrays(q, a, b)
case_left = b < 0
case_right = ~case_left
def isf_left(q, a, b):
log_Phi_x = _log_diff(_norm_logcdf(b),
np.log(q) + _log_gauss_mass(a, b))
return sc.ndtri_exp(np.real(log_Phi_x))
def isf_right(q, a, b):
log_Phi_x = _log_diff(_norm_logcdf(-a),
np.log1p(-q) + _log_gauss_mass(a, b))
return -sc.ndtri_exp(np.real(log_Phi_x))
out = np.empty_like(q)
q_left = q[case_left]
q_right = q[case_right]
if q_left.size:
out[case_left] = isf_left(q_left, a[case_left], b[case_left])
if q_right.size:
out[case_right] = isf_right(q_right, a[case_right], b[case_right])
return out
def _munp(self, n, a, b):
def n_th_moment(n, a, b):
"""
Returns n-th moment. Defined only if n >= 0.
Function cannot broadcast due to the loop over n
"""
pA, pB = self._pdf(np.asarray([a, b]), a, b)
probs = [pA, -pB]
moments = [0, 1]
for k in range(1, n+1):
# a or b might be infinite, and the corresponding pdf value
# is 0 in that case, but nan is returned for the
# multiplication. However, as b->infinity, pdf(b)*b**k -> 0.
# So it is safe to use _lazywhere to avoid the nan.
vals = _lazywhere(probs, [probs, [a, b]],
lambda x, y: x * y**(k-1), fillvalue=0)
mk = np.sum(vals) + (k-1) * moments[-2]
moments.append(mk)
return moments[-1]
return _lazywhere((n >= 0) & (a == a) & (b == b), (n, a, b),
np.vectorize(n_th_moment, otypes=[np.float64]),
np.nan)
def _stats(self, a, b, moments='mv'):
pA, pB = self.pdf(np.array([a, b]), a, b)
def _truncnorm_stats_scalar(a, b, pA, pB, moments):
m1 = pA - pB
mu = m1
# use _lazywhere to avoid nan (See detailed comment in _munp)
probs = [pA, -pB]
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y,
fillvalue=0)
m2 = 1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a-mu, b-mu]], lambda x, y: x*y,
fillvalue=0)
# mu2 = m2 - mu**2, but not as numerically stable as:
# mu2 = (a-mu)*pA - (b-mu)*pB + 1
mu2 = 1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**2,
fillvalue=0)
m3 = 2*m1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**3,
fillvalue=0)
m4 = 3*m2 + np.sum(vals)
mu3 = m3 + m1 * (-3*m2 + 2*m1**2)
g1 = mu3 / np.power(mu2, 1.5)
mu4 = m4 + m1*(-4*m3 + 3*m1*(2*m2 - m1**2))
g2 = mu4 / mu2**2 - 3
return mu, mu2, g1, g2
_truncnorm_stats = np.vectorize(_truncnorm_stats_scalar,
excluded=('moments',))
return _truncnorm_stats(a, b, pA, pB, moments)
truncnorm = truncnorm_gen(name='truncnorm', momtype=1)
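# Illustrative sketch: thanks to the log-space implementation above,
# `truncnorm` remains usable even when the truncation window lies deep in
# the tail of the standard normal.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> a, b = 10, 11
# >>> x = stats.truncnorm.ppf(0.5, a, b)
# >>> bool(a < x < b)
# True
# >>> np.allclose(stats.truncnorm.cdf(x, a, b), 0.5)
# True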
class truncpareto_gen(rv_continuous):
r"""An upper truncated Pareto continuous random variable.
%(before_notes)s
See Also
--------
pareto : Pareto distribution
Notes
-----
The probability density function for `truncpareto` is:
.. math::
f(x, b, c) = \frac{b}{1 - c^{-b}} \frac{1}{x^{b+1}}
for :math:`b > 0`, :math:`c > 1` and :math:`1 \le x \le c`.
`truncpareto` takes `b` and `c` as shape parameters for :math:`b` and
:math:`c`.
Notice that the upper truncation value :math:`c` is defined in
standardized form so that random values of an unscaled, unshifted variable
are within the range ``[1, c]``.
If ``u_r`` is the upper bound to a scaled and/or shifted variable,
then ``c = (u_r - loc) / scale``. In other words, the support of the
distribution becomes ``(scale + loc) <= x <= (c*scale + loc)`` when
`scale` and/or `loc` are provided.
%(after_notes)s
References
----------
.. [1] Burroughs, S. M., and Tebbens S. F.
"Upper-truncated power laws in natural systems."
Pure and Applied Geophysics 158.4 (2001): 741-757.
%(example)s
"""
def _shape_info(self):
ib = _ShapeInfo("b", False, (0.0, np.inf), (False, False))
ic = _ShapeInfo("c", False, (1.0, np.inf), (False, False))
return [ib, ic]
def _argcheck(self, b, c):
return (b > 0.) & (c > 1.)
def _get_support(self, b, c):
return self.a, c
def _pdf(self, x, b, c):
return b * x**-(b+1) / (1 - c**-b)
def _logpdf(self, x, b, c):
# return np.log(b) - np.log1p(-c**-b) - (b+1)*np.log(x)
return np.log(b) - np.log(-np.expm1(-b*np.log(c))) - (b+1)*np.log(x)
def _cdf(self, x, b, c):
return (1 - x**-b) / (1 - c**-b)
def _logcdf(self, x, b, c):
return np.log1p(-x**-b) - np.log1p(-c**-b)
def _ppf(self, q, b, c):
return pow(1 - (1 - c**-b)*q, -1/b)
def _sf(self, x, b, c):
return (x**-b - c**-b) / (1 - c**-b)
def _logsf(self, x, b, c):
return np.log(x**-b - c**-b) - np.log1p(-c**-b)
def _isf(self, q, b, c):
return pow(c**-b + (1 - c**-b)*q, -1/b)
def _entropy(self, b, c):
return -(np.log(b/(1 - c**-b))
+ (b+1)*(np.log(c)/(c**b - 1) - 1/b))
def _munp(self, n, b, c):
if (n == b).all():
return b*np.log(c) / (1 - c**-b)
else:
return b / (b-n) * (c**b - c**n) / (c**b - 1)
def _fitstart(self, data):
if isinstance(data, CensoredData):
data = data._uncensor()
b, loc, scale = pareto.fit(data)
c = (max(data) - loc)/scale
return b, c, loc, scale
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
if kwds.pop("superfit", False):
return super().fit(data, *args, **kwds)
def log_mean(x):
return np.mean(np.log(x))
def harm_mean(x):
return 1/np.mean(1/x)
def get_b(c, loc, scale):
u = (data-loc)/scale
harm_m = harm_mean(u)
log_m = log_mean(u)
quot = (harm_m-1)/log_m
return (1 - (quot-1) / (quot - (1 - 1/c)*harm_m/np.log(c)))/log_m
def get_c(loc, scale):
return (mx - loc)/scale
def get_loc(fc, fscale):
if fscale: # (fscale and fc) or (fscale and not fc)
loc = mn - fscale
return loc
if fc:
loc = (fc*mn - mx)/(fc - 1)
return loc
def get_scale(loc):
return mn - loc
# Functions used for optimisation; partial derivatives of
# the Lagrangian, set to equal 0.
def dL_dLoc(loc, b_=None):
# Partial derivative wrt location.
# Optimised upon when no parameters, or only b, are fixed.
scale = get_scale(loc)
c = get_c(loc, scale)
b = get_b(c, loc, scale) if b_ is None else b_
harm_m = harm_mean((data - loc)/scale)
return 1 - (1 + (c - 1)/(c**(b+1) - c)) * (1 - 1/(b+1)) * harm_m
def dL_dB(b, logc, logm):
# Partial derivative wrt b.
# Optimised upon whenever at least one parameter but b is fixed,
# and b is free.
return b - np.log1p(b*logc / (1 - b*logm)) / logc
def fallback(data, *args, **kwargs):
# Should any issue arise, default to the general fit method.
return super(truncpareto_gen, self).fit(data, *args, **kwargs)
parameters = _check_fit_input_parameters(self, data, args, kwds)
data, fb, fc, floc, fscale = parameters
mn, mx = data.min(), data.max()
mn_inf = np.nextafter(mn, -np.inf)
if (fb is not None
and fc is not None
and floc is not None
and fscale is not None):
raise ValueError("All parameters fixed."
"There is nothing to optimize.")
elif fc is None and floc is None and fscale is None:
if fb is None:
def cond_b(loc):
# b is positive only if this function is positive
scale = get_scale(loc)
c = get_c(loc, scale)
harm_m = harm_mean((data - loc)/scale)
return (1 + 1/(c-1)) * np.log(c) / harm_m - 1
# This gives an upper bound on loc allowing for a positive b.
# Iteratively look for a bracket for root_scalar.
mn_inf = np.nextafter(mn, -np.inf)
rbrack = mn_inf
i = 0
lbrack = rbrack - 1
while ((lbrack > -np.inf)
and (cond_b(lbrack)*cond_b(rbrack) >= 0)):
i += 1
lbrack = rbrack - np.power(2., i)
if not lbrack > -np.inf:
return fallback(data, *args, **kwds)
res = root_scalar(cond_b, bracket=(lbrack, rbrack))
if not res.converged:
return fallback(data, *args, **kwds)
# Determine the MLE for loc.
# Iteratively look for a bracket for root_scalar.
rbrack = res.root - 1e-3 # grad_loc is numerically ill-behaved
lbrack = rbrack - 1
i = 0
while ((lbrack > -np.inf)
and (dL_dLoc(lbrack)*dL_dLoc(rbrack) >= 0)):
i += 1
lbrack = rbrack - np.power(2., i)
if not lbrack > -np.inf:
return fallback(data, *args, **kwds)
res = root_scalar(dL_dLoc, bracket=(lbrack, rbrack))
if not res.converged:
return fallback(data, *args, **kwds)
loc = res.root
scale = get_scale(loc)
c = get_c(loc, scale)
b = get_b(c, loc, scale)
std_data = (data - loc)/scale
# The expression of b relies on b being bounded above.
up_bound_b = min(1/log_mean(std_data),
1/(harm_mean(std_data)-1))
if not (b < up_bound_b):
return fallback(data, *args, **kwds)
else:
# We know b is positive (or a FitError will be triggered)
# so we let loc get close to min(data).
rbrack = mn_inf
lbrack = mn_inf - 1
i = 0
# Iteratively look for a bracket for root_scalar.
while (lbrack > -np.inf
and (dL_dLoc(lbrack, fb)
* dL_dLoc(rbrack, fb) >= 0)):
i += 1
lbrack = rbrack - 2**i
if not lbrack > -np.inf:
return fallback(data, *args, **kwds)
res = root_scalar(dL_dLoc, (fb,),
bracket=(lbrack, rbrack))
if not res.converged:
return fallback(data, *args, **kwds)
loc = res.root
scale = get_scale(loc)
c = get_c(loc, scale)
b = fb
else:
# At least one of the parameters determining the support is fixed;
# the others then have analytical expressions from the constraints.
            # The completely determined case (fixed c, loc and scale)
            # must be checked to ensure the data do not fall outside the
            # support. If b is not fixed, it has to be determined numerically.
loc = floc if floc is not None else get_loc(fc, fscale)
scale = fscale or get_scale(loc)
c = fc or get_c(loc, scale)
# Unscaled, translated values should be positive when the location
# is fixed. If it is not the case, we end up with negative `scale`
# and `c`, which would trigger a FitError before exiting the
# method.
if floc is not None and data.min() - floc < 0:
raise FitDataError("truncpareto", lower=1, upper=c)
            # Standardised values should be within the distribution support
            # when all parameters controlling it are fixed. If that is not
            # the case, `fc` is overridden by the `c` determined from `floc`
            # and `fscale` when raising the exception.
if fc and (floc is not None) and fscale:
if data.max() > fc*fscale + floc:
raise FitDataError("truncpareto", lower=1,
upper=get_c(loc, scale))
# The other constraints should be automatically satisfied
# from the analytical expressions of the parameters.
# If fc or fscale are respectively less than one or less than 0,
# a FitError is triggered before exiting the method.
if fb is None:
std_data = (data - loc)/scale
logm = log_mean(std_data)
logc = np.log(c)
# Condition for a positive root to exist.
if not (2*logm < logc):
return fallback(data, *args, **kwds)
lbrack = 1/logm + 1/(logm - logc)
rbrack = np.nextafter(1/logm, 0)
try:
res = root_scalar(dL_dB, (logc, logm),
bracket=(lbrack, rbrack))
# we should then never get there
if not res.converged:
return fallback(data, *args, **kwds)
b = res.root
except ValueError:
b = rbrack
else:
b = fb
# The distribution requires that `scale+loc <= data <= c*scale+loc`.
# To avoid numerical issues, some tuning may be necessary.
# We adjust `scale` to satisfy the lower bound, and we adjust
# `c` to satisfy the upper bound.
if not (scale+loc) < mn:
if fscale:
loc = np.nextafter(loc, -np.inf)
else:
scale = get_scale(loc)
scale = np.nextafter(scale, 0)
if not (c*scale+loc) > mx:
c = get_c(loc, scale)
c = np.nextafter(c, np.inf)
if not (np.all(self._argcheck(b, c)) and (scale > 0)):
return fallback(data, *args, **kwds)
params_override = b, c, loc, scale
if floc is None and fscale is None:
# Based on testing in gh-16782, the following methods are only
# reliable if either `floc` or `fscale` are provided. They are
# fast, though, so might as well see if they are better than the
# generic method.
params_super = fallback(data, *args, **kwds)
nllf_override = self.nnlf(params_override, data)
nllf_super = self.nnlf(params_super, data)
if nllf_super < nllf_override:
return params_super
return params_override
truncpareto = truncpareto_gen(a=1.0, name='truncpareto')
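# Illustrative sketch of the support relation described in the docstring:
# with `loc` and `scale`, the support runs from ``scale + loc`` to
# ``c*scale + loc``.
#
# >>> from scipy import stats
# >>> b, c, loc, scale = 2.0, 5.0, 1.0, 2.0
# >>> lo, hi = stats.truncpareto.support(b, c, loc=loc, scale=scale)
# >>> float(lo), float(hi)
# (3.0, 11.0)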
class tukeylambda_gen(rv_continuous):
r"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
    - Cauchy (:math:`\lambda = -1`)
    - logistic (:math:`\lambda = 0`)
    - approximately normal (:math:`\lambda = 0.14`)
    - uniform from -1 to 1 (:math:`\lambda = 1`)
    `tukeylambda` takes a real number :math:`\lambda` (denoted ``lam``
    in the implementation) as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lam):
return np.isfinite(lam)
def _shape_info(self):
return [_ShapeInfo("lam", False, (-np.inf, np.inf), (False, False))]
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
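# Illustrative sketch of two special cases listed in the docstring:
# ``lam = 1`` gives the uniform distribution on [-1, 1] (density 1/2) and
# ``lam = 0`` gives the logistic distribution.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(-0.5, 0.5, 5)
# >>> np.allclose(stats.tukeylambda.pdf(x, 1), 0.5)
# True
# >>> np.allclose(stats.tukeylambda.cdf(x, 0), stats.logistic.cdf(x))
# True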
class FitUniformFixedScaleDataError(FitDataError):
def __init__(self, ptp, fscale):
self.args = (
"Invalid values in `data`. Maximum likelihood estimation with "
"the uniform distribution and fixed scale requires that "
f"np.ptp(data) <= fscale, but np.ptp(data) = {ptp} and "
f"fscale = {fscale}."
)
class uniform_gen(rv_continuous):
r"""A uniform continuous random variable.
In the standard form, the distribution is uniform on ``[0, 1]``. Using
the parameters ``loc`` and ``scale``, one obtains the uniform distribution
on ``[loc, loc + scale]``.
%(before_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.uniform(0.0, 1.0, size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
@_call_super_mom
def fit(self, data, *args, **kwds):
"""
Maximum likelihood estimate for the location and scale parameters.
`uniform.fit` uses only the following parameters. Because exact
formulas are used, the parameters related to optimization that are
available in the `fit` method of other distributions are ignored
here. The only positional argument accepted is `data`.
Parameters
----------
data : array_like
Data to use in calculating the maximum likelihood estimate.
floc : float, optional
Hold the location parameter fixed to the specified value.
fscale : float, optional
Hold the scale parameter fixed to the specified value.
Returns
-------
loc, scale : float
Maximum likelihood estimates for the location and scale.
Notes
-----
An error is raised if `floc` is given and any values in `data` are
less than `floc`, or if `fscale` is given and `fscale` is less
than ``data.max() - data.min()``. An error is also raised if both
`floc` and `fscale` are given.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import uniform
We'll fit the uniform distribution to `x`:
>>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])
For a uniform distribution MLE, the location is the minimum of the
data, and the scale is the maximum minus the minimum.
>>> loc, scale = uniform.fit(x)
>>> loc
2.0
>>> scale
11.0
If we know the data comes from a uniform distribution where the support
starts at 0, we can use `floc=0`:
>>> loc, scale = uniform.fit(x, floc=0)
>>> loc
0.0
>>> scale
13.0
Alternatively, if we know the length of the support is 12, we can use
`fscale=12`:
>>> loc, scale = uniform.fit(x, fscale=12)
>>> loc
1.5
>>> scale
12.0
In that last example, the support interval is [1.5, 13.5]. This
solution is not unique. For example, the distribution with ``loc=2``
and ``scale=12`` has the same likelihood as the one above. When
`fscale` is given and it is larger than ``data.max() - data.min()``,
the parameters returned by the `fit` method center the support over
the interval ``[data.min(), data.max()]``.
"""
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise ValueError("The data contains non-finite values.")
# MLE for the uniform distribution
# --------------------------------
# The PDF is
#
# f(x, loc, scale) = {1/scale for loc <= x <= loc + scale
# {0 otherwise}
#
# The likelihood function is
# L(x, loc, scale) = (1/scale)**n
# where n is len(x), assuming loc <= x <= loc + scale for all x.
# The log-likelihood is
# l(x, loc, scale) = -n*log(scale)
# The log-likelihood is maximized by making scale as small as possible,
# while keeping loc <= x <= loc + scale. So if neither loc nor scale
# are fixed, the log-likelihood is maximized by choosing
# loc = x.min()
# scale = np.ptp(x)
# If loc is fixed, it must be less than or equal to x.min(), and then
# the scale is
# scale = x.max() - loc
# If scale is fixed, it must not be less than np.ptp(x). If scale is
# greater than np.ptp(x), the solution is not unique. Note that the
# likelihood does not depend on loc, except for the requirement that
# loc <= x <= loc + scale. All choices of loc for which
# x.max() - scale <= loc <= x.min()
# have the same log-likelihood. In this case, we choose loc such that
# the support is centered over the interval [data.min(), data.max()]:
        # loc = x.min() - 0.5*(scale - np.ptp(x))
if fscale is None:
# scale is not fixed.
if floc is None:
# loc is not fixed, scale is not fixed.
loc = data.min()
scale = np.ptp(data)
else:
# loc is fixed, scale is not fixed.
loc = floc
scale = data.max() - loc
if data.min() < loc:
raise FitDataError("uniform", lower=loc, upper=loc + scale)
else:
# loc is not fixed, scale is fixed.
ptp = np.ptp(data)
if ptp > fscale:
raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
# If ptp < fscale, the ML estimate is not unique; see the comments
# above. We choose the distribution for which the support is
# centered over the interval [data.min(), data.max()].
loc = data.min() - 0.5*(fscale - ptp)
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
r"""A Von Mises continuous random variable.
%(before_notes)s
See Also
--------
scipy.stats.vonmises_fisher : Von-Mises Fisher distribution on a
hypersphere
Notes
-----
The probability density function for `vonmises` and `vonmises_line` is:
.. math::
f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the
modified Bessel function of order zero (`scipy.special.i0`).
`vonmises` is a circular distribution which does not restrict the
distribution to a fixed interval. Currently, there is no circular
distribution framework in SciPy. The ``cdf`` is implemented such that
``cdf(x + 2*np.pi) == cdf(x) + 1``.
`vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
on the real line. This is a regular (i.e. non-circular) distribution.
Note about distribution parameters: `vonmises` and `vonmises_line` take
``kappa`` as a shape parameter (concentration) and ``loc`` as the location
(circular mean). A ``scale`` parameter is accepted but does not have any
effect.
Examples
--------
Import the necessary modules.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import vonmises
Define distribution parameters.
>>> loc = 0.5 * np.pi # circular mean
>>> kappa = 1 # concentration
Compute the probability density at ``x=0`` via the ``pdf`` method.
    >>> vonmises.pdf(0, kappa, loc)
0.12570826359722018
Verify that the percentile function ``ppf`` inverts the cumulative
distribution function ``cdf`` up to floating point accuracy.
>>> x = 1
>>> cdf_value = vonmises.cdf(loc=loc, kappa=kappa, x=x)
>>> ppf_value = vonmises.ppf(cdf_value, loc=loc, kappa=kappa)
>>> x, cdf_value, ppf_value
(1, 0.31489339900904967, 1.0000000000000004)
Draw 1000 random variates by calling the ``rvs`` method.
>>> number_of_samples = 1000
>>> samples = vonmises(loc=loc, kappa=kappa).rvs(number_of_samples)
Plot the von Mises density on a Cartesian and polar grid to emphasize
    that it is a circular distribution.
>>> fig = plt.figure(figsize=(12, 6))
>>> left = plt.subplot(121)
>>> right = plt.subplot(122, projection='polar')
>>> x = np.linspace(-np.pi, np.pi, 500)
>>> vonmises_pdf = vonmises.pdf(loc, kappa, x)
>>> ticks = [0, 0.15, 0.3]
The left image contains the Cartesian plot.
>>> left.plot(x, vonmises_pdf)
>>> left.set_yticks(ticks)
>>> number_of_bins = int(np.sqrt(number_of_samples))
>>> left.hist(samples, density=True, bins=number_of_bins)
>>> left.set_title("Cartesian plot")
>>> left.set_xlim(-np.pi, np.pi)
>>> left.grid(True)
The right image contains the polar plot.
>>> right.plot(x, vonmises_pdf, label="PDF")
>>> right.set_yticks(ticks)
>>> right.hist(samples, density=True, bins=number_of_bins,
... label="Histogram")
>>> right.set_title("Polar plot")
>>> right.legend(bbox_to_anchor=(0.15, 1.06))
"""
def _shape_info(self):
return [_ShapeInfo("kappa", False, (0, np.inf), (False, False))]
def _rvs(self, kappa, size=None, random_state=None):
return random_state.vonmises(0.0, kappa, size=size)
@inherit_docstring_from(rv_continuous)
def rvs(self, *args, **kwds):
rvs = super().rvs(*args, **kwds)
return np.mod(rvs + np.pi, 2*np.pi) - np.pi
def _pdf(self, x, kappa):
# vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
# = exp(kappa * (cos(x) - 1)) /
# (2*pi*exp(-kappa)*I[0](kappa))
# = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
return np.exp(kappa*sc.cosm1(x)) / (2*np.pi*sc.i0e(kappa))
def _logpdf(self, x, kappa):
# vonmises.pdf(x, kappa) = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
return kappa * sc.cosm1(x) - np.log(2*np.pi) - np.log(sc.i0e(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
# vonmises.entropy(kappa) = -kappa * I[1](kappa) / I[0](kappa) +
# log(2 * np.pi * I[0](kappa))
# = -kappa * I[1](kappa) * exp(-kappa) /
# (I[0](kappa) * exp(-kappa)) +
# log(2 * np.pi *
# I[0](kappa) * exp(-kappa) / exp(-kappa))
# = -kappa * sc.i1e(kappa) / sc.i0e(kappa) +
# log(2 * np.pi * i0e(kappa)) + kappa
return (-kappa * sc.i1e(kappa) / sc.i0e(kappa) +
np.log(2 * np.pi * sc.i0e(kappa)) + kappa)
@extend_notes_in_docstring(rv_continuous, notes="""\
The default limits of integration are endpoints of the interval
of width ``2*pi`` centered at `loc` (e.g. ``[-pi, pi]`` when
``loc=0``).\n\n""")
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
_a, _b = -np.pi, np.pi
if lb is None:
lb = loc + _a
if ub is None:
ub = loc + _b
return super().expect(func, args, loc,
scale, lb, ub, conditional, **kwds)
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
Fit data is assumed to represent angles and will be wrapped onto the
unit circle. `f0` and `fscale` are ignored; the returned shape is
always the maximum likelihood estimate and the scale is always
1. Initial guesses are ignored.\n\n""")
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, fshape, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
if self.a == -np.pi:
# vonmises line case, here the default fit method will be used
return super().fit(data, *args, **kwds)
# wrap data to interval [0, 2*pi]
data = np.mod(data, 2 * np.pi)
def find_mu(data):
return stats.circmean(data)
def find_kappa(data, loc):
# Usually, sources list the following as the equation to solve for
# the MLE of the shape parameter:
# r = I[1](kappa)/I[0](kappa), where r = mean resultant length
# This is valid when the location is the MLE of location.
# More generally, when the location may be fixed at an arbitrary
# value, r should be defined as follows:
r = np.sum(np.cos(loc - data))/len(data)
# See gh-18128 for more information.
if r > 0:
def solve_for_kappa(kappa):
return sc.i1e(kappa)/sc.i0e(kappa) - r
root_res = root_scalar(solve_for_kappa, method="brentq",
bracket=(np.finfo(float).tiny, 1e16))
return root_res.root
else:
# if the provided floc is very far from the circular mean,
# the mean resultant length r can become negative.
# In that case, the equation
# I[1](kappa)/I[0](kappa) = r does not have a solution.
# The maximum likelihood kappa is then 0 which practically
# results in the uniform distribution on the circle. As
# vonmises is defined for kappa > 0, return instead the
# smallest floating point value.
# See gh-18190 for more information
return np.finfo(float).tiny
# location likelihood equation has a solution independent of kappa
loc = floc if floc is not None else find_mu(data)
# shape likelihood equation depends on location
shape = fshape if fshape is not None else find_kappa(data, loc)
loc = np.mod(loc + np.pi, 2 * np.pi) - np.pi # ensure in [-pi, pi]
return shape, loc, 1 # scale is not handled
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
r"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
for :math:`x >= 0`.
`wald` is a special case of `invgauss` with ``mu=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.wald(1.0, 1.0, size=size)
def _pdf(self, x):
# wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
return invgauss._pdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _sf(self, x):
return invgauss._sf(x, 1.0)
def _ppf(self, x):
return invgauss._ppf(x, 1.0)
def _isf(self, x):
return invgauss._isf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _logcdf(self, x):
return invgauss._logcdf(x, 1.0)
def _logsf(self, x):
return invgauss._logsf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
def _entropy(self):
return invgauss._entropy(1.0)
wald = wald_gen(a=0.0, name="wald")
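# Illustrative check (hypothetical doctest; `wald` delegates to `invgauss`
# with ``mu=1``, so the densities agree):
#     >>> import numpy as np
#     >>> x = np.array([0.5, 1.0, 2.0])
#     >>> np.allclose(wald.pdf(x), invgauss.pdf(x, 1.0))
#     True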
class wrapcauchy_gen(rv_continuous):
r"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is:
.. math::
f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
`wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _shape_info(self):
return [_ShapeInfo("c", False, (0, 1), (False, False))]
def _pdf(self, x, c):
# wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
def f1(x, cr):
# CDF for 0 <= x < pi
return 1/np.pi * np.arctan(cr*np.tan(x/2))
def f2(x, cr):
# CDF for pi <= x <= 2*pi
return 1 - 1/np.pi * np.arctan(cr*np.tan((2*np.pi - x)/2))
cr = (1 + c)/(1 - c)
return _lazywhere(x < np.pi, (x, cr), f=f1, f2=f2)
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
def _fitstart(self, data):
# Use 0.5 as the initial guess of the shape parameter.
# For the location and scale, use the minimum and
# peak-to-peak/(2*pi), respectively.
if isinstance(data, CensoredData):
data = data._uncensor()
return 0.5, np.min(data), np.ptp(data)/(2*np.pi)
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
r"""A generalized normal continuous random variable.
%(before_notes)s
See Also
--------
laplace : Laplace distribution
norm : normal distribution
Notes
-----
The probability density function for `gennorm` is [1]_:
.. math::
f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta),
where :math:`x` is a real number, :math:`\beta > 0` and
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to a Laplace distribution.
For :math:`\beta = 2`, it is identical to a normal distribution
(with ``scale=1/sqrt(2)``).
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
.. [2] Nardon, Martina, and Paolo Pianca. "Simulation techniques for
generalized Gaussian densities." Journal of Statistical
Computation and Simulation 79.11 (2009): 1317-1329
.. [3] Wicklin, Rick. "Simulate data from a generalized Gaussian
distribution" in The DO Loop blog, September 21, 2016,
https://blogs.sas.com/content/iml/2016/09/21/simulate-generalized-gaussian-sas.html
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("beta", False, (0, np.inf), (False, False))]
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
def _rvs(self, beta, size=None, random_state=None):
# see [2]_ for the algorithm
# see [3]_ for reference implementation in SAS
z = random_state.gamma(1/beta, size=size)
y = z ** (1/beta)
# convert y to array to ensure masking support
y = np.asarray(y)
mask = random_state.random(size=y.shape) < 0.5
y[mask] = -y[mask]
return y
gennorm = gennorm_gen(name='gennorm')
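# Illustrative check (hypothetical doctest) of the special cases noted in the
# docstring: ``beta=1`` recovers `laplace`, and ``beta=2`` recovers `norm`
# with ``scale=1/sqrt(2)``:
#     >>> import numpy as np
#     >>> x = np.linspace(-2, 2, 5)
#     >>> np.allclose(gennorm.pdf(x, 1), laplace.pdf(x))
#     True
#     >>> np.allclose(gennorm.pdf(x, 2), norm.pdf(x, scale=1/np.sqrt(2)))
#     True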
class halfgennorm_gen(rv_continuous):
r"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
Notes
-----
The probability density function for `halfgennorm` is:
.. math::
f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta)
for :math:`x, \beta > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
`halfgennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to an exponential distribution.
For :math:`\beta = 2`, it is identical to a half normal distribution
(with ``scale=1/sqrt(2)``).
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("beta", False, (0, np.inf), (False, False))]
def _pdf(self, x, beta):
# beta
# halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
# gamma(1/beta)
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
class crystalball_gen(rv_continuous):
r"""
Crystalball distribution
%(before_notes)s
Notes
-----
The probability density function for `crystalball` is:
.. math::
f(x, \beta, m) = \begin{cases}
N \exp(-x^2 / 2), &\text{for } x > -\beta\\
N A (B - x)^{-m} &\text{for } x \le -\beta
\end{cases}
where :math:`A = (m / |\beta|)^m \exp(-\beta^2 / 2)`,
:math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
`crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape
parameters. :math:`\beta` defines the point where the pdf changes
from a power-law to a Gaussian distribution. :math:`m` is the power
of the power-law tail.
References
----------
.. [1] "Crystal Ball Function",
https://en.wikipedia.org/wiki/Crystal_Ball_function
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _argcheck(self, beta, m):
"""
Shape parameter bounds are m > 1 and beta > 0.
"""
return (m > 1) & (beta > 0)
def _shape_info(self):
ibeta = _ShapeInfo("beta", False, (0, np.inf), (False, False))
im = _ShapeInfo("m", False, (1, np.inf), (False, False))
return [ibeta, im]
def _fitstart(self, data):
# Arbitrary, but the default m=1 is not valid
return super()._fitstart(data, args=(1, 1.5))
def _pdf(self, x, beta, m):
"""
Return PDF of the crystalball function.
--
| exp(-x**2 / 2), for x > -beta
crystalball.pdf(x, beta, m) = N * |
| A * (B - x)**(-m), for x <= -beta
--
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return np.exp(-x**2 / 2)
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _logpdf(self, x, beta, m):
"""
Return the log of the PDF of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return -x**2/2
def lhs(x, beta, m):
return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x)
return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _cdf(self, x, beta, m):
"""
Return CDF of the crystalball function
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) +
_norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)))
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m+1) / (m-1))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _ppf(self, p, beta, m):
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1)
def ppf_less(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return (m/beta - beta -
((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m)))
def ppf_greater(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C))
return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater)
def _munp(self, n, beta, m):
"""
Returns the n-th non-central moment of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def n_th_moment(n, beta, m):
"""
Returns n-th moment. Defined only if n+1 < m
Function cannot broadcast due to the loop over n
"""
A = (m/beta)**m * np.exp(-beta**2 / 2.0)
B = m/beta - beta
rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) *
(1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)))
lhs = np.zeros(rhs.shape)
for k in range(n + 1):
lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) *
(m/beta)**(-m + k + 1))
return A * lhs + rhs
return N * _lazywhere(n + 1 < m, (n, beta, m),
np.vectorize(n_th_moment, otypes=[np.float64]),
np.inf)
crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function")
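# Illustrative check (hypothetical session; result approximate): the constant
# N computed in the methods above normalises the density, e.g. for beta=1,
# m=3:
#     >>> from scipy import integrate
#     >>> integrate.quad(lambda x: crystalball.pdf(x, 1.0, 3.0),
#     ...                -np.inf, np.inf)[0]  # ~= 1.0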
def _argus_phi(chi):
"""
Utility function for the argus distribution used in the pdf, sf and
moment calculation.
Note that for all x > 0:
gammainc(1.5, x**2/2) = 2 * (_norm_cdf(x) - x * _norm_pdf(x) - 0.5).
This can be verified directly by noting that the cdf of Gamma(1.5) can
be written as erf(sqrt(x)) - 2*sqrt(x)*exp(-x)/sqrt(Pi).
We use gammainc instead of the usual definition because it is more precise
for small chi.
"""
return sc.gammainc(1.5, chi**2/2) / 2
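# Illustrative check (hypothetical session; values approximate) of the
# identity quoted in the docstring, at x = 1.3:
#     >>> sc.gammainc(1.5, 1.3**2/2)                         # ~= 0.361
#     >>> 2 * (_norm_cdf(1.3) - 1.3 * _norm_pdf(1.3) - 0.5)  # ~= 0.361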
class argus_gen(rv_continuous):
r"""
Argus distribution
%(before_notes)s
Notes
-----
The probability density function for `argus` is:
.. math::
f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
\exp(-\chi^2 (1 - x^2)/2)
for :math:`0 < x < 1` and :math:`\chi > 0`, where
.. math::
\Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
normal distribution, respectively.
    `argus` takes :math:`\chi` as a shape parameter.
%(after_notes)s
References
----------
.. [1] "ARGUS distribution",
https://en.wikipedia.org/wiki/ARGUS_distribution
.. versionadded:: 0.19.0
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("chi", False, (0, np.inf), (False, False))]
def _logpdf(self, x, chi):
# for x = 0 or 1, logpdf returns -np.inf
with np.errstate(divide='ignore'):
y = 1.0 - x*x
A = 3*np.log(chi) - _norm_pdf_logC - np.log(_argus_phi(chi))
return A + np.log(x) + 0.5*np.log1p(-x*x) - chi**2 * y / 2
def _pdf(self, x, chi):
return np.exp(self._logpdf(x, chi))
def _cdf(self, x, chi):
return 1.0 - self._sf(x, chi)
def _sf(self, x, chi):
return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
def _rvs(self, chi, size=None, random_state=None):
chi = np.asarray(chi)
if chi.size == 1:
out = self._rvs_scalar(chi, numsamples=size,
random_state=random_state)
else:
shp, bc = _check_shape(chi.shape, size)
numsamples = int(np.prod(shp))
out = np.empty(size)
it = np.nditer([chi],
flags=['multi_index'],
op_flags=[['readonly']])
while not it.finished:
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
r = self._rvs_scalar(it[0], numsamples=numsamples,
random_state=random_state)
out[idx] = r.reshape(shp)
it.iternext()
if size == ():
out = out[()]
return out
def _rvs_scalar(self, chi, numsamples=None, random_state=None):
# if chi <= 1.8:
# use rejection method, see Devroye:
# Non-Uniform Random Variate Generation, 1986, section II.3.2.
# write: PDF f(x) = c * g(x) * h(x), where
# h is [0,1]-valued and g is a density
# we use two ways to write f
#
# Case 1:
# write g(x) = 3*x*sqrt(1-x**2), h(x) = exp(-chi**2 (1-x**2) / 2)
# If X has a distribution with density g its ppf G_inv is given by:
# G_inv(u) = np.sqrt(1 - u**(2/3))
#
# Case 2:
# g(x) = chi**2 * x * exp(-chi**2 * (1-x**2)/2) / (1 - exp(-chi**2 /2))
# h(x) = sqrt(1 - x**2), 0 <= x <= 1
# one can show that
# G_inv(u) = np.sqrt(2*np.log(u*(np.exp(chi**2/2)-1)+1))/chi
# = np.sqrt(1 + 2*np.log(np.exp(-chi**2/2)*(1-u)+u)/chi**2)
# the latter expression is used for precision with small chi
#
# In both cases, the inverse cdf of g can be written analytically, and
# we can apply the rejection method:
#
# REPEAT
# Generate U uniformly distributed on [0, 1]
# Generate X with density g (e.g. via inverse transform sampling:
# X = G_inv(V) with V uniformly distributed on [0, 1])
# UNTIL X <= h(X)
# RETURN X
#
# We use case 1 for chi <= 0.5 as it maintains precision for small chi
# and case 2 for 0.5 < chi <= 1.8 due to its speed for moderate chi.
#
# if chi > 1.8:
        # use relation to the Gamma distribution: if X is ARGUS with parameter
        # chi, then Y = chi**2 * (1 - X**2) / 2 has density proportional to
        # sqrt(u) * exp(-u) on [0, chi**2 / 2], i.e. a Gamma(3/2) distribution
        # conditioned on [0, chi**2 / 2]. Therefore, to sample X from the
# ARGUS distribution, we sample Y from the gamma distribution, keeping
# only samples on [0, chi**2 / 2], and apply the inverse
# transformation X = (1 - 2*Y/chi**2)**(1/2). Since we only
        # look at chi > 1.8, gamma(1.5).cdf(chi**2/2) is large enough such
        # that Y falls in the interval [0, chi**2 / 2] with high probability:
# stats.gamma(1.5).cdf(1.8**2/2) = 0.644...
#
# The points to switch between the different methods are determined
# by a comparison of the runtime of the different methods. However,
# the runtime is platform-dependent. The implemented values should
# ensure a good overall performance and are supported by an analysis
# of the rejection constants of different methods.
size1d = tuple(np.atleast_1d(numsamples))
N = int(np.prod(size1d))
x = np.zeros(N)
simulated = 0
chi2 = chi * chi
if chi <= 0.5:
d = -chi2 / 2
while simulated < N:
k = N - simulated
u = random_state.uniform(size=k)
v = random_state.uniform(size=k)
z = v**(2/3)
# acceptance condition: u <= h(G_inv(v)). This simplifies to
accept = (np.log(u) <= d * z)
num_accept = np.sum(accept)
if num_accept > 0:
# we still need to transform z=v**(2/3) to X = G_inv(v)
rvs = np.sqrt(1 - z[accept])
x[simulated:(simulated + num_accept)] = rvs
simulated += num_accept
elif chi <= 1.8:
echi = np.exp(-chi2 / 2)
while simulated < N:
k = N - simulated
u = random_state.uniform(size=k)
v = random_state.uniform(size=k)
z = 2 * np.log(echi * (1 - v) + v) / chi2
# as in case one, simplify u <= h(G_inv(v)) and then transform
# z to the target distribution X = G_inv(v)
accept = (u**2 + z <= 0)
num_accept = np.sum(accept)
if num_accept > 0:
rvs = np.sqrt(1 + z[accept])
x[simulated:(simulated + num_accept)] = rvs
simulated += num_accept
else:
# conditional Gamma for chi > 1.8
while simulated < N:
k = N - simulated
g = random_state.standard_gamma(1.5, size=k)
accept = (g <= chi2 / 2)
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = g[accept]
simulated += num_accept
x = np.sqrt(1 - 2 * x / chi2)
return np.reshape(x, size1d)
def _stats(self, chi):
# need to ensure that dtype is float
# otherwise the mask below does not work for integers
chi = np.asarray(chi, dtype=float)
phi = _argus_phi(chi)
m = np.sqrt(np.pi/8) * chi * sc.ive(1, chi**2/4) / phi
# compute second moment, use Taylor expansion for small chi (<= 0.1)
mu2 = np.empty_like(chi)
mask = chi > 0.1
c = chi[mask]
mu2[mask] = 1 - 3 / c**2 + c * _norm_pdf(c) / phi[mask]
c = chi[~mask]
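        # `np.polyval` coefficients (highest power first) of the Taylor
        # expansion of the second moment around chi = 0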
coef = [-358/65690625, 0, -94/1010625, 0, 2/2625, 0, 6/175, 0, 0.4]
mu2[~mask] = np.polyval(coef, c)
return m, mu2 - m**2, None, None
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
class rv_histogram(rv_continuous):
"""
Generates a distribution given by a histogram.
This is useful to generate a template distribution from a binned
datasample.
As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
a collection of generic methods (see `rv_continuous` for the full list),
and implements them based on the properties of the provided binned
datasample.
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects.
The first containing the content of n bins,
the second containing the (n+1) bin boundaries.
In particular, the return value of `numpy.histogram` is accepted.
density : bool, optional
If False, assumes the histogram is proportional to counts per bin;
otherwise, assumes it is proportional to a density.
For constant bin widths, these are equivalent, but the distinction
is important when bin widths vary (see Notes).
If None (default), sets ``density=True`` for backwards compatibility,
but warns if the bin widths are variable. Set `density` explicitly
to silence the warning.
.. versionadded:: 1.10.0
Notes
-----
When a histogram has unequal bin widths, there is a distinction between
histograms that are proportional to counts per bin and histograms that are
proportional to probability density over a bin. If `numpy.histogram` is
called with its default ``density=False``, the resulting histogram is the
number of counts per bin, so ``density=False`` should be passed to
`rv_histogram`. If `numpy.histogram` is called with ``density=True``, the
resulting histogram is in terms of probability density, so ``density=True``
should be passed to `rv_histogram`. To avoid warnings, always pass
``density`` explicitly when the input histogram has unequal bin widths.
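    For example (a minimal illustration), with bin widths 1 and 2 and equal
    counts per bin, ``density=False`` converts the counts to a density:
    >>> import numpy as np
    >>> from scipy import stats
    >>> counts, bins = [10, 10], [0.0, 1.0, 3.0]
    >>> dist = stats.rv_histogram((counts, bins), density=False)
    >>> dist.pdf([0.5, 2.0])  # the wider bin gets half the density
    array([0.5 , 0.25])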
There are no additional shape parameters except for the loc and scale.
The pdf is defined as a stepwise function from the provided histogram.
The cdf is a linear interpolation of the pdf.
.. versionadded:: 0.19.0
Examples
--------
Create a scipy.stats distribution from a numpy histogram
>>> import scipy.stats
>>> import numpy as np
>>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5,
... random_state=123)
>>> hist = np.histogram(data, bins=100)
>>> hist_dist = scipy.stats.rv_histogram(hist, density=False)
Behaves like an ordinary scipy rv_continuous distribution
>>> hist_dist.pdf(1.0)
0.20538577847618705
>>> hist_dist.cdf(2.0)
0.90818568543056499
PDF is zero above (below) the highest (lowest) bin of the histogram,
defined by the max (min) of the original dataset
>>> hist_dist.pdf(np.max(data))
0.0
>>> hist_dist.cdf(np.max(data))
1.0
>>> hist_dist.pdf(np.min(data))
7.7591907244498314e-05
>>> hist_dist.cdf(np.min(data))
0.0
PDF and CDF follow the histogram
>>> import matplotlib.pyplot as plt
>>> X = np.linspace(-5.0, 5.0, 100)
>>> fig, ax = plt.subplots()
>>> ax.set_title("PDF from Template")
>>> ax.hist(data, density=True, bins=100)
>>> ax.plot(X, hist_dist.pdf(X), label='PDF')
>>> ax.plot(X, hist_dist.cdf(X), label='CDF')
>>> ax.legend()
>>> fig.show()
"""
_support_mask = rv_continuous._support_mask
def __init__(self, histogram, *args, density=None, **kwargs):
"""
Create a new distribution using the given histogram
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects.
The first containing the content of n bins,
the second containing the (n+1) bin boundaries.
In particular, the return value of np.histogram is accepted.
density : bool, optional
If False, assumes the histogram is proportional to counts per bin;
otherwise, assumes it is proportional to a density.
For constant bin widths, these are equivalent.
If None (default), sets ``density=True`` for backward
compatibility, but warns if the bin widths are variable. Set
`density` explicitly to silence the warning.
"""
self._histogram = histogram
self._density = density
if len(histogram) != 2:
raise ValueError("Expected length 2 for parameter histogram")
self._hpdf = np.asarray(histogram[0])
self._hbins = np.asarray(histogram[1])
if len(self._hpdf) + 1 != len(self._hbins):
raise ValueError("Number of elements in histogram content "
"and histogram boundaries do not match, "
"expected n and n+1.")
self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
bins_vary = not np.allclose(self._hbin_widths, self._hbin_widths[0])
if density is None and bins_vary:
message = ("Bin widths are not constant. Assuming `density=True`."
"Specify `density` explicitly to silence this warning.")
warnings.warn(message, RuntimeWarning, stacklevel=2)
density = True
elif not density:
self._hpdf = self._hpdf / self._hbin_widths
self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
self._hcdf = np.hstack([0.0, self._hcdf])
# Set support
kwargs['a'] = self.a = self._hbins[0]
kwargs['b'] = self.b = self._hbins[-1]
super().__init__(*args, **kwargs)
def _pdf(self, x):
"""
PDF of the histogram
"""
return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
def _cdf(self, x):
"""
CDF calculated from the histogram
"""
return np.interp(x, self._hbins, self._hcdf)
def _ppf(self, x):
"""
Percentile function calculated from the histogram
"""
return np.interp(x, self._hcdf, self._hbins)
def _munp(self, n):
"""Compute the n-th non-central moment."""
integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
return np.sum(self._hpdf[1:-1] * integrals)
def _entropy(self):
"""Compute entropy of distribution"""
res = _lazywhere(self._hpdf[1:-1] > 0.0,
(self._hpdf[1:-1],),
np.log,
0.0)
return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
def _updated_ctor_param(self):
"""
Set the histogram as additional constructor argument
"""
dct = super()._updated_ctor_param()
dct['histogram'] = self._histogram
dct['density'] = self._density
return dct
class studentized_range_gen(rv_continuous):
r"""A studentized range continuous random variable.
%(before_notes)s
See Also
--------
t: Student's t distribution
Notes
-----
The probability density function for `studentized_range` is:
.. math::
f(x; k, \nu) = \frac{k(k-1)\nu^{\nu/2}}{\Gamma(\nu/2)
2^{\nu/2-1}} \int_{0}^{\infty} \int_{-\infty}^{\infty}
s^{\nu} e^{-\nu s^2/2} \phi(z) \phi(sx + z)
[\Phi(sx + z) - \Phi(z)]^{k-2} \,dz \,ds
    for :math:`x \geq 0`, :math:`k > 1`, and :math:`\nu > 0`.
`studentized_range` takes ``k`` for :math:`k` and ``df`` for :math:`\nu`
as shape parameters.
    When :math:`\nu` exceeds 100,000, an asymptotic approximation (infinite
    degrees of freedom) is used to compute the cumulative distribution
    function [4]_ and probability density function.
%(after_notes)s
References
----------
.. [1] "Studentized range distribution",
https://en.wikipedia.org/wiki/Studentized_range_distribution
.. [2] Batista, Ben Dêivide, et al. "Externally Studentized Normal Midrange
Distribution." Ciência e Agrotecnologia, vol. 41, no. 4, 2017, pp.
378-389., doi:10.1590/1413-70542017414047716.
.. [3] Harter, H. Leon. "Tables of Range and Studentized Range." The Annals
of Mathematical Statistics, vol. 31, no. 4, 1960, pp. 1122-1147.
JSTOR, www.jstor.org/stable/2237810. Accessed 18 Feb. 2021.
.. [4] Lund, R. E., and J. R. Lund. "Algorithm AS 190: Probabilities and
Upper Quantiles for the Studentized Range." Journal of the Royal
Statistical Society. Series C (Applied Statistics), vol. 32, no. 2,
1983, pp. 204-210. JSTOR, www.jstor.org/stable/2347300. Accessed 18
Feb. 2021.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import studentized_range
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
>>> k, df = 3, 10
>>> mean, var, skew, kurt = studentized_range.stats(k, df, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(studentized_range.ppf(0.01, k, df),
... studentized_range.ppf(0.99, k, df), 100)
>>> ax.plot(x, studentized_range.pdf(x, k, df),
... 'r-', lw=5, alpha=0.6, label='studentized_range pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = studentized_range(k, df)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = studentized_range.ppf([0.001, 0.5, 0.999], k, df)
>>> np.allclose([0.001, 0.5, 0.999], studentized_range.cdf(vals, k, df))
True
Rather than using (``studentized_range.rvs``) to generate random variates,
which is very slow for this distribution, we can approximate the inverse
CDF using an interpolator, and then perform inverse transform sampling
with this approximate inverse CDF.
This distribution has an infinite but thin right tail, so we focus our
attention on the leftmost 99.9 percent.
>>> a, b = studentized_range.ppf([0, .999], k, df)
>>> a, b
    (0.0, 7.41058083802274)
>>> from scipy.interpolate import interp1d
>>> rng = np.random.default_rng()
>>> xs = np.linspace(a, b, 50)
>>> cdf = studentized_range.cdf(xs, k, df)
    >>> # Create an interpolant of the inverse CDF
>>> ppf = interp1d(cdf, xs, fill_value='extrapolate')
    >>> # Perform inverse transform sampling using the interpolant
>>> r = ppf(rng.uniform(size=1000))
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
def _argcheck(self, k, df):
return (k > 1) & (df > 0)
def _shape_info(self):
ik = _ShapeInfo("k", False, (1, np.inf), (False, False))
idf = _ShapeInfo("df", False, (0, np.inf), (False, False))
return [ik, idf]
def _fitstart(self, data):
# Default is k=1, but that is not a valid value of the parameter.
return super()._fitstart(data, args=(2, 1))
def _munp(self, K, k, df):
cython_symbol = '_studentized_range_moment'
_a, _b = self._get_support()
# all three of these are used to create a numpy array so they must
# be the same shape.
def _single_moment(K, k, df):
log_const = _stats._studentized_range_pdf_logconst(k, df)
arg = [K, k, df, log_const]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
ranges = [(-np.inf, np.inf), (0, np.inf), (_a, _b)]
opts = dict(epsabs=1e-11, epsrel=1e-12)
return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
ufunc = np.frompyfunc(_single_moment, 3, 1)
return np.asarray(ufunc(K, k, df), dtype=np.float64)[()]
def _pdf(self, x, k, df):
def _single_pdf(q, k, df):
# The infinite form of the PDF is derived from the infinite
# CDF.
if df < 100000:
cython_symbol = '_studentized_range_pdf'
log_const = _stats._studentized_range_pdf_logconst(k, df)
arg = [q, k, df, log_const]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
ranges = [(-np.inf, np.inf), (0, np.inf)]
else:
cython_symbol = '_studentized_range_pdf_asymptotic'
arg = [q, k]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
ranges = [(-np.inf, np.inf)]
llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
opts = dict(epsabs=1e-11, epsrel=1e-12)
return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
ufunc = np.frompyfunc(_single_pdf, 3, 1)
return np.asarray(ufunc(x, k, df), dtype=np.float64)[()]
def _cdf(self, x, k, df):
def _single_cdf(q, k, df):
# "When the degrees of freedom V are infinite the probability
# integral takes [on a] simpler form," and a single asymptotic
# integral is evaluated rather than the standard double integral.
# (Lund, Lund, page 205)
if df < 100000:
cython_symbol = '_studentized_range_cdf'
log_const = _stats._studentized_range_cdf_logconst(k, df)
arg = [q, k, df, log_const]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
ranges = [(-np.inf, np.inf), (0, np.inf)]
else:
cython_symbol = '_studentized_range_cdf_asymptotic'
arg = [q, k]
usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
ranges = [(-np.inf, np.inf)]
llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
opts = dict(epsabs=1e-11, epsrel=1e-12)
return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
ufunc = np.frompyfunc(_single_cdf, 3, 1)
# clip p-values to ensure they are in [0, 1].
return np.clip(np.asarray(ufunc(x, k, df), dtype=np.float64)[()], 0, 1)
studentized_range = studentized_range_gen(name='studentized_range', a=0,
b=np.inf)
class rel_breitwigner_gen(rv_continuous):
r"""A relativistic Breit-Wigner random variable.
%(before_notes)s
See Also
--------
cauchy: Cauchy distribution, also known as the Breit-Wigner distribution.
Notes
-----
The probability density function for `rel_breitwigner` is
.. math::
f(x, \rho) = \frac{k}{(x^2 - \rho^2)^2 + \rho^2}
where
.. math::
k = \frac{2\sqrt{2}\rho^2\sqrt{\rho^2 + 1}}
{\pi\sqrt{\rho^2 + \rho\sqrt{\rho^2 + 1}}}
The relativistic Breit-Wigner distribution is used in high energy physics
to model resonances [1]_. It gives the uncertainty in the invariant mass,
:math:`M` [2]_, of a resonance with characteristic mass :math:`M_0` and
decay-width :math:`\Gamma`, where :math:`M`, :math:`M_0` and :math:`\Gamma`
are expressed in natural units. In SciPy's parametrization, the shape
parameter :math:`\rho` is equal to :math:`M_0/\Gamma` and takes values in
:math:`(0, \infty)`.
Equivalently, the relativistic Breit-Wigner distribution is said to give
the uncertainty in the center-of-mass energy :math:`E_{\text{cm}}`. In
natural units, the speed of light :math:`c` is equal to 1 and the invariant
mass :math:`M` is equal to the rest energy :math:`Mc^2`. In the
center-of-mass frame, the rest energy is equal to the total energy [3]_.
%(after_notes)s
    :math:`\rho = M_0/\Gamma` and :math:`\Gamma` is the scale parameter. For
example, if one seeks to model the :math:`Z^0` boson with :math:`M_0
\approx 91.1876 \text{ GeV}` and :math:`\Gamma \approx 2.4952\text{ GeV}`
[4]_ one can set ``rho=91.1876/2.4952`` and ``scale=2.4952``.
To ensure a physically meaningful result when using the `fit` method, one
should set ``floc=0`` to fix the location parameter to 0.
References
----------
.. [1] Relativistic Breit-Wigner distribution, Wikipedia,
https://en.wikipedia.org/wiki/Relativistic_Breit-Wigner_distribution
.. [2] Invariant mass, Wikipedia,
https://en.wikipedia.org/wiki/Invariant_mass
.. [3] Center-of-momentum frame, Wikipedia,
https://en.wikipedia.org/wiki/Center-of-momentum_frame
.. [4] M. Tanabashi et al. (Particle Data Group) Phys. Rev. D 98, 030001 -
Published 17 August 2018
%(example)s
"""
def _argcheck(self, rho):
return rho > 0
def _shape_info(self):
return [_ShapeInfo("rho", False, (0, np.inf), (False, False))]
def _pdf(self, x, rho):
# C = k / rho**2
C = np.sqrt(
2 * (1 + 1/rho**2) / (1 + np.sqrt(1 + 1/rho**2))
) * 2 / np.pi
with np.errstate(over='ignore'):
return C / (((x - rho)*(x + rho)/rho)**2 + 1)
def _cdf(self, x, rho):
# C = k / (2 * rho**2) / np.sqrt(1 + 1/rho**2)
C = np.sqrt(2/(1 + np.sqrt(1 + 1/rho**2)))/np.pi
result = (
np.sqrt(-1 + 1j/rho)
* np.arctan(x/np.sqrt(-rho*(rho + 1j)))
)
result = C * 2 * np.imag(result)
        # Sometimes the above formula produces values greater than 1.
return np.clip(result, None, 1)
def _munp(self, n, rho):
if n == 1:
# C = k / (2 * rho)
C = np.sqrt(
2 * (1 + 1/rho**2) / (1 + np.sqrt(1 + 1/rho**2))
) / np.pi * rho
return C * (np.pi/2 + np.arctan(rho))
if n == 2:
# C = pi * k / (4 * rho)
C = np.sqrt(
(1 + 1/rho**2) / (2 * (1 + np.sqrt(1 + 1/rho**2)))
) * rho
result = (1 - rho * 1j) / np.sqrt(-1 - 1j/rho)
return 2 * C * np.real(result)
else:
return np.inf
def _stats(self, rho):
# Returning None from stats makes public stats use _munp.
# nan values will be omitted from public stats. Skew and
# kurtosis are actually infinite.
return None, None, np.nan, np.nan
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit to better handle case where floc is set.
data, _, floc, fscale = _check_fit_input_parameters(
self, data, args, kwds
)
censored = isinstance(data, CensoredData)
if censored:
if data.num_censored() == 0:
# There are no censored values in data, so replace the
# CensoredData instance with a regular array.
data = data._uncensored
censored = False
if floc is None or censored:
return super().fit(data, *args, **kwds)
if fscale is None:
# The interquartile range approximates the scale parameter gamma.
# The median approximates rho * gamma.
p25, p50, p75 = np.quantile(data - floc, [0.25, 0.5, 0.75])
scale_0 = p75 - p25
rho_0 = p50 / scale_0
if not args:
args = [rho_0]
if "scale" not in kwds:
kwds["scale"] = scale_0
else:
M_0 = np.median(data - floc)
rho_0 = M_0 / fscale
if not args:
args = [rho_0]
return super().fit(data, *args, **kwds)
rel_breitwigner = rel_breitwigner_gen(a=0.0, name="rel_breitwigner")
# Collect names of classes and objects in this module.
pairs = list(globals().copy().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
| 376,025 | 31.207794 | 97 | py | scipy | scipy-main/scipy/stats/_survival.py |
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
import warnings
import numpy as np
from scipy import special, interpolate, stats
from scipy.stats._censored_data import CensoredData
from scipy.stats._common import ConfidenceInterval
if TYPE_CHECKING:
from typing import Literal
import numpy.typing as npt
__all__ = ['ecdf', 'logrank']
@dataclass
class EmpiricalDistributionFunction:
"""An empirical distribution function produced by `scipy.stats.ecdf`
Attributes
----------
quantiles : ndarray
The unique values of the sample from which the
`EmpiricalDistributionFunction` was estimated.
probabilities : ndarray
The point estimates of the cumulative distribution function (CDF) or
its complement, the survival function (SF), corresponding with
`quantiles`.
"""
quantiles: np.ndarray
probabilities: np.ndarray
# Exclude these from __str__
_n: np.ndarray = field(repr=False) # number "at risk"
_d: np.ndarray = field(repr=False) # number of "deaths"
_sf: np.ndarray = field(repr=False) # survival function for var estimate
_kind: str = field(repr=False) # type of function: "cdf" or "sf"
def __init__(self, q, p, n, d, kind):
self.probabilities = p
self.quantiles = q
self._n = n
self._d = d
self._sf = p if kind == 'sf' else 1 - p
self._kind = kind
f0 = 1 if kind == 'sf' else 0 # leftmost function value
f1 = 1 - f0
# fill_value can't handle edge cases at infinity
x = np.insert(q, [0, len(q)], [-np.inf, np.inf])
y = np.insert(p, [0, len(p)], [f0, f1])
        # `kind='previous'` yields the piecewise-constant step function; the
        # +/-inf padding above also handles the case of empty `q`
self._f = interpolate.interp1d(x, y, kind='previous',
assume_sorted=True)
def evaluate(self, x):
"""Evaluate the empirical CDF/SF function at the input.
Parameters
----------
x : ndarray
Argument to the CDF/SF
Returns
-------
y : ndarray
The CDF/SF evaluated at the input
"""
return self._f(x)
def plot(self, ax=None, **matplotlib_kwargs):
"""Plot the empirical distribution function
Available only if ``matplotlib`` is installed.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to draw the plot onto, otherwise uses the current Axes.
**matplotlib_kwargs : dict, optional
Keyword arguments passed directly to `matplotlib.axes.Axes.step`.
Unless overridden, ``where='post'``.
Returns
-------
lines : list of `matplotlib.lines.Line2D`
Objects representing the plotted data
"""
try:
import matplotlib # noqa
except ModuleNotFoundError as exc:
message = "matplotlib must be installed to use method `plot`."
raise ModuleNotFoundError(message) from exc
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
kwargs = {'where': 'post'}
kwargs.update(matplotlib_kwargs)
delta = np.ptp(self.quantiles)*0.05 # how far past sample edge to plot
q = self.quantiles
q = [q[0] - delta] + list(q) + [q[-1] + delta]
return ax.step(q, self.evaluate(q), **kwargs)
def confidence_interval(self, confidence_level=0.95, *, method='linear'):
"""Compute a confidence interval around the CDF/SF point estimate
Parameters
----------
confidence_level : float, default: 0.95
Confidence level for the computed confidence interval
method : str, {"linear", "log-log"}
Method used to compute the confidence interval. Options are
"linear" for the conventional Greenwood confidence interval
(default) and "log-log" for the "exponential Greenwood",
log-negative-log-transformed confidence interval.
Returns
-------
ci : ``ConfidenceInterval``
An object with attributes ``low`` and ``high``, instances of
`~scipy.stats._result_classes.EmpiricalDistributionFunction` that
represent the lower and upper bounds (respectively) of the
confidence interval.
Notes
-----
Confidence intervals are computed according to the Greenwood formula
(``method='linear'``) or the more recent "exponential Greenwood"
formula (``method='log-log'``) as described in [1]_. The conventional
Greenwood formula can result in lower confidence limits less than 0
and upper confidence limits greater than 1; these are clipped to the
unit interval. NaNs may be produced by either method; these are
features of the formulas.
References
----------
.. [1] Sawyer, Stanley. "The Greenwood and Exponential Greenwood
Confidence Intervals in Survival Analysis."
https://www.math.wustl.edu/~sawyer/handouts/greenwood.pdf
"""
message = ("Confidence interval bounds do not implement a "
"`confidence_interval` method.")
if self._n is None:
raise NotImplementedError(message)
methods = {'linear': self._linear_ci,
'log-log': self._loglog_ci}
message = f"`method` must be one of {set(methods)}."
if method.lower() not in methods:
raise ValueError(message)
message = "`confidence_level` must be a scalar between 0 and 1."
confidence_level = np.asarray(confidence_level)[()]
if confidence_level.shape or not (0 <= confidence_level <= 1):
raise ValueError(message)
method_fun = methods[method.lower()]
low, high = method_fun(confidence_level)
message = ("The confidence interval is undefined at some observations."
" This is a feature of the mathematical formula used, not"
" an error in its implementation.")
if np.any(np.isnan(low) | np.isnan(high)):
warnings.warn(message, RuntimeWarning, stacklevel=2)
low, high = np.clip(low, 0, 1), np.clip(high, 0, 1)
low = EmpiricalDistributionFunction(self.quantiles, low, None, None,
self._kind)
high = EmpiricalDistributionFunction(self.quantiles, high, None, None,
self._kind)
return ConfidenceInterval(low, high)
def _linear_ci(self, confidence_level):
sf, d, n = self._sf, self._d, self._n
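        # Greenwood's formula: Var(S(t)) ~= S(t)**2 * sum_{i: t_i <= t}
        # d_i / (n_i * (n_i - d_i)), accumulated below over the ordered
        # event times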
# When n == d, Greenwood's formula divides by zero.
        # When sf != 0, this can be ignored: var == inf, and CI is [0, 1]
        # When sf == 0, this results in NaNs. Produce an informative warning.
with np.errstate(divide='ignore', invalid='ignore'):
var = sf ** 2 * np.cumsum(d / (n * (n - d)))
se = np.sqrt(var)
z = special.ndtri(1 / 2 + confidence_level / 2)
z_se = z * se
low = self.probabilities - z_se
high = self.probabilities + z_se
return low, high
def _loglog_ci(self, confidence_level):
sf, d, n = self._sf, self._d, self._n
with np.errstate(divide='ignore', invalid='ignore'):
var = 1 / np.log(sf) ** 2 * np.cumsum(d / (n * (n - d)))
se = np.sqrt(var)
z = special.ndtri(1 / 2 + confidence_level / 2)
with np.errstate(divide='ignore'):
lnl_points = np.log(-np.log(sf))
z_se = z * se
low = np.exp(-np.exp(lnl_points + z_se))
high = np.exp(-np.exp(lnl_points - z_se))
if self._kind == "cdf":
low, high = 1-high, 1-low
return low, high
@dataclass
class ECDFResult:
""" Result object returned by `scipy.stats.ecdf`
Attributes
----------
cdf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
An object representing the empirical cumulative distribution function.
sf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
An object representing the complement of the empirical cumulative
distribution function.
"""
cdf: EmpiricalDistributionFunction
sf: EmpiricalDistributionFunction
def __init__(self, q, cdf, sf, n, d):
self.cdf = EmpiricalDistributionFunction(q, cdf, n, d, "cdf")
self.sf = EmpiricalDistributionFunction(q, sf, n, d, "sf")
def _iv_CensoredData(
sample: npt.ArrayLike | CensoredData, param_name: str = 'sample'
) -> CensoredData:
"""Attempt to convert `sample` to `CensoredData`."""
if not isinstance(sample, CensoredData):
try: # takes care of input standardization/validation
sample = CensoredData(uncensored=sample)
except ValueError as e:
message = str(e).replace('uncensored', param_name)
raise type(e)(message) from e
return sample
def ecdf(sample: npt.ArrayLike | CensoredData) -> ECDFResult:
"""Empirical cumulative distribution function of a sample.
The empirical cumulative distribution function (ECDF) is a step function
estimate of the CDF of the distribution underlying a sample. This function
returns objects representing both the empirical distribution function and
its complement, the empirical survival function.
Parameters
----------
sample : 1D array_like or `scipy.stats.CensoredData`
Besides array_like, instances of `scipy.stats.CensoredData` containing
uncensored and right-censored observations are supported. Currently,
other instances of `scipy.stats.CensoredData` will result in a
``NotImplementedError``.
Returns
-------
res : `~scipy.stats._result_classes.ECDFResult`
An object with the following attributes.
cdf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
An object representing the empirical cumulative distribution
function.
sf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
An object representing the empirical survival function.
The `cdf` and `sf` attributes themselves have the following attributes.
quantiles : ndarray
            The unique values in the sample that define the empirical CDF/SF.
probabilities : ndarray
The point estimates of the probabilities corresponding with
`quantiles`.
And the following methods:
evaluate(x) :
Evaluate the CDF/SF at the argument.
plot(ax) :
Plot the CDF/SF on the provided axes.
confidence_interval(confidence_level=0.95) :
Compute the confidence interval around the CDF/SF at the values in
`quantiles`.
Notes
-----
When each observation of the sample is a precise measurement, the ECDF
steps up by ``1/len(sample)`` at each of the observations [1]_.
When observations are lower bounds, upper bounds, or both upper and lower
bounds, the data is said to be "censored", and `sample` may be provided as
an instance of `scipy.stats.CensoredData`.
For right-censored data, the ECDF is given by the Kaplan-Meier estimator
[2]_; other forms of censoring are not supported at this time.
Confidence intervals are computed according to the Greenwood formula or the
more recent "Exponential Greenwood" formula as described in [4]_.
References
----------
.. [1] Conover, William Jay. Practical nonparametric statistics. Vol. 350.
John Wiley & Sons, 1999.
.. [2] Kaplan, Edward L., and Paul Meier. "Nonparametric estimation from
incomplete observations." Journal of the American statistical
association 53.282 (1958): 457-481.
.. [3] Goel, Manish Kumar, Pardeep Khanna, and Jugal Kishore.
"Understanding survival analysis: Kaplan-Meier estimate."
International journal of Ayurveda research 1.4 (2010): 274.
.. [4] Sawyer, Stanley. "The Greenwood and Exponential Greenwood Confidence
Intervals in Survival Analysis."
https://www.math.wustl.edu/~sawyer/handouts/greenwood.pdf
Examples
--------
**Uncensored Data**
As in the example from [1]_ page 79, five boys were selected at random from
those in a single high school. Their one-mile run times were recorded as
follows.
>>> sample = [6.23, 5.58, 7.06, 6.42, 5.20] # one-mile run times (minutes)
The empirical distribution function, which approximates the distribution
function of one-mile run times of the population from which the boys were
sampled, is calculated as follows.
>>> from scipy import stats
>>> res = stats.ecdf(sample)
>>> res.cdf.quantiles
array([5.2 , 5.58, 6.23, 6.42, 7.06])
>>> res.cdf.probabilities
array([0.2, 0.4, 0.6, 0.8, 1. ])
To plot the result as a step function:
>>> import matplotlib.pyplot as plt
>>> ax = plt.subplot()
>>> res.cdf.plot(ax)
>>> ax.set_xlabel('One-Mile Run Time (minutes)')
>>> ax.set_ylabel('Empirical CDF')
>>> plt.show()
**Right-censored Data**
As in the example from [1]_ page 91, the lives of ten car fanbelts were
tested. Five tests concluded because the fanbelt being tested broke, but
the remaining tests concluded for other reasons (e.g. the study ran out of
funding, but the fanbelt was still functional). The mileage driven
    with the fanbelts was recorded as follows.
>>> broken = [77, 47, 81, 56, 80] # in thousands of miles driven
>>> unbroken = [62, 60, 43, 71, 37]
Precise survival times of the fanbelts that were still functional at the
end of the tests are unknown, but they are known to exceed the values
recorded in ``unbroken``. Therefore, these observations are said to be
"right-censored", and the data is represented using
`scipy.stats.CensoredData`.
>>> sample = stats.CensoredData(uncensored=broken, right=unbroken)
The empirical survival function is calculated as follows.
>>> res = stats.ecdf(sample)
>>> res.sf.quantiles
array([37., 43., 47., 56., 60., 62., 71., 77., 80., 81.])
>>> res.sf.probabilities
array([1. , 1. , 0.875, 0.75 , 0.75 , 0.75 , 0.75 , 0.5 , 0.25 , 0. ])
To plot the result as a step function:
>>> ax = plt.subplot()
    >>> res.sf.plot(ax)
>>> ax.set_xlabel('Fanbelt Survival Time (thousands of miles)')
>>> ax.set_ylabel('Empirical SF')
>>> plt.show()
"""
sample = _iv_CensoredData(sample)
if sample.num_censored() == 0:
res = _ecdf_uncensored(sample._uncensor())
elif sample.num_censored() == sample._right.size:
res = _ecdf_right_censored(sample)
else:
# Support additional censoring options in follow-up PRs
message = ("Currently, only uncensored and right-censored data is "
"supported.")
raise NotImplementedError(message)
t, cdf, sf, n, d = res
return ECDFResult(t, cdf, sf, n, d)
def _ecdf_uncensored(sample):
sample = np.sort(sample)
x, counts = np.unique(sample, return_counts=True)
    # [1] p. 81: "the fraction of [observations] that are less than or equal to x"
events = np.cumsum(counts)
n = sample.size
cdf = events / n
    # [1] p. 89: "the relative frequency of the sample that exceeds x in value"
sf = 1 - cdf
at_risk = np.concatenate(([n], n - events[:-1]))
return x, cdf, sf, at_risk, counts
def _ecdf_right_censored(sample):
# It is conventional to discuss right-censored data in terms of
# "survival time", "death", and "loss" (e.g. [2]). We'll use that
# terminology here.
# This implementation was influenced by the references cited and also
# https://www.youtube.com/watch?v=lxoWsVco_iM
# https://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
# In retrospect it is probably most easily compared against [3].
# Ultimately, the data needs to be sorted, so this implementation is
# written to avoid a separate call to `unique` after sorting. In hope of
# better performance on large datasets, it also computes survival
# probabilities at unique times only rather than at each observation.
tod = sample._uncensored # time of "death"
tol = sample._right # time of "loss"
times = np.concatenate((tod, tol))
died = np.asarray([1]*tod.size + [0]*tol.size)
# sort by times
i = np.argsort(times)
times = times[i]
died = died[i]
at_risk = np.arange(times.size, 0, -1)
# logical indices of unique times
j = np.diff(times, prepend=-np.inf, append=np.inf) > 0
j_l = j[:-1] # first instances of unique times
j_r = j[1:] # last instances of unique times
# get number at risk and deaths at each unique time
t = times[j_l] # unique times
n = at_risk[j_l] # number at risk at each unique time
cd = np.cumsum(died)[j_r] # cumulative deaths up to/including unique times
d = np.diff(cd, prepend=0) # deaths at each unique time
# compute survival function
sf = np.cumprod((n - d) / n)
cdf = 1 - sf
return t, cdf, sf, n, d
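# Worked micro-example (comment only) of the bookkeeping above: uncensored
# times [3, 5] and a right-censored time [4] give
#     t = [3, 4, 5], n = [3, 2, 1], d = [1, 0, 1]
#     sf = cumprod((n - d) / n) = [2/3, 2/3, 0]
# i.e. the survival estimate drops only at observed deaths, while the
# censored observation still reduces the number at risk.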
@dataclass
class LogRankResult:
"""Result object returned by `scipy.stats.logrank`.
Attributes
----------
statistic : float ndarray
The computed statistic (defined below). Its magnitude is the
square root of the magnitude returned by most other logrank test
implementations.
pvalue : float ndarray
The computed p-value of the test.
"""
statistic: np.ndarray
pvalue: np.ndarray
def logrank(
x: npt.ArrayLike | CensoredData,
y: npt.ArrayLike | CensoredData,
alternative: Literal['two-sided', 'less', 'greater'] = "two-sided"
) -> LogRankResult:
r"""Compare the survival distributions of two samples via the logrank test.
Parameters
----------
x, y : array_like or CensoredData
Samples to compare based on their empirical survival functions.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The null hypothesis is that the survival distributions of the two
groups, say *X* and *Y*, are identical.
The following alternative hypotheses [4]_ are available (default is
'two-sided'):
* 'two-sided': the survival distributions of the two groups are not
identical.
* 'less': survival of group *X* is favored: the group *X* failure rate
function is less than the group *Y* failure rate function at some
times.
* 'greater': survival of group *Y* is favored: the group *X* failure
rate function is greater than the group *Y* failure rate function at
some times.
Returns
-------
res : `~scipy.stats._result_classes.LogRankResult`
An object containing attributes:
statistic : float ndarray
The computed statistic (defined below). Its magnitude is the
square root of the magnitude returned by most other logrank test
implementations.
pvalue : float ndarray
The computed p-value of the test.
See Also
--------
scipy.stats.ecdf
Notes
-----
The logrank test [1]_ compares the observed number of events to
the expected number of events under the null hypothesis that the two
samples were drawn from the same distribution. The statistic is
.. math::
Z_i = \frac{\sum_{j=1}^J(O_{i,j}-E_{i,j})}{\sqrt{\sum_{j=1}^J V_{i,j}}}
\rightarrow \mathcal{N}(0,1)
where
.. math::
E_{i,j} = O_j \frac{N_{i,j}}{N_j},
\qquad
V_{i,j} = E_{i,j} \left(\frac{N_j-O_j}{N_j}\right)
\left(\frac{N_j-N_{i,j}}{N_j-1}\right),
:math:`i` denotes the group (i.e. it may assume values :math:`x` or
    :math:`y`, or it may be omitted to refer to the combined sample),
    :math:`j` denotes the time (at which an event occurred),
    :math:`N` is the number of subjects at risk just before an event occurred,
and :math:`O` is the observed number of events at that time.
The ``statistic`` :math:`Z_x` returned by `logrank` is the (signed) square
root of the statistic returned by many other implementations. Under the
    null hypothesis, :math:`Z_x^2` is asymptotically distributed according to
the chi-squared distribution with one degree of freedom. Consequently,
:math:`Z_x` is asymptotically distributed according to the standard normal
distribution. The advantage of using :math:`Z_x` is that the sign
information (i.e. whether the observed number of events tends to be less
than or greater than the number expected under the null hypothesis) is
preserved, allowing `scipy.stats.logrank` to offer one-sided alternative
hypotheses.
References
----------
.. [1] Mantel N. "Evaluation of survival data and two new rank order
statistics arising in its consideration."
Cancer Chemotherapy Reports, 50(3):163-170, PMID: 5910392, 1966
.. [2] Bland, Altman, "The logrank test", BMJ, 328:1073,
:doi:`10.1136/bmj.328.7447.1073`, 2004
.. [3] "Logrank test", Wikipedia,
https://en.wikipedia.org/wiki/Logrank_test
.. [4] Brown, Mark. "On the choice of variance for the log rank test."
Biometrika 71.1 (1984): 65-74.
.. [5] Klein, John P., and Melvin L. Moeschberger. Survival analysis:
techniques for censored and truncated data. Vol. 1230. New York:
Springer, 2003.
Examples
--------
Reference [2]_ compared the survival times of patients with two different
types of recurrent malignant gliomas. The samples below record the time
(number of weeks) for which each patient participated in the study. The
`scipy.stats.CensoredData` class is used because the data is
right-censored: the uncensored observations correspond with observed deaths
whereas the censored observations correspond with the patient leaving the
study for another reason.
>>> from scipy import stats
>>> x = stats.CensoredData(
... uncensored=[6, 13, 21, 30, 37, 38, 49, 50,
... 63, 79, 86, 98, 202, 219],
... right=[31, 47, 80, 82, 82, 149]
... )
>>> y = stats.CensoredData(
... uncensored=[10, 10, 12, 13, 14, 15, 16, 17, 18, 20, 24, 24,
    ...                 25, 28, 30, 33, 35, 37, 40, 40, 46, 48, 76, 81,
... 82, 91, 112, 181],
... right=[34, 40, 70]
... )
We can calculate and visualize the empirical survival functions
of both groups as follows.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> ax = plt.subplot()
>>> ecdf_x = stats.ecdf(x)
>>> ecdf_x.sf.plot(ax, label='Astrocytoma')
>>> ecdf_y = stats.ecdf(y)
    >>> ecdf_y.sf.plot(ax, label='Glioblastoma')
>>> ax.set_xlabel('Time to death (weeks)')
>>> ax.set_ylabel('Empirical SF')
>>> plt.legend()
>>> plt.show()
Visual inspection of the empirical survival functions suggests that the
survival times tend to be different between the two groups. To formally
assess whether the difference is significant at the 1% level, we use the
logrank test.
>>> res = stats.logrank(x=x, y=y)
>>> res.statistic
-2.73799...
>>> res.pvalue
0.00618...
The p-value is less than 1%, so we can consider the data to be evidence
against the null hypothesis in favor of the alternative that there is a
difference between the two survival functions.
"""
# Input validation. `alternative` IV handled in `_normtest_finish` below.
x = _iv_CensoredData(sample=x, param_name='x')
y = _iv_CensoredData(sample=y, param_name='y')
# Combined sample. (Under H0, the two groups are identical.)
xy = CensoredData(
uncensored=np.concatenate((x._uncensored, y._uncensored)),
right=np.concatenate((x._right, y._right))
)
# Extract data from the combined sample
res = ecdf(xy)
idx = res.sf._d.astype(bool) # indices of observed events
times_xy = res.sf.quantiles[idx] # unique times of observed events
at_risk_xy = res.sf._n[idx] # combined number of subjects at risk
deaths_xy = res.sf._d[idx] # combined number of events
# Get the number at risk within each sample.
# First compute the number at risk in group X at each of the `times_xy`.
# Could use `interpolate_1d`, but this is more compact.
res_x = ecdf(x)
i = np.searchsorted(res_x.sf.quantiles, times_xy)
at_risk_x = np.append(res_x.sf._n, 0)[i] # 0 at risk after last time
# Subtract from the combined number at risk to get number at risk in Y
at_risk_y = at_risk_xy - at_risk_x
# Compute the variance.
num = at_risk_x * at_risk_y * deaths_xy * (at_risk_xy - deaths_xy)
den = at_risk_xy**2 * (at_risk_xy - 1)
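    # This matches the docstring formula: summing V_j = n_x * n_y * d_j *
    # (N_j - d_j) / (N_j**2 * (N_j - 1)), the variance of a hypergeometric
    # count.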
    # Note: when `at_risk_xy == 1`, the lone subject at risk must die, so both
    # the numerator (via `at_risk_xy - deaths_xy == 0`) and the denominator
    # (via `at_risk_xy - 1 == 0`) vanish. Simplifying the fraction
    # symbolically, we would always find the overall quotient to be zero, so
    # don't compute it.
i = at_risk_xy > 1
sum_var = np.sum(num[i]/den[i])
# Get the observed and expected number of deaths in group X
n_died_x = x._uncensored.size
sum_exp_deaths_x = np.sum(at_risk_x * (deaths_xy/at_risk_xy))
# Compute the statistic. This is the square root of that in references.
statistic = (n_died_x - sum_exp_deaths_x)/np.sqrt(sum_var)
# Equivalent to chi2(df=1).sf(statistic**2) when alternative='two-sided'
_, pvalue = stats._stats_py._normtest_finish(
z=statistic, alternative=alternative
)
return LogRankResult(statistic=statistic, pvalue=pvalue)
| 25,965 | 36.741279 | 81 | py | scipy | scipy-main/scipy/stats/distributions.py |
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
# NOTE: To look at history using `git blame`, use `git blame -M -C -C`
# instead of `git blame -Lxxx,+x`.
#
from ._distn_infrastructure import (rv_discrete, rv_continuous, rv_frozen) # noqa: F401
from . import _continuous_distns
from . import _discrete_distns
from ._continuous_distns import *
from ._levy_stable import levy_stable
from ._discrete_distns import *
from ._entropy import entropy
# For backwards compatibility (e.g. pymc expects distributions.__all__).
__all__ = ['rv_discrete', 'rv_continuous', 'rv_histogram', 'entropy']
# Add only the distribution names, not the *_gen names.
__all__ += _continuous_distns._distn_names
__all__ += ['levy_stable']
__all__ += _discrete_distns._distn_names
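# Hedged usage sketch (not part of the original module): `__all__` should
# expose plain distribution names, but not the internal `*_gen` class names.
#
#     >>> from scipy.stats import distributions
#     >>> 'norm' in distributions.__all__
#     True
#     >>> any(name.endswith('_gen') for name in distributions.__all__)
#     False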
| 817 | 31.72 | 88 | py |
| scipy | scipy-main/scipy/stats/_hypotests.py |
from collections import namedtuple
from dataclasses import dataclass
from math import comb
import numpy as np
import warnings
from itertools import combinations
import scipy.stats
from scipy.optimize import shgo
from . import distributions
from ._common import ConfidenceInterval
from ._continuous_distns import chi2, norm
from scipy.special import gamma, kv, gammaln
from scipy.fft import ifft
from ._stats_pythran import _a_ij_Aij_Dij2
from ._stats_pythran import (
_concordant_pairs as _P, _discordant_pairs as _Q
)
from scipy.stats import _stats_py
__all__ = ['epps_singleton_2samp', 'cramervonmises', 'somersd',
'barnard_exact', 'boschloo_exact', 'cramervonmises_2samp',
'tukey_hsd', 'poisson_means_test']
Epps_Singleton_2sampResult = namedtuple('Epps_Singleton_2sampResult',
('statistic', 'pvalue'))
def epps_singleton_2samp(x, y, t=(0.4, 0.8)):
"""Compute the Epps-Singleton (ES) test statistic.
Test the null hypothesis that two samples have the same underlying
probability distribution.
Parameters
----------
x, y : array-like
The two samples of observations to be tested. Input must not have more
than one dimension. Samples can have different lengths.
t : array-like, optional
The points (t1, ..., tn) where the empirical characteristic function is
to be evaluated. These should be distinct, positive numbers. The default
value (0.4, 0.8) is proposed in [1]_. Input must not have more than
one dimension.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The associated p-value based on the asymptotic chi2-distribution.
See Also
--------
ks_2samp, anderson_ksamp
Notes
-----
Testing whether two samples are generated by the same underlying
distribution is a classical question in statistics. A widely used test is
the Kolmogorov-Smirnov (KS) test which relies on the empirical
distribution function. Epps and Singleton introduce a test based on the
empirical characteristic function in [1]_.
One advantage of the ES test compared to the KS test is that it does
not assume a continuous distribution. In [1]_, the authors conclude
that the test also has a higher power than the KS test in many
examples. They recommend the use of the ES test for discrete samples as
well as continuous samples with at least 25 observations each, whereas
`anderson_ksamp` is recommended for smaller sample sizes in the
continuous case.
The p-value is computed from the asymptotic distribution of the test
statistic which follows a `chi2` distribution. If the sample size of both
`x` and `y` is below 25, the small sample correction proposed in [1]_ is
applied to the test statistic.
The default values of `t` are determined in [1]_ by considering
various distributions and finding good values that lead to a high power
of the test in general. Table III in [1]_ gives the optimal values for
the distributions tested in that study. The values of `t` are scaled by
the semi-interquartile range in the implementation, see [1]_.
References
----------
.. [1] T. W. Epps and K. J. Singleton, "An omnibus test for the two-sample
problem using the empirical characteristic function", Journal of
Statistical Computation and Simulation 26, p. 177--203, 1986.
.. [2] S. J. Goerg and J. Kaiser, "Nonparametric testing of distributions
- the Epps-Singleton two-sample test using the empirical characteristic
function", The Stata Journal 9(3), p. 454--465, 2009.
"""
x, y, t = np.asarray(x), np.asarray(y), np.asarray(t)
# check if x and y are valid inputs
if x.ndim > 1:
raise ValueError(f'x must be 1d, but x.ndim equals {x.ndim}.')
if y.ndim > 1:
raise ValueError(f'y must be 1d, but y.ndim equals {y.ndim}.')
nx, ny = len(x), len(y)
if (nx < 5) or (ny < 5):
raise ValueError('x and y should have at least 5 elements, but len(x) '
'= {} and len(y) = {}.'.format(nx, ny))
if not np.isfinite(x).all():
raise ValueError('x must not contain nonfinite values.')
if not np.isfinite(y).all():
raise ValueError('y must not contain nonfinite values.')
n = nx + ny
# check if t is valid
if t.ndim > 1:
raise ValueError(f't must be 1d, but t.ndim equals {t.ndim}.')
if np.less_equal(t, 0).any():
raise ValueError('t must contain positive elements only.')
# rescale t with semi-iqr as proposed in [1]; import iqr here to avoid
# circular import
from scipy.stats import iqr
sigma = iqr(np.hstack((x, y))) / 2
ts = np.reshape(t, (-1, 1)) / sigma
# covariance estimation of ES test
gx = np.vstack((np.cos(ts*x), np.sin(ts*x))).T # shape = (nx, 2*len(t))
gy = np.vstack((np.cos(ts*y), np.sin(ts*y))).T
cov_x = np.cov(gx.T, bias=True) # the test uses biased cov-estimate
cov_y = np.cov(gy.T, bias=True)
est_cov = (n/nx)*cov_x + (n/ny)*cov_y
est_cov_inv = np.linalg.pinv(est_cov)
r = np.linalg.matrix_rank(est_cov_inv)
if r < 2*len(t):
warnings.warn('Estimated covariance matrix does not have full rank. '
'This indicates a bad choice of the input t and the '
'test might not be consistent.') # see p. 183 in [1]_
# compute test statistic w distributed asympt. as chisquare with df=r
g_diff = np.mean(gx, axis=0) - np.mean(gy, axis=0)
w = n*np.dot(g_diff.T, np.dot(est_cov_inv, g_diff))
# apply small-sample correction
if (max(nx, ny) < 25):
corr = 1.0/(1.0 + n**(-0.45) + 10.1*(nx**(-1.7) + ny**(-1.7)))
w = corr * w
p = chi2.sf(w, r)
return Epps_Singleton_2sampResult(w, p)
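# Hedged usage sketch (not in the original source): the ES test applied to
# two discrete samples, a case the KS test does not formally support. The
# seed, rates, and sample sizes below are illustrative assumptions.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> rng = np.random.default_rng(42)
#     >>> x = rng.poisson(1.0, size=100)
#     >>> y = rng.poisson(2.0, size=100)
#     >>> res = stats.epps_singleton_2samp(x, y)
#     >>> res.pvalue  # expected to be far below 0.05 for these samples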
def poisson_means_test(k1, n1, k2, n2, *, diff=0, alternative='two-sided'):
r"""
Performs the Poisson means test, AKA the "E-test".
This is a test of the null hypothesis that the difference between means of
two Poisson distributions is `diff`. The samples are provided as the
number of events `k1` and `k2` observed within measurement intervals
(e.g. of time, space, number of observations) of sizes `n1` and `n2`.
Parameters
----------
k1 : int
Number of events observed from distribution 1.
n1 : float
Size of sample from distribution 1.
k2 : int
Number of events observed from distribution 2.
n2 : float
Size of sample from distribution 2.
diff : float, default=0
The hypothesized difference in means between the distributions
underlying the samples.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the difference between distribution means is not
equal to `diff`
* 'less': the difference between distribution means is less than
`diff`
* 'greater': the difference between distribution means is greater
than `diff`
Returns
-------
statistic : float
The test statistic (see [1]_ equation 3.3).
pvalue : float
The probability of achieving such an extreme value of the test
statistic under the null hypothesis.
Notes
-----
Let:
.. math:: X_1 \sim \mbox{Poisson}(\mathtt{n1}\lambda_1)
be a random variable independent of
.. math:: X_2 \sim \mbox{Poisson}(\mathtt{n2}\lambda_2)
and let ``k1`` and ``k2`` be the observed values of :math:`X_1`
and :math:`X_2`, respectively. Then `poisson_means_test` uses the number
of observed events ``k1`` and ``k2`` from samples of size ``n1`` and
``n2``, respectively, to test the null hypothesis that
.. math::
H_0: \lambda_1 - \lambda_2 = \mathtt{diff}
A benefit of the E-test is that it has good power for small sample sizes,
which can reduce sampling costs [1]_. It has been evaluated and determined
to be more powerful than the comparable C-test, sometimes referred to as
the Poisson exact test.
References
----------
.. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
comparing two Poisson means. Journal of Statistical Planning and
Inference, 119(1), 23-35.
.. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
testing samples from Poisson series: With an application to testing
clover seed for dodder. Biometrika, 31(3/4), 313-323.
Examples
--------
Suppose that a gardener wishes to test the number of dodder (weed) seeds
in a sack of clover seeds that they buy from a seed company. It has
previously been established that the number of dodder seeds in clover
follows the Poisson distribution.
A 100 gram sample is drawn from the sack before being shipped to the
gardener. The sample is analyzed, and it is found to contain no dodder
seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
another 100 gram sample from the sack. This time, three dodder seeds are
found in the sample; that is, `k2` is 3. The gardener would like to
know if the difference is significant and not due to chance. The
null hypothesis is that the difference between the two samples is merely
due to chance, or that :math:`\lambda_1 - \lambda_2 = \mathtt{diff}`
where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
difference is not due to chance, or :math:`\lambda_1 - \lambda_2 \ne 0`.
The gardener selects a significance level of 5% to reject the null
hypothesis in favor of the alternative [2]_.
>>> import scipy.stats as stats
>>> res = stats.poisson_means_test(0, 100, 3, 100)
>>> res.statistic, res.pvalue
(-1.7320508075688772, 0.08837900929018157)
The p-value is .088, indicating a near 9% chance of observing a value of
the test statistic under the null hypothesis. This exceeds 5%, so the
gardener does not reject the null hypothesis as the difference cannot be
regarded as significant at this level.
"""
_poisson_means_test_iv(k1, n1, k2, n2, diff, alternative)
# "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4)
lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))
# "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
# case the null hypothesis cannot be rejected ... [and] it is not necessary
# to compute the p-value". [1] page 26 below eq. (3.6).
if lmbd_hat2 <= 0:
return _stats_py.SignificanceResult(0, 1)
# The unbiased variance estimate [1] (3.2)
var = k1 / (n1 ** 2) + k2 / (n2 ** 2)
# The _observed_ pivot statistic from the input. It follows the
# unnumbered equation following equation (3.3) This is used later in
# comparison with the computed pivot statistics in an indicator function.
t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)
# Equation (3.5) of [1] is lengthy, so it is broken into several parts,
# beginning here. Note that the probability mass function of the poisson
# distribution is exp(-\mu)*\mu^k/k!, so it is evaluated with shape \mu,
# denoted here as nlmbd_hat*. The strategy for evaluating the double
# summation in
# (3.5) is to create two arrays of the values of the two products inside
# the summation and then broadcast them together into a matrix, and then
# sum across the entire matrix.
# Compute constants (as seen in the first and second separated products in
# (3.5).). (This is the shape (\mu) parameter of the poisson distribution.)
nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
nlmbd_hat2 = n2 * lmbd_hat2
# Determine summation bounds for tail ends of distribution rather than
# summing to infinity. `x1*` is for the outer sum and `x2*` is the inner
# sum.
x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)
# Construct arrays to function as the x_1 and x_2 counters on the summation
# in (3.5). `x1` is in columns and `x2` is in rows to allow for
# broadcasting.
x1 = np.arange(x1_lb, x1_ub + 1)
x2 = np.arange(x2_lb, x2_ub + 1)[:, None]
# These are the two products in equation (3.5) with `prob_x1` being the
# first (left side) and `prob_x2` being the second (right side). (To
# make as clear as possible: the 1st contains a "+ d" term, the 2nd does
# not.)
prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)
# compute constants for use in the "pivot statistic" per the
# unnumbered equation following (3.3).
lmbd_x1 = x1 / n1
lmbd_x2 = x2 / n2
lmbds_diff = lmbd_x1 - lmbd_x2 - diff
var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2
# This is the 'pivot statistic' for use in the indicator of the summation
# (left side of "I[.]").
with np.errstate(invalid='ignore', divide='ignore'):
t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)
# `[indicator]` implements the "I[.] ... the indicator function" per
# the paragraph following equation (3.5).
if alternative == 'two-sided':
indicator = np.abs(t_x1x2) >= np.abs(t_k1k2)
elif alternative == 'less':
indicator = t_x1x2 <= t_k1k2
else:
indicator = t_x1x2 >= t_k1k2
# Multiply all combinations of the products together, exclude terms
# based on the `indicator` and then sum. (3.5)
pvalue = np.sum((prob_x1 * prob_x2)[indicator])
return _stats_py.SignificanceResult(t_k1k2, pvalue)
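# Hedged sketch (not in the original source): the observed statistic is the
# Wald pivot from the unnumbered equation following (3.3), which can be
# reproduced by hand for the dodder-seed example in the docstring above.
#
#     >>> import numpy as np
#     >>> k1, n1, k2, n2 = 0, 100, 3, 100
#     >>> var = k1 / n1**2 + k2 / n2**2
#     >>> (k1/n1 - k2/n2) / np.sqrt(var)  # matches res.statistic above
#     -1.7320508075688772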
def _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative):
# """check for valid types and values of input to `poisson_mean_test`."""
if k1 != int(k1) or k2 != int(k2):
raise TypeError('`k1` and `k2` must be integers.')
count_err = '`k1` and `k2` must be greater than or equal to 0.'
if k1 < 0 or k2 < 0:
raise ValueError(count_err)
if n1 <= 0 or n2 <= 0:
raise ValueError('`n1` and `n2` must be greater than 0.')
if diff < 0:
raise ValueError('`diff` must be greater than or equal to 0.')
alternatives = {'two-sided', 'less', 'greater'}
if alternative.lower() not in alternatives:
raise ValueError(f"Alternative must be one of '{alternatives}'.")
class CramerVonMisesResult:
def __init__(self, statistic, pvalue):
self.statistic = statistic
self.pvalue = pvalue
def __repr__(self):
return (f"{self.__class__.__name__}(statistic={self.statistic}, "
f"pvalue={self.pvalue})")
def _psi1_mod(x):
"""
psi1 is defined in equation 1.10 in Csörgő, S. and Faraway, J. (1996).
This implements a modified version by excluding the term V(x) / 12
(here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x)
twice in _cdf_cvm.
Implementation based on MAPLE code of Julian Faraway and R code of the
function pCvM in the package goftest (v1.1.1), permission granted
by Adrian Baddeley. Main difference in the implementation: the code
here keeps adding terms of the series until the terms are small enough.
"""
def _ed2(y):
z = y**2 / 4
b = kv(1/4, z) + kv(3/4, z)
return np.exp(-z) * (y/2)**(3/2) * b / np.sqrt(np.pi)
def _ed3(y):
z = y**2 / 4
c = np.exp(-z) / np.sqrt(np.pi)
return c * (y/2)**(5/2) * (2*kv(1/4, z) + 3*kv(3/4, z) - kv(5/4, z))
def _Ak(k, x):
m = 2*k + 1
sx = 2 * np.sqrt(x)
y1 = x**(3/4)
y2 = x**(5/4)
e1 = m * gamma(k + 1/2) * _ed2((4 * k + 3)/sx) / (9 * y1)
e2 = gamma(k + 1/2) * _ed3((4 * k + 1) / sx) / (72 * y2)
e3 = 2 * (m + 2) * gamma(k + 3/2) * _ed3((4 * k + 5) / sx) / (12 * y2)
e4 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 1) / sx) / (144 * y1)
e5 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 5) / sx) / (144 * y1)
return e1 + e2 + e3 + e4 + e5
x = np.asarray(x)
tot = np.zeros_like(x, dtype='float')
cond = np.ones_like(x, dtype='bool')
k = 0
while np.any(cond):
z = -_Ak(k, x[cond]) / (np.pi * gamma(k + 1))
tot[cond] = tot[cond] + z
cond[cond] = np.abs(z) >= 1e-7
k += 1
return tot
def _cdf_cvm_inf(x):
"""
Calculate the cdf of the Cramér-von Mises statistic (infinite sample size).
See equation 1.2 in Csörgő, S. and Faraway, J. (1996).
Implementation based on MAPLE code of Julian Faraway and R code of the
function pCvM in the package goftest (v1.1.1), permission granted
by Adrian Baddeley. Main difference in the implementation: the code
here keeps adding terms of the series until the terms are small enough.
The function is not expected to be accurate for large values of x, say
x > 4, when the cdf is very close to 1.
"""
x = np.asarray(x)
def term(x, k):
# this expression can be found in [2], second line of (1.3)
u = np.exp(gammaln(k + 0.5) - gammaln(k+1)) / (np.pi**1.5 * np.sqrt(x))
y = 4*k + 1
q = y**2 / (16*x)
b = kv(0.25, q)
return u * np.sqrt(y) * np.exp(-q) * b
tot = np.zeros_like(x, dtype='float')
cond = np.ones_like(x, dtype='bool')
k = 0
while np.any(cond):
z = term(x[cond], k)
tot[cond] = tot[cond] + z
cond[cond] = np.abs(z) >= 1e-7
k += 1
return tot
def _cdf_cvm(x, n=None):
"""
Calculate the cdf of the Cramér-von Mises statistic for a finite sample
size n. If n is None, use the asymptotic cdf (n=inf).
See equation 1.8 in Csörgő, S. and Faraway, J. (1996) for finite samples,
1.2 for the asymptotic cdf.
The function is not expected to be accurate for large values of x, say
x > 2, when the cdf is very close to 1 and it might return values > 1
in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846. Moreover, it
is not accurate for small values of n, especially close to the bounds of
the distribution's domain, [1/(12*n), n/3], where the value jumps to 0
and 1, respectively. These are limitations of the approximation by Csörgő
and Faraway (1996) implemented in this function.
"""
x = np.asarray(x)
if n is None:
y = _cdf_cvm_inf(x)
else:
# support of the test statistic is [1/(12*n), n/3], see 1.1 in [2]
y = np.zeros_like(x, dtype='float')
sup = (1./(12*n) < x) & (x < n/3.)
# note: _psi1_mod does not include the term _cdf_cvm_inf(x) / 12
# therefore, we need to add it here
y[sup] = _cdf_cvm_inf(x[sup]) * (1 + 1./(12*n)) + _psi1_mod(x[sup]) / n
y[x >= n/3] = 1
if y.ndim == 0:
return y[()]
return y
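# Hedged sanity check (not in the original source): standard tables list
# approximately 0.461 as the asymptotic 5% critical value of the one-sample
# Cramér-von Mises statistic, so the survival probability there should be
# close to 0.05.
#
#     >>> 1 - _cdf_cvm_inf(0.461)  # expected to be approximately 0.05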
def cramervonmises(rvs, cdf, args=()):
"""Perform the one-sample Cramér-von Mises test for goodness of fit.
This performs a test of the goodness of fit of a cumulative distribution
function (cdf) :math:`F` compared to the empirical distribution function
:math:`F_n` of observed random variates :math:`X_1, ..., X_n` that are
assumed to be independent and identically distributed ([1]_).
The null hypothesis is that the :math:`X_i` have cumulative distribution
:math:`F`.
Parameters
----------
rvs : array_like
A 1-D array of observed values of the random variables :math:`X_i`.
cdf : str or callable
The cumulative distribution function :math:`F` to test the
observations against. If a string, it should be the name of a
distribution in `scipy.stats`. If a callable, that callable is used
to calculate the cdf: ``cdf(x, *args) -> float``.
args : tuple, optional
Distribution parameters. These are assumed to be known; see Notes.
Returns
-------
res : object with attributes
statistic : float
Cramér-von Mises statistic.
pvalue : float
The p-value.
See Also
--------
kstest, cramervonmises_2samp
Notes
-----
.. versionadded:: 1.6.0
The p-value relies on the approximation given by equation 1.8 in [2]_.
It is important to keep in mind that the p-value is only accurate if
one tests a simple hypothesis, i.e. the parameters of the reference
distribution are known. If the parameters are estimated from the data
(composite hypothesis), the computed p-value is not reliable.
References
----------
.. [1] Cramér-von Mises criterion, Wikipedia,
https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion
.. [2] Csörgő, S. and Faraway, J. (1996). The Exact and Asymptotic
Distribution of Cramér-von Mises Statistics. Journal of the
Royal Statistical Society, pp. 221-234.
Examples
--------
Suppose we wish to test whether data generated by ``scipy.stats.norm.rvs``
were, in fact, drawn from the standard normal distribution. We choose a
significance level of ``alpha=0.05``.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng(165417232101553420507139617764912913465)
>>> x = stats.norm.rvs(size=500, random_state=rng)
>>> res = stats.cramervonmises(x, 'norm')
>>> res.statistic, res.pvalue
(0.1072085112565724, 0.5508482238203407)
The p-value exceeds our chosen significance level, so we do not
reject the null hypothesis that the observed sample is drawn from the
standard normal distribution.
Now suppose we wish to check whether the same sample, shifted by 2.1, is
consistent with being drawn from a normal distribution with a mean of 2.
>>> y = x + 2.1
>>> res = stats.cramervonmises(y, 'norm', args=(2,))
>>> res.statistic, res.pvalue
(0.8364446265294695, 0.00596286797008283)
Here we have used the `args` keyword to specify the mean (``loc``)
of the normal distribution to test the data against. This is equivalent
to the following, in which we create a frozen normal distribution with
mean 2, then pass its ``cdf`` method as an argument.
>>> frozen_dist = stats.norm(loc=2)
>>> res = stats.cramervonmises(y, frozen_dist.cdf)
>>> res.statistic, res.pvalue
(0.8364446265294695, 0.00596286797008283)
In either case, we would reject the null hypothesis that the observed
sample is drawn from a normal distribution with a mean of 2 (and default
variance of 1) because the p-value is less than our chosen
significance level.
"""
if isinstance(cdf, str):
cdf = getattr(distributions, cdf).cdf
vals = np.sort(np.asarray(rvs))
if vals.size <= 1:
raise ValueError('The sample must contain at least two observations.')
if vals.ndim > 1:
raise ValueError('The sample must be one-dimensional.')
n = len(vals)
cdfvals = cdf(vals, *args)
u = (2*np.arange(1, n+1) - 1)/(2*n)
w = 1/(12*n) + np.sum((u - cdfvals)**2)
# avoid small negative values that can occur due to the approximation
p = max(0, 1. - _cdf_cvm(w, n))
return CramerVonMisesResult(statistic=w, pvalue=p)
def _get_wilcoxon_distr(n):
"""
Probability distribution of the Wilcoxon signed-rank statistic r_plus
(the sum of the ranks of the positive differences).
Returns an array with the probabilities of all the possible ranks
r = 0, ..., n*(n+1)/2
"""
c = np.ones(1, dtype=np.double)
for k in range(1, n + 1):
prev_c = c
c = np.zeros(k * (k + 1) // 2 + 1, dtype=np.double)
m = len(prev_c)
c[:m] = prev_c * 0.5
c[-m:] += prev_c * 0.5
return c
def _get_wilcoxon_distr2(n):
"""
Probability distribution of the Wilcoxon signed-rank statistic r_plus
(the sum of the ranks of the positive differences).
Returns an array with the probabilities of all the possible ranks
r = 0, ..., n*(n+1)/2
This is a slower reference implementation.
References
----------
.. [1] 1. Harris T, Hardin JW. Exact Wilcoxon Signed-Rank and Wilcoxon
Mann-Whitney Ranksum Tests. The Stata Journal. 2013;13(2):337-343.
"""
ai = np.arange(1, n+1)[:, None]
t = n*(n+1)/2
q = 2*t
j = np.arange(q)
theta = 2*np.pi/q*j
phi_sp = np.prod(np.cos(theta*ai), axis=0)
phi_s = np.exp(1j*theta*t) * phi_sp
p = np.real(ifft(phi_s))
res = np.zeros(int(t)+1)
res[:-1:] = p[::2]
res[0] /= 2
res[-1] = res[0]
return res
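# Hedged cross-check (not in the original source): the fast convolution
# implementation and the slower characteristic-function reference should
# produce the same distribution, whose probabilities sum to 1.
#
#     >>> import numpy as np
#     >>> d1 = _get_wilcoxon_distr(8)
#     >>> d2 = _get_wilcoxon_distr2(8)
#     >>> bool(np.allclose(d1, d2)) and bool(np.isclose(d1.sum(), 1.0))
#     True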
def _tau_b(A):
"""Calculate Kendall's tau-b and p-value from contingency table."""
# See [2] 2.2 and 4.2
# contingency table must be truly 2D
if A.shape[0] == 1 or A.shape[1] == 1:
return np.nan, np.nan
NA = A.sum()
PA = _P(A)
QA = _Q(A)
Sri2 = (A.sum(axis=1)**2).sum()
Scj2 = (A.sum(axis=0)**2).sum()
denominator = (NA**2 - Sri2)*(NA**2 - Scj2)
tau = (PA-QA)/(denominator)**0.5
numerator = 4*(_a_ij_Aij_Dij2(A) - (PA - QA)**2 / NA)
s02_tau_b = numerator/denominator
if s02_tau_b == 0: # Avoid divide by zero
return tau, 0
Z = tau/s02_tau_b**0.5
p = 2*norm.sf(abs(Z)) # 2-sided p-value
return tau, p
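# Hedged cross-check (not in the original source): the tau-b statistic from
# the contingency-table formula should agree with `scipy.stats.kendalltau`
# on the expanded rankings (the p-values differ by construction, since the
# null hypotheses differ). The table below is an illustrative assumption.
#
#     >>> import numpy as np
#     >>> from scipy.stats import kendalltau
#     >>> table = np.array([[10, 5], [4, 12]])
#     >>> rows, cols = np.indices(table.shape)
#     >>> x = np.repeat(rows.ravel(), table.ravel())
#     >>> y = np.repeat(cols.ravel(), table.ravel())
#     >>> tau, _ = _tau_b(table.astype(float))
#     >>> bool(np.isclose(tau, kendalltau(x, y).statistic))
#     True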
def _somers_d(A, alternative='two-sided'):
"""Calculate Somers' D and p-value from contingency table."""
# See [3] page 1740
# contingency table must be truly 2D
if A.shape[0] <= 1 or A.shape[1] <= 1:
return np.nan, np.nan
NA = A.sum()
NA2 = NA**2
PA = _P(A)
QA = _Q(A)
Sri2 = (A.sum(axis=1)**2).sum()
d = (PA - QA)/(NA2 - Sri2)
S = _a_ij_Aij_Dij2(A) - (PA-QA)**2/NA
with np.errstate(divide='ignore'):
Z = (PA - QA)/(4*(S))**0.5
_, p = scipy.stats._stats_py._normtest_finish(Z, alternative)
return d, p
@dataclass
class SomersDResult:
statistic: float
pvalue: float
table: np.ndarray
def somersd(x, y=None, alternative='two-sided'):
r"""Calculates Somers' D, an asymmetric measure of ordinal association.
Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the
correspondence between two rankings. Both statistics consider the
difference between the number of concordant and discordant pairs in two
rankings :math:`X` and :math:`Y`, and both are normalized such that values
close to 1 indicate strong agreement and values close to -1 indicate
strong disagreement. They differ in how they are normalized. To show the
relationship, Somers' :math:`D` can be defined in terms of Kendall's
:math:`\tau_a`:
.. math::
D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)}
Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the
second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of
:math:`n` rankings can also be viewed as an :math:`r \times s` contingency
table in which element :math:`i, j` is the number of rank pairs with rank
:math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`.
Accordingly, `somersd` also allows the input data to be supplied as a
single, 2D contingency table instead of as two separate, 1D rankings.
Note that the definition of Somers' :math:`D` is asymmetric: in general,
:math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers'
:math:`D(Y|X)`: the "row" variable :math:`X` is treated as an independent
variable, and the "column" variable :math:`Y` is dependent. For Somers'
:math:`D(X|Y)`, swap the input lists or transpose the input table.
Parameters
----------
x : array_like
1D array of rankings, treated as the (row) independent variable.
Alternatively, a 2D contingency table.
y : array_like, optional
If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the
same length, treated as the (column) dependent variable.
If `x` is 2D, `y` is ignored.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the rank correlation is nonzero
* 'less': the rank correlation is negative (less than zero)
* 'greater': the rank correlation is positive (greater than zero)
Returns
-------
res : SomersDResult
A `SomersDResult` object with the following fields:
statistic : float
The Somers' :math:`D` statistic.
pvalue : float
The p-value for a hypothesis test whose null
hypothesis is an absence of association, :math:`D=0`.
See notes for more information.
table : 2D array
The contingency table formed from rankings `x` and `y` (or the
provided contingency table, if `x` is a 2D array)
See Also
--------
kendalltau : Calculates Kendall's tau, another correlation measure.
weightedtau : Computes a weighted version of Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
pearsonr : Calculates a Pearson correlation coefficient.
Notes
-----
This function follows the contingency table approach of [2]_ and
[3]_. *p*-values are computed based on an asymptotic approximation of
the test statistic distribution under the null hypothesis :math:`D=0`.
Theoretically, hypothesis tests based on Kendall's :math:`\tau` and Somers'
:math:`D` should be identical.
However, the *p*-values returned by `kendalltau` are based
on the null hypothesis of *independence* between :math:`X` and :math:`Y`
(i.e. the population from which pairs in :math:`X` and :math:`Y` are
sampled contains equal numbers of all possible pairs), which is more
specific than the null hypothesis :math:`D=0` used here. If the null
hypothesis of independence is desired, it is acceptable to use the
*p*-value returned by `kendalltau` with the statistic returned by
`somersd` and vice versa. For more information, see [2]_.
Contingency tables are formatted according to the convention used by
SAS and R: the first ranking supplied (``x``) is the "row" variable, and
the second ranking supplied (``y``) is the "column" variable. This is
opposite the convention of Somers' original paper [1]_.
References
----------
.. [1] Robert H. Somers, "A New Asymmetric Measure of Association for
Ordinal Variables", *American Sociological Review*, Vol. 27, No. 6,
pp. 799--811, 1962.
.. [2] Morton B. Brown and Jacqueline K. Benedetti, "Sampling Behavior of
Tests for Correlation in Two-Way Contingency Tables", *Journal of
the American Statistical Association* Vol. 72, No. 358, pp.
309--315, 1977.
.. [3] SAS Institute, Inc., "The FREQ Procedure (Book Excerpt)",
*SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009.
.. [4] Laerd Statistics, "Somers' d using SPSS Statistics", *SPSS
Statistics Tutorials and Statistical Guides*,
https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php,
Accessed July 31, 2020.
Examples
--------
We calculate Somers' D for the example given in [4]_, in which a hotel
chain owner seeks to determine the association between hotel room
cleanliness and customer satisfaction. The independent variable, hotel
room cleanliness, is ranked on an ordinal scale: "below average (1)",
"average (2)", or "above average (3)". The dependent variable, customer
satisfaction, is ranked on a second scale: "very dissatisfied (1)",
"moderately dissatisfied (2)", "neither dissatisfied nor satisfied (3)",
"moderately satisfied (4)", or "very satisfied (5)". 189 customers
respond to the survey, and the results are cast into a contingency table
with the hotel room cleanliness as the "row" variable and customer
satisfaction as the "column" variable.
+-----+-----+-----+-----+-----+-----+
| | (1) | (2) | (3) | (4) | (5) |
+=====+=====+=====+=====+=====+=====+
| (1) | 27 | 25 | 14 | 7 | 0 |
+-----+-----+-----+-----+-----+-----+
| (2) | 7 | 14 | 18 | 35 | 12 |
+-----+-----+-----+-----+-----+-----+
| (3) | 1 | 3 | 2 | 7 | 17 |
+-----+-----+-----+-----+-----+-----+
For example, 27 customers assigned their room a cleanliness ranking of
"below average (1)" and a corresponding satisfaction of "very
dissatisfied (1)". We perform the analysis as follows.
>>> from scipy.stats import somersd
>>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
>>> res = somersd(table)
>>> res.statistic
0.6032766111513396
>>> res.pvalue
1.0007091191074533e-27
The value of the Somers' D statistic is approximately 0.6, indicating
a positive correlation between room cleanliness and customer satisfaction
in the sample.
The *p*-value is very small, indicating a very small probability of
observing such an extreme value of the statistic under the null
hypothesis that the statistic of the entire population (from which
our sample of 189 customers is drawn) is zero. This supports the
alternative hypothesis that the true value of Somers' D for the population
is nonzero.
"""
x, y = np.array(x), np.array(y)
if x.ndim == 1:
if x.size != y.size:
raise ValueError("Rankings must be of equal length.")
table = scipy.stats.contingency.crosstab(x, y)[1]
elif x.ndim == 2:
if np.any(x < 0):
raise ValueError("All elements of the contingency table must be "
"non-negative.")
if np.any(x != x.astype(int)):
raise ValueError("All elements of the contingency table must be "
"integer.")
if x.nonzero()[0].size < 2:
raise ValueError("At least two elements of the contingency table "
"must be nonzero.")
table = x
else:
raise ValueError("x must be either a 1D or 2D array")
# The table type is converted to a float to avoid an integer overflow
d, p = _somers_d(table.astype(float), alternative)
# add alias for consistency with other correlation functions
res = SomersDResult(d, p, table)
res.correlation = d
return res
# This could be combined with `_all_partitions` in `_resampling.py`
def _all_partitions(nx, ny):
"""
Partition a set of indices into two fixed-length sets in all possible ways
Partition a set of indices 0 ... nx + ny - 1 into two sets of length nx and
ny in all possible ways (ignoring order of elements).
"""
z = np.arange(nx+ny)
for c in combinations(z, nx):
x = np.array(c)
mask = np.ones(nx+ny, bool)
mask[x] = False
y = z[mask]
yield x, y
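# Hedged usage sketch (not in the original source): every split of four
# indices into two groups of two; there are C(4, 2) = 6 such partitions.
#
#     >>> parts = list(_all_partitions(2, 2))
#     >>> len(parts)
#     6
#     >>> parts[0]
#     (array([0, 1]), array([2, 3]))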
def _compute_log_combinations(n):
"""Compute all log combination of C(n, k)."""
gammaln_arr = gammaln(np.arange(n + 1) + 1)
return gammaln(n + 1) - gammaln_arr - gammaln_arr[::-1]
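# Hedged cross-check (not in the original source): exponentiating the log
# combinations should recover the ordinary binomial coefficients C(n, k).
#
#     >>> import numpy as np
#     >>> from scipy.special import comb
#     >>> np.allclose(np.exp(_compute_log_combinations(6)),
#     ...             comb(6, np.arange(7)))
#     True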
@dataclass
class BarnardExactResult:
statistic: float
pvalue: float
def barnard_exact(table, alternative="two-sided", pooled=True, n=32):
r"""Perform a Barnard exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes section below.
pooled : bool, optional
Whether to compute score statistic with pooled variance (as in
Student's t-test, for example) or unpooled variance (as in Welch's
t-test). Default is ``True``.
n : int, optional
Number of sampling points used in the construction of the sampling
method. Note that this argument will automatically be converted to
the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
select sample points. Default is 32. Must be positive. In most cases,
32 points is enough to reach good precision. More points come at a
performance cost.
Returns
-------
ber : BarnardExactResult
A result object with the following attributes.
statistic : float
The Wald statistic with pooled or unpooled variance, depending
on the user choice of `pooled`.
pvalue : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
which is a uniformly more powerful alternative to Fisher's exact test.
Notes
-----
Barnard's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a more powerful alternative to Fisher's exact test
for 2x2 contingency tables.
Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
Barnard's exact test, we can assert three different null hypotheses:
- :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
with `alternative` = "less"
- :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
with `alternative` = "greater"
- :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
with `alternative` = "two-sided" (default one)
In order to compute Barnard's exact test, we are using the Wald
statistic [3]_ with pooled or unpooled variance.
Under the default assumption that both variances are equal
(``pooled = True``), the statistic is computed as:
.. math::
T(X) = \frac{
\hat{p}_1 - \hat{p}_2
}{
\sqrt{
\hat{p}(1 - \hat{p})
(\frac{1}{c_1} +
\frac{1}{c_2})
}
}
with :math:`\hat{p}_1, \hat{p}_2` and :math:`\hat{p}` the estimator of
:math:`p_1, p_2` and :math:`p`, the latter being the combined probability,
given the assumption that :math:`p_1 = p_2`.
If this assumption is invalid (``pooled = False``), the statistic is:
.. math::
T(X) = \frac{
\hat{p}_1 - \hat{p}_2
}{
\sqrt{
\frac{\hat{p}_1 (1 - \hat{p}_1)}{c_1} +
\frac{\hat{p}_2 (1 - \hat{p}_2)}{c_2}
}
}
The p-value is then computed as:
.. math::
\sum
\binom{c_1}{x_{11}}
\binom{c_2}{x_{12}}
\pi^{x_{11} + x_{12}}
(1 - \pi)^{t - x_{11} - x_{12}}
where the sum is over all 2x2 contingency tables :math:`X` such that:
* :math:`T(X) \leq T(X_0)` when `alternative` = "less",
* :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
* :math:`T(X) \geq |T(X_0)|` when `alternative` = "two-sided".
Above, :math:`c_1, c_2` are the sum of the columns 1 and 2,
and :math:`t` the total (the sum of all four elements of the table).
The returned p-value is the maximum p-value taken over the nuisance
parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.
This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
number of sample points.
References
----------
.. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
34.1/2 (1947): 123-138. :doi:`dpgkg3`
.. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
unconditional exact tests for comparing two binomials."
*Cytel Software Corporation* 675 (2003): 1-5.
.. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test
Examples
--------
An example use of Barnard's test is presented in [2]_.
Consider the following example of a vaccine efficacy study
(Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
inoculated with a recombinant DNA influenza vaccine and the 15 were
inoculated with a placebo. Twelve of the 15 subjects in the placebo
group (80%) eventually became infected with influenza whereas for the
vaccine group, only 7 of the 15 subjects (47%) became infected. The
data are tabulated as a 2 x 2 table::
Vaccine Placebo
Yes 7 12
No 8 3
When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.
Our alternative hypothesis is that the vaccine will lower the chance of
becoming infected with the virus; that is, the probability :math:`p_1` of
catching the virus with the vaccine will be *less than* the probability
:math:`p_2` of catching the virus without the vaccine. Therefore, we call
`barnard_exact` with the ``alternative="less"`` option:
>>> import scipy.stats as stats
>>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
>>> res.statistic
-1.894...
>>> res.pvalue
0.03407...
Under the null hypothesis that the vaccine will not lower the chance of
becoming infected, the probability of obtaining test results at least as
extreme as the observed data is approximately 3.4%. Since this p-value is
less than our chosen significance level, we have evidence to reject
:math:`H_0` in favor of the alternative.
Suppose we had used Fisher's exact test instead:
>>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
>>> pvalue
0.0640...
With the same threshold significance of 5%, we would not have been able
to reject the null hypothesis in favor of the alternative. As stated in
[2]_, Barnard's test is uniformly more powerful than Fisher's exact test
because Barnard's test does not condition on any margin. Fisher's test
should only be used when both sets of marginals are fixed.
"""
if n <= 0:
raise ValueError(
"Number of points `n` must be strictly positive, "
f"found {n!r}"
)
table = np.asarray(table, dtype=np.int64)
if not table.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(table < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in table.sum(axis=0):
# If both values in a column are zero, the p-value is 1 and
# the score's statistic is NaN.
return BarnardExactResult(np.nan, 1.0)
total_col_1, total_col_2 = table.sum(axis=0)
x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(-1, 1)
x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(1, -1)
# We need to calculate the wald statistics for each combination of x1 and
# x2.
p1, p2 = x1 / total_col_1, x2 / total_col_2
if pooled:
p = (x1 + x2) / (total_col_1 + total_col_2)
variances = p * (1 - p) * (1 / total_col_1 + 1 / total_col_2)
else:
variances = p1 * (1 - p1) / total_col_1 + p2 * (1 - p2) / total_col_2
# To avoid warning when dividing by 0
with np.errstate(divide="ignore", invalid="ignore"):
wald_statistic = np.divide((p1 - p2), np.sqrt(variances))
wald_statistic[p1 == p2] = 0 # Removing NaN values
wald_stat_obs = wald_statistic[table[0, 0], table[0, 1]]
if alternative == "two-sided":
index_arr = np.abs(wald_statistic) >= abs(wald_stat_obs)
elif alternative == "less":
index_arr = wald_statistic <= wald_stat_obs
elif alternative == "greater":
index_arr = wald_statistic >= wald_stat_obs
else:
msg = (
"`alternative` should be one of {'two-sided', 'less', 'greater'},"
f" found {alternative!r}"
)
raise ValueError(msg)
x1_sum_x2 = x1 + x2
x1_log_comb = _compute_log_combinations(total_col_1)
x2_log_comb = _compute_log_combinations(total_col_2)
x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
result = shgo(
_get_binomial_log_p_value_with_nuisance_param,
args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
bounds=((0, 1),),
n=n,
sampling_method="sobol",
)
# result.fun is the negative log p-value, so it is converted back to a
# p-value (and clipped to [0, 1]) before returning
p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
return BarnardExactResult(wald_stat_obs, p_value)
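# Hedged usage sketch (not in the original source): the `pooled` flag selects
# the pooled or unpooled Wald statistic, which changes both the statistic and
# the p-value for the vaccine table from the docstring example.
#
#     >>> import scipy.stats as stats
#     >>> t = [[7, 12], [8, 3]]
#     >>> res_pooled = stats.barnard_exact(t, alternative="less")
#     >>> res_unpooled = stats.barnard_exact(t, alternative="less", pooled=False)
#     >>> res_pooled.pvalue  # as in the docstring example above
#     0.03407...
#     >>> res_pooled.statistic == res_unpooled.statistic  # expected: False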
@dataclass
class BoschlooExactResult:
statistic: float
pvalue: float
def boschloo_exact(table, alternative="two-sided", n=32):
r"""Perform Boschloo's exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes section below.
n : int, optional
Number of sampling points used in the construction of the sampling
method. Note that this argument will automatically be converted to
the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
select sample points. Default is 32. Must be positive. In most cases,
32 points is enough to reach good precision. More points come at a
performance cost.
Returns
-------
ber : BoschlooExactResult
A result object with the following attributes.
statistic : float
The statistic used in Boschloo's test; that is, the p-value
from Fisher's exact test.
pvalue : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
barnard_exact : Barnard's exact test, which is a more powerful alternative
to Fisher's exact test for 2x2 contingency tables.
Notes
-----
Boschloo's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a uniformly more powerful alternative to Fisher's exact test
for 2x2 contingency tables.
Boschloo's exact test uses the p-value of Fisher's exact test as a
statistic, and Boschloo's p-value is the probability under the null
hypothesis of observing such an extreme value of this statistic.
Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
Boschloo's exact test, we can assert three different alternative hypotheses:
- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 < p_2`,
with `alternative` = "less"
- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 > p_2`,
with `alternative` = "greater"
- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 \neq p_2`,
with `alternative` = "two-sided" (default)
There are multiple conventions for computing a two-sided p-value when the
null distribution is asymmetric. Here, we apply the convention that the
p-value of a two-sided test is twice the minimum of the p-values of the
one-sided tests (clipped to 1.0). Note that `fisher_exact` follows a
different convention, so for a given `table`, the statistic reported by
`boschloo_exact` may differ from the p-value reported by `fisher_exact`
when ``alternative='two-sided'``.
.. versionadded:: 1.7.0
References
----------
.. [1] R.D. Boschloo. "Raised conditional level of significance for the
2 x 2-table when testing the equality of two probabilities",
Statistica Neerlandica, 24(1), 1970
.. [2] "Boschloo's test", Wikipedia,
https://en.wikipedia.org/wiki/Boschloo%27s_test
.. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction",
Human Resource Management, 43(4), 395-407, 2004,
:doi:`10.1002/hrm.20032`.
Examples
--------
In the following example, we consider the article "Employee
attitudes and job satisfaction" [3]_
which reports the results of a survey from 63 scientists and 117 college
professors. Of the 63 scientists, 31 said they were very satisfied with
their jobs, whereas 74 of the college professors were very satisfied
with their work. Is this significant evidence that college
professors are happier with their work than scientists?
The following table summarizes the data mentioned above::
college professors scientists
Very Satisfied 74 31
Dissatisfied 43 32
When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.
Our alternative hypothesis is that college professors are truly more
satisfied with their work than scientists. Therefore, we expect
:math:`p_1` the proportion of very satisfied college professors to be
greater than :math:`p_2`, the proportion of very satisfied scientists.
We thus call `boschloo_exact` with the ``alternative="greater"`` option:
>>> import scipy.stats as stats
>>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater")
>>> res.statistic
0.0483...
>>> res.pvalue
0.0355...
Under the null hypothesis that scientists are happier in their work than
college professors, the probability of obtaining test
results at least as extreme as the observed data is approximately 3.55%.
Since this p-value is less than our chosen significance level, we have
evidence to reject :math:`H_0` in favor of the alternative hypothesis.
"""
hypergeom = distributions.hypergeom
if n <= 0:
raise ValueError(
"Number of points `n` must be strictly positive,"
f" found {n!r}"
)
table = np.asarray(table, dtype=np.int64)
if not table.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(table < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in table.sum(axis=0):
# If both values in a column are zero, the statistic and the
# p-value are undefined, so NaN is returned for both.
return BoschlooExactResult(np.nan, np.nan)
total_col_1, total_col_2 = table.sum(axis=0)
total = total_col_1 + total_col_2
x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
x1_sum_x2 = x1 + x2
if alternative == 'less':
pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
elif alternative == 'two-sided':
boschloo_less = boschloo_exact(table, alternative="less", n=n)
boschloo_greater = boschloo_exact(table, alternative="greater", n=n)
res = (
boschloo_less if boschloo_less.pvalue < boschloo_greater.pvalue
else boschloo_greater
)
# Two-sided p-value is defined as twice the minimum of the one-sided
# p-values
pvalue = np.clip(2 * res.pvalue, a_min=0, a_max=1)
return BoschlooExactResult(res.statistic, pvalue)
else:
msg = (
f"`alternative` should be one of {'two-sided', 'less', 'greater'},"
f" found {alternative!r}"
)
raise ValueError(msg)
fisher_stat = pvalues[table[0, 0], table[0, 1]]
# fisher_stat * (1+1e-13) guards us from small numerical error. It is
# equivalent to np.isclose with relative tol of 1e-13 and absolute tol of 0
# For a more thorough explanation, see gh-14178
index_arr = pvalues <= fisher_stat * (1+1e-13)
x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
x1_log_comb = _compute_log_combinations(total_col_1)
x2_log_comb = _compute_log_combinations(total_col_2)
x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
result = shgo(
_get_binomial_log_p_value_with_nuisance_param,
args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
bounds=((0, 1),),
n=n,
sampling_method="sobol",
)
# result.fun is the negative log p-value, so it is converted back to a
# p-value (and clipped to [0, 1]) before returning
p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
return BoschlooExactResult(fisher_stat, p_value)
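# Hedged sketch (not in the original source) of the documented two-sided
# convention: twice the smaller one-sided p-value, clipped to 1.
#
#     >>> import numpy as np
#     >>> import scipy.stats as stats
#     >>> t = [[74, 31], [43, 32]]
#     >>> less = stats.boschloo_exact(t, alternative="less").pvalue
#     >>> greater = stats.boschloo_exact(t, alternative="greater").pvalue
#     >>> two = stats.boschloo_exact(t, alternative="two-sided").pvalue
#     >>> bool(np.isclose(two, min(1.0, 2 * min(less, greater))))
#     True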
def _get_binomial_log_p_value_with_nuisance_param(
nuisance_param, x1_sum_x2, x1_sum_x2_log_comb, index_arr
):
r"""
Compute the log pvalue in respect of a nuisance parameter considering
a 2x2 sample space.
Parameters
----------
nuisance_param : float
nuisance parameter used in the maximisation of the p-value. Must be
between 0 and 1.
x1_sum_x2 : ndarray
Sum of x1 and x2 inside barnard_exact
x1_sum_x2_log_comb : ndarray
sum of the log combination of x1 and x2
index_arr : ndarray of boolean
Returns
-------
p_value : float
The negative log of the p-value evaluated at the given nuisance
parameter; minimizing this over the nuisance parameter in [0, 1]
yields the maximum p-value.
Notes
-----
Both Barnard's test and Boschloo's test iterate over a nuisance parameter
:math:`\pi \in [0, 1]` to find the maximum p-value. To search for this
maximum, this function returns the negative log p-value with respect to
the nuisance parameter passed in `nuisance_param`. This negative log
p-value is then minimized by `shgo`; the minimum of the negative log
p-value corresponds to the maximum p-value.
Also, to compute the combinations that appear in the p-value formula,
this function uses `gammaln`, which handles large values better than
`scipy.special.comb` and yields log combinations directly. The small
loss of precision is outweighed by a large gain in performance.
"""
t1, t2 = x1_sum_x2.shape
n = t1 + t2 - 2
with np.errstate(divide="ignore", invalid="ignore"):
log_nuisance = np.log(
nuisance_param,
out=np.zeros_like(nuisance_param),
where=nuisance_param >= 0,
)
log_1_minus_nuisance = np.log(
1 - nuisance_param,
out=np.zeros_like(nuisance_param),
where=1 - nuisance_param >= 0,
)
nuisance_power_x1_x2 = log_nuisance * x1_sum_x2
nuisance_power_x1_x2[(x1_sum_x2 == 0)[:, :]] = 0
nuisance_power_n_minus_x1_x2 = log_1_minus_nuisance * (n - x1_sum_x2)
nuisance_power_n_minus_x1_x2[(x1_sum_x2 == n)[:, :]] = 0
tmp_log_values_arr = (
x1_sum_x2_log_comb
+ nuisance_power_x1_x2
+ nuisance_power_n_minus_x1_x2
)
tmp_values_from_index = tmp_log_values_arr[index_arr]
# To avoid dividing by zero in log function and getting inf value,
# values are centered according to the max
max_value = tmp_values_from_index.max()
# For better precision, work with the log p-value. The p-value lies in
# the interval [0, 1]; taking its log maps it to (-inf, 0], which spreads
# out small values and thus improves precision
with np.errstate(divide="ignore", invalid="ignore"):
log_probs = np.exp(tmp_values_from_index - max_value).sum()
log_pvalue = max_value + np.log(
log_probs,
out=np.full_like(log_probs, -np.inf),
where=log_probs > 0,
)
# Since shgo finds the minimum, the negative log p-value is returned
return -log_pvalue
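# Hedged sanity check (not in the original source): with `index_arr` all True
# the binomial probabilities cover the whole sample space, so the p-value is
# 1 and the returned negative log p-value is ~0 for any nuisance value. The
# column totals below are illustrative assumptions.
#
#     >>> import numpy as np
#     >>> c1 = c2 = 5
#     >>> x1 = np.arange(c1 + 1).reshape(-1, 1)
#     >>> x2 = np.arange(c2 + 1).reshape(1, -1)
#     >>> x1_sum_x2 = x1 + x2
#     >>> logc = _compute_log_combinations(c1)[x1] + _compute_log_combinations(c2)[x2]
#     >>> ind = np.ones_like(x1_sum_x2, dtype=bool)
#     >>> f = _get_binomial_log_p_value_with_nuisance_param
#     >>> bool(np.isclose(f(np.array(0.3), x1_sum_x2, logc, ind), 0.0))
#     True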
def _pval_cvm_2samp_exact(s, m, n):
"""
Compute the exact p-value of the Cramer-von Mises two-sample test
for a given value s of the test statistic.
m and n are the sizes of the samples.
[1] Y. Xiao, A. Gordon, and A. Yakovlev, "A C++ Program for
the Cramér-Von Mises Two-Sample Test", J. Stat. Soft.,
vol. 17, no. 8, pp. 1-15, Dec. 2006.
[2] T. W. Anderson "On the Distribution of the Two-Sample Cramer-von Mises
Criterion," The Annals of Mathematical Statistics, Ann. Math. Statist.
33(3), 1148-1159, (September, 1962)
"""
# [1, p. 3]
lcm = np.lcm(m, n)
# [1, p. 4], below eq. 3
a = lcm // m
b = lcm // n
# Combine Eq. 9 in [2] with Eq. 2 in [1] and solve for $\zeta$
# Hint: `s` is $U$ in [2], and $T_2$ in [1] is $T$ in [2]
mn = m * n
zeta = lcm ** 2 * (m + n) * (6 * s - mn * (4 * mn - 1)) // (6 * mn ** 2)
# bound maximum value that may appear in `gs` (remember both rows!)
zeta_bound = lcm**2 * (m + n) # bound elements in row 1
combinations = comb(m + n, m) # sum of row 2
max_gs = max(zeta_bound, combinations)
dtype = np.min_scalar_type(max_gs)
# the frequency table of $g_{u, v}^+$ defined in [1, p. 6]
gs = ([np.array([[0], [1]], dtype=dtype)]
+ [np.empty((2, 0), dtype=dtype) for _ in range(m)])
for u in range(n + 1):
next_gs = []
tmp = np.empty((2, 0), dtype=dtype)
for v, g in enumerate(gs):
# Calculate g recursively with eq. 11 in [1]. Even though it
# doesn't look like it, this also does eqs. 12/13 (all of Algorithm 1).
vi, i0, i1 = np.intersect1d(tmp[0], g[0], return_indices=True)
tmp = np.concatenate([
np.stack([vi, tmp[1, i0] + g[1, i1]]),
np.delete(tmp, i0, 1),
np.delete(g, i1, 1)
], 1)
tmp[0] += (a * v - b * u) ** 2
next_gs.append(tmp)
gs = next_gs
value, freq = gs[m]
return np.float64(np.sum(freq[value >= zeta]) / combinations)
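# Hedged brute-force cross-check (not in the original source): for tiny
# samples, the exact p-value should be recoverable by enumerating all
# C(m+n, m) assignments of the pooled ranks to the first sample. The
# sample sizes and split below are illustrative assumptions.
#
#     >>> import numpy as np
#     >>> from itertools import combinations
#     >>> from math import comb
#     >>> m, n = 4, 3
#     >>> def u_of(rx, ry):
#     ...     rx, ry = np.sort(rx), np.sort(ry)
#     ...     return (m * np.sum((rx - np.arange(1, m + 1))**2)
#     ...             + n * np.sum((ry - np.arange(1, n + 1))**2))
#     >>> ranks = np.arange(1, m + n + 1)
#     >>> mask = np.zeros(m + n, dtype=bool); mask[[0, 2, 4, 6]] = True
#     >>> u_obs = u_of(ranks[mask], ranks[~mask])
#     >>> count = 0
#     >>> for c in combinations(range(m + n), m):
#     ...     sel = np.zeros(m + n, dtype=bool); sel[list(c)] = True
#     ...     count += u_of(ranks[sel], ranks[~sel]) >= u_obs
#     >>> bool(np.isclose(count / comb(m + n, m), _pval_cvm_2samp_exact(u_obs, m, n)))
#     True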
def cramervonmises_2samp(x, y, method='auto'):
"""Perform the two-sample Cramér-von Mises test for goodness of fit.
This is the two-sample version of the Cramér-von Mises test ([1]_):
for two independent samples :math:`X_1, ..., X_n` and
:math:`Y_1, ..., Y_m`, the null hypothesis is that the samples
come from the same (unspecified) continuous distribution.
Parameters
----------
x : array_like
A 1-D array of observed values of the random variables :math:`X_i`.
y : array_like
A 1-D array of observed values of the random variables :math:`Y_i`.
method : {'auto', 'asymptotic', 'exact'}, optional
The method used to compute the p-value, see Notes for details.
The default is 'auto'.
Returns
-------
res : object with attributes
statistic : float
Cramér-von Mises statistic.
pvalue : float
The p-value.
See Also
--------
cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp
Notes
-----
.. versionadded:: 1.7.0
The statistic is computed according to equation 9 in [2]_. The
calculation of the p-value depends on the keyword `method`:
- ``asymptotic``: The p-value is approximated by using the limiting
distribution of the test statistic.
- ``exact``: The exact p-value is computed by enumerating all
possible combinations of the test statistic, see [2]_.
If ``method='auto'``, the exact approach is used
if both samples contain no more than 20 observations;
otherwise, the asymptotic distribution is used.
If the underlying distribution is not continuous, the p-value is likely to
be conservative (Section 6.2 in [3]_). When ranking the data to compute
the test statistic, midranks are used if there are ties.
References
----------
.. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion
.. [2] Anderson, T.W. (1962). On the distribution of the two-sample
Cramer-von-Mises criterion. The Annals of Mathematical
Statistics, pp. 1148-1159.
.. [3] Conover, W.J., Practical Nonparametric Statistics, 1971.
Examples
--------
Suppose we wish to test whether two samples generated by
``scipy.stats.norm.rvs`` have the same distribution. We choose a
significance level of alpha=0.05.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = stats.norm.rvs(size=100, random_state=rng)
>>> y = stats.norm.rvs(size=70, random_state=rng)
>>> res = stats.cramervonmises_2samp(x, y)
>>> res.statistic, res.pvalue
(0.29376470588235293, 0.1412873014573014)
The p-value exceeds our chosen significance level, so we do not
reject the null hypothesis that the observed samples are drawn from the
same distribution.
For small sample sizes, one can compute the exact p-values:
>>> x = stats.norm.rvs(size=7, random_state=rng)
>>> y = stats.t.rvs(df=2, size=6, random_state=rng)
>>> res = stats.cramervonmises_2samp(x, y, method='exact')
>>> res.statistic, res.pvalue
(0.197802197802198, 0.31643356643356646)
The p-value based on the asymptotic distribution is a good approximation
even though the sample size is small.
>>> res = stats.cramervonmises_2samp(x, y, method='asymptotic')
>>> res.statistic, res.pvalue
(0.197802197802198, 0.2966041181527128)
Independent of the method, one would not reject the null hypothesis at the
chosen significance level in this example.
"""
xa = np.sort(np.asarray(x))
ya = np.sort(np.asarray(y))
if xa.size <= 1 or ya.size <= 1:
raise ValueError('x and y must contain at least two observations.')
if xa.ndim > 1 or ya.ndim > 1:
raise ValueError('The samples must be one-dimensional.')
if method not in ['auto', 'exact', 'asymptotic']:
raise ValueError('method must be either auto, exact or asymptotic.')
nx = len(xa)
ny = len(ya)
if method == 'auto':
if max(nx, ny) > 20:
method = 'asymptotic'
else:
method = 'exact'
# get ranks of x and y in the pooled sample
z = np.concatenate([xa, ya])
# in case of ties, use midrank (see [1])
r = scipy.stats.rankdata(z, method='average')
rx = r[:nx]
ry = r[nx:]
# compute U (eq. 10 in [2])
u = nx * np.sum((rx - np.arange(1, nx+1))**2)
u += ny * np.sum((ry - np.arange(1, ny+1))**2)
# compute T (eq. 9 in [2])
k, N = nx*ny, nx + ny
t = u / (k*N) - (4*k - 1)/(6*N)
if method == 'exact':
p = _pval_cvm_2samp_exact(u, nx, ny)
else:
# compute expected value and variance of T (eq. 11 and 14 in [2])
et = (1 + 1/N)/6
vt = (N+1) * (4*k*N - 3*(nx**2 + ny**2) - 2*k)
vt = vt / (45 * N**2 * 4 * k)
        # compute the normalized statistic (eq. 15 in [2])
tn = 1/6 + (t - et) / np.sqrt(45 * vt)
# approximate distribution of tn with limiting distribution
# of the one-sample test statistic
        # if tn < 0.003, _cdf_cvm_inf(tn) < 1.28e-18, so return 1.0 directly
if tn < 0.003:
p = 1.0
else:
p = max(0, 1. - _cdf_cvm_inf(tn))
return CramerVonMisesResult(statistic=t, pvalue=p)
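# Editor's sketch -- `_demo_cvm_2samp_statistic` is a hypothetical helper,
# not part of SciPy. It recomputes the rank-based statistic T (eqs. 9/10 in
# [2] above) on a toy example and checks it against the public function.
def _demo_cvm_2samp_statistic():
    x, y = np.sort([1.0, 2.0, 4.0]), np.sort([1.5, 2.5])
    nx, ny = len(x), len(y)
    r = scipy.stats.rankdata(np.concatenate([x, y]), method='average')
    u = nx * np.sum((r[:nx] - np.arange(1, nx + 1))**2)
    u += ny * np.sum((r[nx:] - np.arange(1, ny + 1))**2)
    k, N = nx * ny, nx + ny
    t = u / (k*N) - (4*k - 1)/(6*N)
    # must agree with the statistic returned by cramervonmises_2samp
    assert np.isclose(t, cramervonmises_2samp(x, y).statistic)
    return t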
class TukeyHSDResult:
"""Result of `scipy.stats.tukey_hsd`.
Attributes
----------
statistic : float ndarray
The computed statistic of the test for each comparison. The element
at index ``(i, j)`` is the statistic for the comparison between groups
``i`` and ``j``.
pvalue : float ndarray
The associated p-value from the studentized range distribution. The
element at index ``(i, j)`` is the p-value for the comparison
between groups ``i`` and ``j``.
Notes
-----
The string representation of this object displays the most recently
calculated confidence interval, and if none have been previously
calculated, it will evaluate ``confidence_interval()``.
References
----------
.. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
Method."
https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
28 November 2020.
"""
def __init__(self, statistic, pvalue, _nobs, _ntreatments, _stand_err):
self.statistic = statistic
self.pvalue = pvalue
self._ntreatments = _ntreatments
self._nobs = _nobs
self._stand_err = _stand_err
self._ci = None
self._ci_cl = None
def __str__(self):
# Note: `__str__` prints the confidence intervals from the most
# recent call to `confidence_interval`. If it has not been called,
# it will be called with the default CL of .95.
if self._ci is None:
self.confidence_interval(confidence_level=.95)
s = ("Tukey's HSD Pairwise Group Comparisons"
f" ({self._ci_cl*100:.1f}% Confidence Interval)\n")
s += "Comparison Statistic p-value Lower CI Upper CI\n"
for i in range(self.pvalue.shape[0]):
for j in range(self.pvalue.shape[0]):
if i != j:
s += (f" ({i} - {j}) {self.statistic[i, j]:>10.3f}"
f"{self.pvalue[i, j]:>10.3f}"
f"{self._ci.low[i, j]:>10.3f}"
f"{self._ci.high[i, j]:>10.3f}\n")
return s
def confidence_interval(self, confidence_level=.95):
"""Compute the confidence interval for the specified confidence level.
Parameters
----------
confidence_level : float, optional
            Confidence level for the computed confidence interval
            of the estimated mean differences. Default is .95.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence intervals for each
comparison. The high and low values are accessible for each
comparison at index ``(i, j)`` between groups ``i`` and ``j``.
References
----------
.. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1.
Tukey's Method."
https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
28 November 2020.
Examples
--------
>>> from scipy.stats import tukey_hsd
>>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
>>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
>>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
>>> result = tukey_hsd(group0, group1, group2)
>>> ci = result.confidence_interval()
>>> ci.low
array([[-3.649159, -8.249159, -3.909159],
[ 0.950841, -3.649159, 0.690841],
[-3.389159, -7.989159, -3.649159]])
>>> ci.high
array([[ 3.649159, -0.950841, 3.389159],
[ 8.249159, 3.649159, 7.989159],
[ 3.909159, -0.690841, 3.649159]])
"""
# check to see if the supplied confidence level matches that of the
# previously computed CI.
if (self._ci is not None and self._ci_cl is not None and
confidence_level == self._ci_cl):
return self._ci
if not 0 < confidence_level < 1:
raise ValueError("Confidence level must be between 0 and 1.")
        # determine the critical value of the studentized range using the
        # appropriate confidence level, number of treatments, and degrees
        # of freedom as determined by the number of observations less the
        # number of treatments ("Confidence limits for Tukey's method" [1]).
        # Note that in the case of unequal sample sizes there will be a
        # criterion for each group comparison.
params = (confidence_level, self._nobs, self._ntreatments - self._nobs)
srd = distributions.studentized_range.ppf(*params)
# also called maximum critical value, the Tukey criterion is the
# studentized range critical value * the square root of mean square
# error over the sample size.
tukey_criterion = srd * self._stand_err
        # the confidence limits are given by
        # `mean_differences` +- `tukey_criterion`
upper_conf = self.statistic + tukey_criterion
lower_conf = self.statistic - tukey_criterion
self._ci = ConfidenceInterval(low=lower_conf, high=upper_conf)
self._ci_cl = confidence_level
return self._ci
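# Editor's sketch -- `_demo_tukey_ci_halfwidth` is a hypothetical helper,
# not part of SciPy. It checks that the interval computed above has
# half-width equal to the studentized range critical value times the
# standard error, i.e. low/high = statistic -/+ ppf(CL, k, N - k) * SE.
def _demo_tukey_ci_halfwidth():
    groups = [[24.5, 23.5, 26.4], [28.4, 34.2, 29.5], [26.1, 28.3, 24.3]]
    res = tukey_hsd(*groups)
    ci = res.confidence_interval(confidence_level=0.95)
    k = len(groups)
    n = sum(len(g) for g in groups)
    srd = distributions.studentized_range.ppf(0.95, k, n - k)
    half_width = (ci.high - ci.low) / 2
    # `_stand_err` is the module-private standard error stored on the result
    assert np.allclose(half_width, srd * res._stand_err)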
def _tukey_hsd_iv(args):
    if len(args) < 2:
raise ValueError("There must be more than 1 treatment.")
args = [np.asarray(arg) for arg in args]
for arg in args:
if arg.ndim != 1:
raise ValueError("Input samples must be one-dimensional.")
if arg.size <= 1:
raise ValueError("Input sample size must be greater than one.")
if np.isinf(arg).any():
raise ValueError("Input samples must be finite.")
return args
def tukey_hsd(*args):
"""Perform Tukey's HSD test for equality of means over multiple treatments.
Tukey's honestly significant difference (HSD) test performs pairwise
comparison of means for a set of samples. Whereas ANOVA (e.g. `f_oneway`)
assesses whether the true means underlying each sample are identical,
Tukey's HSD is a post hoc test used to compare the mean of each sample
to the mean of each other sample.
The null hypothesis is that the distributions underlying the samples all
have the same mean. The test statistic, which is computed for every
possible pairing of samples, is simply the difference between the sample
means. For each pair, the p-value is the probability under the null
hypothesis (and other assumptions; see notes) of observing such an extreme
value of the statistic, considering that many pairwise comparisons are
being performed. Confidence intervals for the difference between each pair
of means are also available.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two arguments.
Returns
-------
result : `~scipy.stats._result_classes.TukeyHSDResult` instance
The return value is an object with the following attributes:
statistic : float ndarray
The computed statistic of the test for each comparison. The element
at index ``(i, j)`` is the statistic for the comparison between
groups ``i`` and ``j``.
pvalue : float ndarray
The computed p-value of the test for each comparison. The element
at index ``(i, j)`` is the p-value for the comparison between
groups ``i`` and ``j``.
The object has the following methods:
confidence_interval(confidence_level=0.95):
Compute the confidence interval for the specified confidence level.
See Also
--------
dunnett : performs comparison of means against a control group.
Notes
-----
The use of this test relies on several assumptions.
1. The observations are independent within and among groups.
2. The observations within each group are normally distributed.
3. The distributions from which the samples are drawn have the same finite
variance.
The original formulation of the test was for samples of equal size [6]_.
In case of unequal sample sizes, the test uses the Tukey-Kramer method
[4]_.
References
----------
.. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
Method."
https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
28 November 2020.
.. [2] Abdi, Herve & Williams, Lynne. (2021). "Tukey's Honestly Significant
Difference (HSD) Test."
https://personal.utdallas.edu/~herve/abdi-HSD2010-pretty.pdf
.. [3] "One-Way ANOVA Using SAS PROC ANOVA & PROC GLM." SAS
Tutorials, 2007, www.stattutorials.com/SAS/TUTORIAL-PROC-GLM.htm.
.. [4] Kramer, Clyde Young. "Extension of Multiple Range Tests to Group
Means with Unequal Numbers of Replications." Biometrics, vol. 12,
no. 3, 1956, pp. 307-310. JSTOR, www.jstor.org/stable/3001469.
Accessed 25 May 2021.
.. [5] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.3.3.
The ANOVA table and tests of hypotheses about means"
https://www.itl.nist.gov/div898/handbook/prc/section4/prc433.htm,
2 June 2021.
.. [6] Tukey, John W. "Comparing Individual Means in the Analysis of
Variance." Biometrics, vol. 5, no. 2, 1949, pp. 99-114. JSTOR,
www.jstor.org/stable/3001913. Accessed 14 June 2021.
Examples
--------
Here are some data comparing the time to relief of three brands of
headache medicine, reported in minutes. Data adapted from [3]_.
>>> import numpy as np
>>> from scipy.stats import tukey_hsd
>>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
>>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
>>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
We would like to see if the means between any of the groups are
significantly different. First, visually examine a box and whisker plot.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.boxplot([group0, group1, group2])
>>> ax.set_xticklabels(["group0", "group1", "group2"]) # doctest: +SKIP
>>> ax.set_ylabel("mean") # doctest: +SKIP
>>> plt.show()
    From the box and whisker plot, we can see overlap in the interquartile
    ranges of group 0 and group 2, but we can apply the ``tukey_hsd`` test to
    determine whether any of the differences between means are significant.
    We set a significance level of .05 to reject the null hypothesis.
>>> res = tukey_hsd(group0, group1, group2)
>>> print(res)
Tukey's HSD Pairwise Group Comparisons (95.0% Confidence Interval)
Comparison Statistic p-value Lower CI Upper CI
(0 - 1) -4.600 0.014 -8.249 -0.951
(0 - 2) -0.260 0.980 -3.909 3.389
(1 - 0) 4.600 0.014 0.951 8.249
(1 - 2) 4.340 0.020 0.691 7.989
(2 - 0) 0.260 0.980 -3.389 3.909
(2 - 1) -4.340 0.020 -7.989 -0.691
    The null hypothesis is that each group has the same mean. The p-values
    for the comparisons between ``group0`` and ``group1``, as well as between
    ``group1`` and ``group2``, do not exceed .05, so we reject the null
    hypothesis that they have the same means. The p-value of the comparison
    between ``group0`` and ``group2`` exceeds .05, so we fail to reject the
    null hypothesis that there is no significant difference between their
    means.
We can also compute the confidence interval associated with our chosen
confidence level.
    >>> conf = res.confidence_interval(confidence_level=.99)
>>> for ((i, j), l) in np.ndenumerate(conf.low):
... # filter out self comparisons
... if i != j:
... h = conf.high[i,j]
... print(f"({i} - {j}) {l:>6.3f} {h:>6.3f}")
(0 - 1) -9.480 0.280
(0 - 2) -5.140 4.620
(1 - 0) -0.280 9.480
(1 - 2) -0.540 9.220
(2 - 0) -4.620 5.140
(2 - 1) -9.220 0.540
"""
args = _tukey_hsd_iv(args)
ntreatments = len(args)
means = np.asarray([np.mean(arg) for arg in args])
nsamples_treatments = np.asarray([a.size for a in args])
nobs = np.sum(nsamples_treatments)
# determine mean square error [5]. Note that this is sometimes called
# mean square error within.
mse = (np.sum([np.var(arg, ddof=1) for arg in args] *
(nsamples_treatments - 1)) / (nobs - ntreatments))
# The calculation of the standard error differs when treatments differ in
# size. See ("Unequal sample sizes")[1].
if np.unique(nsamples_treatments).size == 1:
# all input groups are the same length, so only one value needs to be
# calculated [1].
normalize = 2 / nsamples_treatments[0]
else:
# to compare groups of differing sizes, we must compute a variance
# value for each individual comparison. Use broadcasting to get the
# resulting matrix. [3], verified against [4] (page 308).
normalize = 1 / nsamples_treatments + 1 / nsamples_treatments[None].T
# the standard error is used in the computation of the tukey criterion and
# finding the p-values.
stand_err = np.sqrt(normalize * mse / 2)
# the mean difference is the test statistic.
mean_differences = means[None].T - means
# Calculate the t-statistic to use within the survival function of the
# studentized range to get the p-value.
t_stat = np.abs(mean_differences) / stand_err
params = t_stat, ntreatments, nobs - ntreatments
pvalues = distributions.studentized_range.sf(*params)
return TukeyHSDResult(mean_differences, pvalues, ntreatments,
nobs, stand_err)
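# Editor's sketch -- `_demo_tukey_hsd_matrix` is a hypothetical helper, not
# part of SciPy. Per the docstring above, entry (i, j) of `statistic` is
# mean(group i) - mean(group j), so the statistic matrix is antisymmetric,
# the p-value matrix is symmetric, and self-comparisons get p-value 1.
def _demo_tukey_hsd_matrix():
    g0 = [24.5, 23.5, 26.4, 27.1, 29.9]
    g1 = [28.4, 34.2, 29.5, 32.2, 30.1]
    g2 = [26.1, 28.3, 24.3, 26.2, 27.8]
    res = tukey_hsd(g0, g1, g2)
    assert np.allclose(res.statistic, -res.statistic.T)
    assert np.allclose(res.pvalue, res.pvalue.T)
    assert np.allclose(np.diag(res.pvalue), 1.0)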
| 78,623
| 37.980664
| 90
|
py
|
scipy
|
scipy-main/scipy/stats/_rvs_sampling.py
|
import warnings
from scipy.stats.sampling import RatioUniforms
def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
"""
Generate random samples from a probability density function using the
ratio-of-uniforms method.
.. deprecated:: 1.12.0
`rvs_ratio_uniforms` is deprecated in favour of
`scipy.stats.sampling.RatioUniforms` from version 1.12.0 and will
be removed in SciPy 1.14.0
Parameters
----------
pdf : callable
A function with signature `pdf(x)` that is proportional to the
probability density function of the distribution.
umax : float
The upper bound of the bounding rectangle in the u-direction.
vmin : float
The lower bound of the bounding rectangle in the v-direction.
vmax : float
The upper bound of the bounding rectangle in the v-direction.
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
    c : float, optional
Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
        If `random_state` is None (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, then that instance is used.
Returns
-------
rvs : ndarray
The random variates distributed according to the probability
distribution defined by the pdf.
Notes
-----
Please refer to `scipy.stats.sampling.RatioUniforms` for the documentation.
"""
warnings.warn("Please use `RatioUniforms` from the "
"`scipy.stats.sampling` namespace. The "
"`scipy.stats.rvs_ratio_uniforms` namespace is deprecated "
"and will be removed in SciPy 1.14.0",
category=DeprecationWarning, stacklevel=2)
gen = RatioUniforms(pdf, umax=umax, vmin=vmin, vmax=vmax,
c=c, random_state=random_state)
return gen.rvs(size)
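# Editor's sketch -- `_demo_ratio_uniforms_migration` is a hypothetical
# helper, not part of SciPy. It shows the non-deprecated equivalent of a
# `rvs_ratio_uniforms` call, here for a standard normal density with the
# classic bounding rectangle umax = 1, vmin/vmax = -/+ sqrt(2/e).
def _demo_ratio_uniforms_migration():
    import numpy as np
    pdf = lambda x: np.exp(-x**2 / 2)   # proportional to the normal pdf
    vbound = np.sqrt(2 / np.e)
    gen = RatioUniforms(pdf, umax=1.0, vmin=-vbound, vmax=vbound,
                        random_state=12345)
    return gen.rvs(size=10)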
| 2,233
| 38.192982
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_generate_pyx.py
|
import pathlib
import subprocess
import sys
import os
import argparse
def make_boost(outdir, distutils_build=False):
# Call code generator inside _boost directory
code_gen = pathlib.Path(__file__).parent / '_boost/include/code_gen.py'
if distutils_build:
subprocess.run([sys.executable, str(code_gen), '-o', outdir,
'--distutils-build', 'True'], check=True)
else:
subprocess.run([sys.executable, str(code_gen), '-o', outdir],
check=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--outdir", type=str,
help="Path to the output directory")
args = parser.parse_args()
if not args.outdir:
# We're dealing with a distutils build here, write in-place:
outdir_abs = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
outdir_abs_boost = outdir_abs / '_boost' / 'src'
if not os.path.exists(outdir_abs_boost):
os.makedirs(outdir_abs_boost)
make_boost(outdir_abs_boost, distutils_build=True)
else:
# Meson build
srcdir_abs = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
outdir_abs = pathlib.Path(os.getcwd()) / args.outdir
make_boost(outdir_abs)
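# Editor's note (illustrative, not part of the build system): under Meson
# the generator is invoked with an explicit output directory, e.g.
#
#     python _generate_pyx.py -o <meson-build-subdir>
#
# whereas running the script with no arguments writes the generated Boost
# sources in-place under `_boost/src` for a distutils build.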
| 1,312
| 34.486486
| 77
|
py
|
scipy
|
scipy-main/scipy/stats/_binned_statistic.py
|
import builtins
import numpy as np
from numpy.testing import suppress_warnings
from operator import index
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
          represented by function([]), or NaN if this raises an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
        widths. Values in `x` that are smaller than the lowest bin edge are
        assigned to bin number 0, and values beyond the highest bin are
        assigned to ``bins[-1]``. If the bin edges are specified, the number
        of bins will be ``nx = len(bins) - 1``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
BinnedStatisticResult(statistic=array([4. , 4.5]),
bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
BinnedStatisticResult(statistic=array([[4. , 4.5],
[8. , 9. ]]), bin_edges=array([1., 4., 7.]),
binnumber=array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
BinnedStatisticResult(statistic=array([1., 2., 4.]),
bin_edges=array([1., 2., 3., 4.]),
binnumber=array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> rng = np.random.default_rng()
>>> windspeed = 8 * rng.random(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
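# Editor's sketch -- `_demo_binned_statistic_binnumber` is a hypothetical
# helper, not part of SciPy. It illustrates the `binnumber` convention
# documented above: bin i covers [bin_edges[i-1], bin_edges[i]), index 0
# marks values below the lowest edge, values on the rightmost edge fall in
# the last bin, and values above it get the next (outlier) index.
def _demo_binned_statistic_binnumber():
    x = [0.5, 1.0, 2.5, 3.0, 9.9]
    res = binned_statistic(x, x, statistic='count', bins=[1.0, 2.0, 3.0])
    assert list(res.binnumber) == [0, 1, 2, 2, 3]
    # the statistic only covers the two interior bins
    assert list(res.statistic) == [1.0, 2.0]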
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
          represented by function([]), or NaN if this raises an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
        If the bin edges are specified, the number of bins will be
        ``nx = len(x_edge) - 1`` and ``ny = len(y_edge) - 1``.
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
    Bin edges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
Note that the returned linearized bin indices are used for an array with
extra bins on the outer binedges to capture values outside of the defined
bin bounds.
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny])
>>> ret.statistic
array([[2., 1.],
[1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
    This shows that the first three elements belong in xbin 1 and the
    fourth in xbin 2, and similarly for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
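# Editor's sketch -- `_demo_binnumber_linearization` is a hypothetical
# helper, not part of SciPy. As described in the Notes above, the default
# linearized `binnumber` is the row-major index into a grid that includes
# one outlier bin on each side, so it can be recovered from the expanded
# form with `np.ravel_multi_index`.
def _demo_binnumber_linearization():
    x = [0.1, 0.1, 0.1, 0.6]
    y = [2.1, 2.6, 2.1, 2.1]
    binx, biny = [0.0, 0.5, 1.0], [2.0, 2.5, 3.0]
    flat = binned_statistic_2d(x, y, None, 'count',
                               bins=[binx, biny]).binnumber
    expanded = binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
                                   expand_binnumbers=True).binnumber
    nbin = (len(binx) + 1, len(biny) + 1)  # interior bins plus 2 outliers
    assert np.array_equal(flat, np.ravel_multi_index(expanded, nbin))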
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def _bincount(x, weights):
if np.iscomplexobj(weights):
a = np.bincount(x, np.real(weights))
b = np.bincount(x, np.imag(weights))
z = a + b*1j
else:
z = np.bincount(x, weights)
return z
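# Editor's sketch -- `_demo_bincount_complex` is a hypothetical helper, not
# part of SciPy. `np.bincount` does not accept complex weights, so the
# wrapper above bins real and imaginary parts separately and recombines.
def _demo_bincount_complex():
    idx = np.array([0, 1, 1, 2])
    w = np.array([1 + 2j, 3 - 1j, 1 + 0j, 2 + 2j])
    assert np.allclose(_bincount(idx, w), [1 + 2j, 4 - 1j, 2 + 2j])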
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False,
binned_statistic_result=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of N arrays of length D, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `sample`, or a list of sequences - each with the
same shape as `sample`. If `values` is such a list, the statistic
will be computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0. If the number of values
within a given bin is 0 or 1, the computed standard deviation value
will be 0 for the bin.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
          represented by function([]), or NaN if this raises an error.
bins : sequence or positive int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
    binned_statistic_result : BinnedStatisticddResult
Result of a previous call to the function in order to reuse bin edges
and bin numbers with new values and/or a different statistic.
        To reuse bin numbers, `expand_binnumbers` must have been set to
        False (the default).
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
    Bin edges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
    Take an array of 600 (x, y) coordinates as an example.
    `binned_statistic_dd` can handle arrays of higher dimension `D`, but a
    plot of dimension `D+1` is then required.
>>> mu = np.array([0., 1.])
>>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
>>> multinormal = stats.multivariate_normal(mu, sigma)
>>> data = multinormal.rvs(size=600, random_state=235412)
>>> data.shape
(600, 2)
Create bins and count how many arrays fall in each bin:
>>> N = 60
>>> x = np.linspace(-3, 3, N)
>>> y = np.linspace(-3, 4, N)
>>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
... statistic='count')
>>> bincounts = ret.statistic
Set the volume and the location of bars:
>>> dx = x[1] - x[0]
>>> dy = y[1] - y[0]
>>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
>>> z = 0
>>> bincounts = bincounts.ravel()
>>> x = x.ravel()
>>> y = y.ravel()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> with np.errstate(divide='ignore'): # silence random axes3d warning
... ax.bar3d(x, y, z, dx, dy, bincounts)
Reuse bin numbers and bin edges with new values:
>>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
... binned_statistic_result=ret,
... statistic='mean')
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError(f'invalid statistic {statistic!r}')
try:
bins = index(bins)
except TypeError:
# bins is not an integer
pass
# If bins was an integer-like object, now it is an actual Python int.
# NOTE: for _bin_edges(), see e.g. gh-11365
if isinstance(bins, int) and not np.isfinite(sample).all():
raise ValueError(f'{sample!r} contains non-finite values.')
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if statistic != 'count' and Vlen != Dlen:
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
if binned_statistic_result is None:
nbin, edges, dedges = _bin_edges(sample, bins, range)
binnumbers = _bin_numbers(sample, nbin, edges, dedges)
else:
edges = binned_statistic_result.bin_edges
nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
# +1 for outlier bins
dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
binnumbers = binned_statistic_result.binnumber
# Avoid overflow with double precision. Complex `values` -> `complex128`.
result_type = np.result_type(values, np.float64)
result = np.empty([Vdim, nbin.prod()], dtype=result_type)
if statistic in {'mean', np.mean}:
result.fill(np.nan)
flatcount = _bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in builtins.range(Vdim):
flatsum = _bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic in {'std', np.std}:
result.fill(np.nan)
flatcount = _bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in builtins.range(Vdim):
flatsum = _bincount(binnumbers, values[vv])
delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers]
std = np.sqrt(
_bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a]
)
result[vv, a] = std
result = np.real(result)
elif statistic == 'count':
result = np.empty([Vdim, nbin.prod()], dtype=np.float64)
result.fill(0)
flatcount = _bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic in {'sum', np.sum}:
result.fill(0)
for vv in builtins.range(Vdim):
flatsum = _bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic in {'median', np.median}:
result.fill(np.nan)
for vv in builtins.range(Vdim):
i = np.lexsort((values[vv], binnumbers))
_, j, counts = np.unique(binnumbers[i],
return_index=True, return_counts=True)
mid = j + (counts - 1) / 2
mid_a = values[vv, i][np.floor(mid).astype(int)]
mid_b = values[vv, i][np.ceil(mid).astype(int)]
medians = (mid_a + mid_b) / 2
result[vv, binnumbers[i][j]] = medians
elif statistic in {'min', np.min}:
result.fill(np.nan)
for vv in builtins.range(Vdim):
i = np.argsort(values[vv])[::-1] # Reversed so the min is last
result[vv, binnumbers[i]] = values[vv, i]
elif statistic in {'max', np.max}:
result.fill(np.nan)
for vv in builtins.range(Vdim):
i = np.argsort(values[vv])
result[vv, binnumbers[i]] = values[vv, i]
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except Exception:
null = np.nan
if np.iscomplexobj(null):
result = result.astype(np.complex128)
result.fill(null)
try:
_calc_binned_statistic(
Vdim, binnumbers, result, values, statistic
)
except ValueError:
result = result.astype(np.complex128)
_calc_binned_statistic(
Vdim, binnumbers, result, values, statistic
)
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = tuple([slice(None)] + Ndim * [slice(1, -1)])
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if expand_binnumbers and Ndim > 1:
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
# Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
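# Editor's sketch -- `_demo_binned_statistic_dd_reuse` is a hypothetical
# helper, not part of SciPy. Passing `binned_statistic_result` reuses the
# edges and bin numbers computed for the same sample, so a statistic over
# new values matches a fresh call bin for bin.
def _demo_binned_statistic_dd_reuse():
    rng = np.random.default_rng(1234)
    sample = rng.random((100, 2))
    v1, v2 = rng.random(100), rng.random(100)
    first = binned_statistic_dd(sample, v1, statistic='mean', bins=5)
    reused = binned_statistic_dd(sample, v2, statistic='mean',
                                 binned_statistic_result=first)
    fresh = binned_statistic_dd(sample, v2, statistic='mean', bins=5)
    # empty bins are NaN in both results
    assert np.allclose(reused.statistic, fresh.statistic, equal_nan=True)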
def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func):
unique_bin_numbers = np.unique(bin_numbers)
for vv in builtins.range(Vdim):
bin_map = _create_binned_data(bin_numbers, unique_bin_numbers,
values, vv)
for i in unique_bin_numbers:
stat = stat_func(np.array(bin_map[i]))
if np.iscomplexobj(stat) and not np.iscomplexobj(result):
raise ValueError("The statistic function returns complex ")
result[vv, i] = stat
def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):
""" Create hashmap of bin ids to values in bins
key: bin number
value: list of binned data
"""
bin_map = dict()
for i in unique_bin_numbers:
bin_map[i] = []
for i in builtins.range(len(bin_numbers)):
bin_map[bin_numbers[i]].append(values[vv, i])
return bin_map
def _bin_edges(sample, bins=None, range=None):
""" Create edge arrays
"""
Dlen, Ndim = sample.shape
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
if len(range) != Ndim:
raise ValueError(
f"range given for {len(range)} dimensions; {Ndim} required")
smin = np.empty(Ndim)
smax = np.empty(Ndim)
for i in builtins.range(Ndim):
if range[i][1] < range[i][0]:
raise ValueError(
"In {}range, start must be <= stop".format(
f"dimension {i + 1} of " if Ndim > 1 else ""))
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in builtins.range(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Preserve sample floating point precision in bin edges
edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating)
else float)
# Create edge arrays
for i in builtins.range(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1,
dtype=edges_dtype)
else:
edges[i] = np.asarray(bins[i], edges_dtype)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
return nbin, edges, dedges
def _bin_numbers(sample, nbin, edges, dedges):
"""Compute the bin number each sample falls into, in each dimension
"""
Dlen, Ndim = sample.shape
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in range(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in range(Ndim):
# Find the rounding precision
dedges_min = dedges[i].min()
if dedges_min == 0:
raise ValueError('The smallest edge difference is numerically 0.')
decimal = int(-np.log10(dedges_min)) + 6
# Find which points are on the rightmost edge.
on_edge = np.where((sample[:, i] >= edges[i][-1]) &
(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal)))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
return binnumbers
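# Editor's sketch -- `_demo_rightmost_edge` is a hypothetical helper, not
# part of SciPy. A point exactly on the rightmost edge would be sent to the
# outlier bin by `np.digitize`; the rounding logic above shifts it back
# into the last interior bin.
def _demo_rightmost_edge():
    sample = np.array([[0.5], [1.0]])       # 1.0 sits on the last edge
    edges = [np.array([0.0, 0.5, 1.0])]
    nbin = np.array([4])                    # 2 interior + 2 outlier bins
    dedges = [np.diff(edges[0])]
    binnumbers = _bin_numbers(sample, nbin, edges, dedges)
    assert list(binnumbers) == [2, 2]       # both in the last interior bin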
| 32,704
| 40.086683
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_distr_params.py
|
"""
Sane parameters for stats.distributions.
"""
import numpy as np
distcont = [
['alpha', (3.5704770516650459,)],
['anglit', ()],
['arcsine', ()],
['argus', (1.0,)],
['beta', (2.3098496451481823, 0.62687954300963677)],
['betaprime', (5, 6)],
['bradford', (0.29891359763170633,)],
['burr', (10.5, 4.3)],
['burr12', (10, 4)],
['cauchy', ()],
['chi', (78,)],
['chi2', (55,)],
['cosine', ()],
['crystalball', (2.0, 3.0)],
['dgamma', (1.1023326088288166,)],
['dweibull', (2.0685080649914673,)],
['erlang', (10,)],
['expon', ()],
['exponnorm', (1.5,)],
['exponpow', (2.697119160358469,)],
['exponweib', (2.8923945291034436, 1.9505288745913174)],
['f', (29, 18)],
['fatiguelife', (29,)], # correction numargs = 1
['fisk', (3.0857548622253179,)],
['foldcauchy', (4.7164673455831894,)],
['foldnorm', (1.9521253373555869,)],
['gamma', (1.9932305483800778,)],
['gausshyper', (13.763771604130699, 3.1189636648681431,
2.5145980350183019, 5.1811649903971615)], # veryslow
['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
['genextreme', (-0.1,)],
['gengamma', (4.4162385429431925, 3.1193091679242761)],
['gengamma', (4.4162385429431925, -3.1193091679242761)],
['genhalflogistic', (0.77274727809929322,)],
['genhyperbolic', (0.5, 1.5, -0.5,)],
['geninvgauss', (2.3, 1.5)],
['genlogistic', (0.41192440799679475,)],
['gennorm', (1.2988442399460265,)],
['halfgennorm', (0.6748054997000371,)],
['genpareto', (0.1,)], # use case with finite moments
['gibrat', ()],
['gompertz', (0.94743713075105251,)],
['gumbel_l', ()],
['gumbel_r', ()],
['halfcauchy', ()],
['halflogistic', ()],
['halfnorm', ()],
['hypsecant', ()],
['invgamma', (4.0668996136993067,)],
['invgauss', (0.14546264555347513,)],
['invweibull', (10.58,)],
['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
['johnsonsu', (2.554395574161155, 2.2482281679651965)],
['kappa4', (0.0, 0.0)],
['kappa4', (-0.1, 0.1)],
['kappa4', (0.0, 0.1)],
['kappa4', (0.1, 0.0)],
['kappa3', (1.0,)],
['ksone', (1000,)], # replace 22 by 100 to avoid failing range, ticket 956
['kstwo', (10,)],
['kstwobign', ()],
['laplace', ()],
['laplace_asymmetric', (2,)],
['levy', ()],
['levy_l', ()],
['levy_stable', (1.8, -0.5)],
['loggamma', (0.41411931826052117,)],
['logistic', ()],
['loglaplace', (3.2505926592051435,)],
['lognorm', (0.95368226960575331,)],
['loguniform', (0.01, 1.25)],
['lomax', (1.8771398388773268,)],
['maxwell', ()],
['mielke', (10.4, 4.6)],
['moyal', ()],
['nakagami', (4.9673794866666237,)],
['ncf', (27, 27, 0.41578441799226107)],
['nct', (14, 0.24045031331198066)],
['ncx2', (21, 1.0560465975116415)],
['norm', ()],
['norminvgauss', (1.25, 0.5)],
['pareto', (2.621716532144454,)],
['pearson3', (0.1,)],
['pearson3', (-2,)],
['powerlaw', (1.6591133289905851,)],
['powerlaw', (0.6591133289905851,)],
['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
['powernorm', (4.4453652254590779,)],
['rayleigh', ()],
['rdist', (1.6,)],
['recipinvgauss', (0.63004267809369119,)],
['reciprocal', (0.01, 1.25)],
['rel_breitwigner', (36.545206797050334, )],
['rice', (0.7749725210111873,)],
['semicircular', ()],
['skewcauchy', (0.5,)],
['skewnorm', (4.0,)],
['studentized_range', (3.0, 10.0)],
['t', (2.7433514990818093,)],
['trapezoid', (0.2, 0.8)],
['triang', (0.15785029824528218,)],
['truncexpon', (4.6907725456810478,)],
['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
['truncnorm', (0.1, 2.)],
['truncpareto', (1.8, 5.3)],
['truncweibull_min', (2.5, 0.25, 1.75)],
['tukeylambda', (3.1321477856738267,)],
['uniform', ()],
['vonmises', (3.9939042581071398,)],
['vonmises_line', (3.9939042581071398,)],
['wald', ()],
['weibull_max', (2.8687961709100187,)],
['weibull_min', (1.7866166930421596,)],
['wrapcauchy', (0.031071279018614728,)]]
distdiscrete = [
['bernoulli',(0.3,)],
['betabinom', (5, 2.3, 0.63)],
['binom', (5, 0.4)],
['boltzmann',(1.4, 19)],
['dlaplace', (0.8,)], # 0.5
['geom', (0.5,)],
['hypergeom',(30, 12, 6)],
['hypergeom',(21,3,12)], # numpy.random (3,18,12) numpy ticket:921
['hypergeom',(21,18,11)], # numpy.random (18,3,11) numpy ticket:921
['nchypergeom_fisher', (140, 80, 60, 0.5)],
['nchypergeom_wallenius', (140, 80, 60, 0.5)],
['logser', (0.6,)], # re-enabled, numpy ticket:921
['nbinom', (0.4, 0.4)], # from tickets: 583
['nbinom', (5, 0.5)],
['planck', (0.51,)], # 4.1
['poisson', (0.6,)],
['randint', (7, 31)],
['skellam', (15, 8)],
['zipf', (6.5,)],
['zipfian', (0.75, 15)],
['zipfian', (1.25, 10)],
['yulesimon', (11.0,)],
['nhypergeom', (20, 7, 1)]
]
invdistdiscrete = [
# In each of the following, at least one shape parameter is invalid
['hypergeom', (3, 3, 4)],
['nhypergeom', (5, 2, 8)],
['nchypergeom_fisher', (3, 3, 4, 1)],
['nchypergeom_wallenius', (3, 3, 4, 1)],
['bernoulli', (1.5, )],
['binom', (10, 1.5)],
['betabinom', (10, -0.4, -0.5)],
['boltzmann', (-1, 4)],
['dlaplace', (-0.5, )],
['geom', (1.5, )],
['logser', (1.5, )],
['nbinom', (10, 1.5)],
['planck', (-0.5, )],
['poisson', (-0.5, )],
['randint', (5, 2)],
['skellam', (-5, -2)],
['zipf', (-2, )],
['yulesimon', (-2, )],
['zipfian', (-0.75, 15)]
]
invdistcont = [
# In each of the following, at least one shape parameter is invalid
['alpha', (-1, )],
['anglit', ()],
['arcsine', ()],
['argus', (-1, )],
['beta', (-2, 2)],
['betaprime', (-2, 2)],
['bradford', (-1, )],
['burr', (-1, 1)],
['burr12', (-1, 1)],
['cauchy', ()],
['chi', (-1, )],
['chi2', (-1, )],
['cosine', ()],
['crystalball', (-1, 2)],
['dgamma', (-1, )],
['dweibull', (-1, )],
['erlang', (-1, )],
['expon', ()],
['exponnorm', (-1, )],
['exponweib', (1, -1)],
['exponpow', (-1, )],
['f', (10, -10)],
['fatiguelife', (-1, )],
['fisk', (-1, )],
['foldcauchy', (-1, )],
['foldnorm', (-1, )],
['genlogistic', (-1, )],
['gennorm', (-1, )],
['genpareto', (np.inf, )],
['genexpon', (1, 2, -3)],
['genextreme', (np.inf, )],
['genhyperbolic', (0.5, -0.5, -1.5,)],
['gausshyper', (1, 2, 3, -4)],
['gamma', (-1, )],
['gengamma', (-1, 0)],
['genhalflogistic', (-1, )],
['geninvgauss', (1, 0)],
['gibrat', ()],
['gompertz', (-1, )],
['gumbel_r', ()],
['gumbel_l', ()],
['halfcauchy', ()],
['halflogistic', ()],
['halfnorm', ()],
['halfgennorm', (-1, )],
['hypsecant', ()],
['invgamma', (-1, )],
['invgauss', (-1, )],
['invweibull', (-1, )],
['johnsonsb', (1, -2)],
['johnsonsu', (1, -2)],
['kappa4', (np.nan, 0)],
['kappa3', (-1, )],
['ksone', (-1, )],
['kstwo', (-1, )],
['kstwobign', ()],
['laplace', ()],
['laplace_asymmetric', (-1, )],
['levy', ()],
['levy_l', ()],
['levy_stable', (-1, 1)],
['logistic', ()],
['loggamma', (-1, )],
['loglaplace', (-1, )],
['lognorm', (-1, )],
['loguniform', (10, 5)],
['lomax', (-1, )],
['maxwell', ()],
['mielke', (1, -2)],
['moyal', ()],
['nakagami', (-1, )],
['ncx2', (-1, 2)],
['ncf', (10, 20, -1)],
['nct', (-1, 2)],
['norm', ()],
['norminvgauss', (5, -10)],
['pareto', (-1, )],
['pearson3', (np.nan, )],
['powerlaw', (-1, )],
['powerlognorm', (1, -2)],
['powernorm', (-1, )],
['rdist', (-1, )],
['rayleigh', ()],
['rice', (-1, )],
['recipinvgauss', (-1, )],
['semicircular', ()],
['skewnorm', (np.inf, )],
['studentized_range', (-1, 1)],
['rel_breitwigner', (-2, )],
['t', (-1, )],
['trapezoid', (0, 2)],
['triang', (2, )],
['truncexpon', (-1, )],
['truncnorm', (10, 5)],
['truncpareto', (-1, 5)],
['truncpareto', (1.8, .5)],
['truncweibull_min', (-2.5, 0.25, 1.75)],
['tukeylambda', (np.nan, )],
['uniform', ()],
['vonmises', (-1, )],
['vonmises_line', (-1, )],
['wald', ()],
['weibull_min', (-1, )],
['weibull_max', (-1, )],
['wrapcauchy', (2, )],
['reciprocal', (15, 10)],
['skewcauchy', (2, )]
]
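# Editor's sketch -- `_demo_iterate_distcont` is a hypothetical helper, not
# part of SciPy. The lists above are typically consumed by looking up each
# distribution on `scipy.stats` by name and evaluating it at the supplied
# shape parameters.
def _demo_iterate_distcont():
    from scipy import stats
    for name, shapes in distcont[:5]:
        dist = getattr(stats, name)
        median = dist.ppf(0.5, *shapes)
        assert np.isclose(dist.cdf(median, *shapes), 0.5)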
| 8,577
| 29.204225
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_morestats.py
|
from __future__ import annotations
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray, zeros,
arange, sort, amin, amax, sqrt, array, atleast_1d, # noqa
compress, pi, exp, ravel, count_nonzero, sin, cos,
arctan2, hypot)
from scipy import optimize, special, interpolate, stats
from scipy._lib._bunch import _make_tuple_bunch
from scipy._lib._util import _rename_parameter, _contains_nan, _get_nan
from ._ansari_swilk_statistics import gscale, swilk
from . import _stats_py
from ._fit import FitResult
from ._stats_py import find_repeats, _normtest_finish, SignificanceResult
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
from ._hypotests import _get_wilcoxon_distr
from ._axis_nan_policy import _axis_nan_policy_factory
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene',
'fligner', 'mood', 'wilcoxon', 'median_test',
'circmean', 'circvar', 'circstd', 'anderson_ksamp',
'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
'yeojohnson_normplot', 'directional_stats',
'false_discovery_control'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
r"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
See Also
--------
mvsdist
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
    Converts data to 1-D and assumes all data has the same mean and variance.
    Uses the Jeffreys prior for variance and std.
    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
First a basic example to demonstrate the outputs:
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.bayes_mvs(data)
>>> mean
Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467))
>>> var
Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
>>> std
Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631))
Now we generate some normally distributed random data, and get estimates of
mean and standard deviation with 95% confidence intervals for those
estimates:
>>> n_samples = 100000
>>> data = stats.norm.rvs(size=n_samples)
>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.hist(data, bins=100, density=True, label='Histogram of data')
>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
... alpha=0.2, label=r'Estimated mean (95% limits)')
>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
... label=r'Estimated scale (95% limits)')
>>> ax.legend(fontsize=10)
>>> ax.set_xlim([-4, 4])
>>> ax.set_ylim([0, 0.5])
>>> plt.show()
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
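# Editor's sketch -- `_demo_bayes_mvs_equivalence` is a hypothetical helper,
# not part of SciPy. It checks the equivalence documented above between
# `bayes_mvs` and the frozen distributions returned by `mvsdist`.
def _demo_bayes_mvs_equivalence():
    data = [6, 9, 12, 7, 8, 8, 13]
    via_bayes = bayes_mvs(data, alpha=0.9)
    via_dists = tuple((d.mean(), d.interval(0.9)) for d in mvsdist(data))
    for (c1, ci1), (c2, ci2) in zip(via_bayes, via_dists):
        assert np.isclose(c1, c2) and np.allclose(ci1, ci2)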
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data.
vdist : "frozen" distribution object
Distribution object representing the variance of the data.
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data.
See Also
--------
bayes_mvs
Notes
-----
    The return values from ``bayes_mvs(data)`` are equivalent to
    ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
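        # Exact posteriors under the prior from the reference: Student's t
        # for the mean, inverted gamma for the variance, and a generalized
        # gamma (the square root of the inverted-gamma variate) for the
        # standard deviation.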
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
@_axis_nan_policy_factory(
lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
)
def kstat(data, n=2):
r"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic k_n is the unique symmetric unbiased estimator of the
nth cumulant kappa_n.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar : Returns an unbiased estimator of the variance of the k-statistic
moment : Returns the n-th central moment about the mean for a sample.
Notes
-----
For a sample size n, the first few k-statistics are given by:
    .. math::

        k_{1} = \mu

        k_{2} = \frac{n}{n-1} m_{2}

        k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}

        k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
where :math:`\mu` is the sample mean, :math:`m_2` is the sample
variance, and :math:`m_i` is the i-th sample central moment.
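
    For instance, the relation for :math:`k_2` is easy to verify
    numerically:

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.array([1., 2., 4., 8.])
    >>> n = len(x)
    >>> np.allclose(stats.kstat(x, 2), n / (n - 1) * stats.moment(x, 2))
    True
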
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
Examples
--------
>>> from scipy import stats
>>> from numpy.random import default_rng
>>> rng = default_rng()
As sample size increases, n-th moment and n-th k-statistic converge to the
same number (although they aren't identical). In the case of the normal
distribution, they converge to zero.
>>> for n in [2, 3, 4, 5, 6, 7]:
... x = rng.normal(size=10**n)
... m, k = stats.moment(x, 3), stats.kstat(x, 3)
... print("%.3g %.3g %.3g" % (m, k, m-k))
-0.631 -0.651 0.0194 # random
0.0282 0.0283 -8.49e-05
-0.0454 -0.0454 1.36e-05
7.53e-05 7.53e-05 -2.26e-09
0.00166 0.00166 -4.99e-09
-2.88e-06 -2.88e-06 8.63e-13
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = np.zeros(n + 1, np.float64)
data = ravel(data)
N = data.size
# raise ValueError on empty input
if N == 0:
raise ValueError("Data input must not be empty")
# on nan input, return nan without warning
if np.isnan(np.sum(data)):
return np.nan
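    # Compute the power sums S[k] = sum(data**k); each k-statistic below is
    # a polynomial in these power sums.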
for k in range(1, n + 1):
S[k] = np.sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
@_axis_nan_policy_factory(
lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
)
def kstatvar(data, n=2):
r"""Return an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat : Returns the n-th k-statistic.
moment : Returns the n-th central moment about the mean for a sample.
Notes
-----
The variances of the first few k-statistics are given by:
    .. math::

        var(k_{1}) = \frac{\kappa_2}{n}

        var(k_{2}) = \frac{\kappa_4}{n} + \frac{2\kappa^2_{2}}{n - 1}

        var(k_{3}) = \frac{\kappa_6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
                     \frac{9 \kappa^2_{3}}{n - 1} +
                     \frac{6 n \kappa^3_{2}}{(n-1) (n-2)}

        var(k_{4}) = \frac{\kappa_8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
                     \frac{48 \kappa_{3} \kappa_5}{n - 1} +
                     \frac{34 \kappa^2_{4}}{n-1} +
                     \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
                     \frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
                     \frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}

    where :math:`\kappa_i` denotes the i-th cumulant.
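
    The variance of :math:`k_1` is estimated here by ``k_2 / n``, which is
    simple to check:

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.arange(10.)
    >>> np.allclose(stats.kstatvar(x, 1), stats.kstat(x, 2) / len(x))
    True
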
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> import numpy as np
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
>>> from scipy.stats._morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.empty(n, dtype=np.float64)
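    # Exact medians of the extreme order statistics: the largest of n
    # independent uniforms satisfies P(max <= v) = v**n, so its median is
    # 0.5**(1/n); the median of the smallest follows by symmetry.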
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, str):
try:
dist = getattr(distributions, dist)
except AttributeError as e:
raise ValueError("%s is not a valid distribution name" % dist) from e
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots."""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles.
If given and `fit` is True, also plots the least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
rvalue : bool, optional
If `plot` is provided and `fit` is True, setting `rvalue` to True
includes the coefficient of determination on the plot.
Default is False.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
        quantiles = dist.ppf(val), for

                0.5**(1/n),                  for i = n
          val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
                1 - 0.5**(1/n),              for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
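
    For a small sample, the theoretical quantiles can be reproduced directly
    from this formula (a quick illustrative check):

    >>> import numpy as np
    >>> from scipy import stats
    >>> n = 5
    >>> val = (np.arange(1, n + 1) - 0.3175) / (n + 0.365)
    >>> val[-1] = 0.5**(1 / n)
    >>> val[0] = 1 - val[-1]
    >>> osm, osr = stats.probplot(np.arange(n), fit=False)
    >>> np.allclose(osm, stats.norm.ppf(val))
    True
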
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> rng = np.random.default_rng()
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample, random_state=rng)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample, random_state=rng)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2), random_state=rng).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample, random_state=rng)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500, random_state=rng)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
if fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit:
# perform a linear least squares fit.
slope, intercept, r, prob, _ = _stats_py.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo')
if fit:
plot.plot(osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Theoretical quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
if fit and rvalue:
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""Calculate the shape parameter that maximizes the PPCC.
The probability plot correlation coefficient (PPCC) plot can be used
to determine the optimal shape parameter for a one-parameter family
    of distributions. ``ppcc_max`` returns the shape parameter that
    maximizes the probability plot correlation coefficient of the given
    data within a one-parameter family of distributions.
Parameters
----------
x : array_like
Input array.
brack : tuple, optional
        Triple ``(a, b, c)`` where ``a < b < c``. If `brack` consists of two
        numbers ``(a, c)``, they are assumed to be a starting interval for a
        downhill bracket search (see `scipy.optimize.brent`).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
Returns
-------
shape_value : float
The shape parameter at which the probability plot correlation
coefficient reaches its max value.
See Also
--------
ppcc_plot, probplot, boxcox
Notes
-----
    The `brack` keyword serves as a starting point, which is useful in corner
    cases. One can use a plot to obtain a rough visual estimate of the
    location of the maximum and start the search near it.
References
----------
.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test
for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
.. [2] Engineering Statistics Handbook, NIST/SEMATEC,
https://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
Examples
--------
First we generate some random data from a Weibull distribution
with shape parameter 2.5:
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> c = 2.5
>>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng)
Generate the PPCC plot for this data with the Weibull distribution.
>>> fig, ax = plt.subplots(figsize=(8, 6))
>>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax)
We calculate the value where the shape should reach its maximum and a
red line is drawn there. The line should coincide with the highest
point in the PPCC graph.
>>> cmax = stats.ppcc_max(x, brack=(c/2, 2*c), dist='weibull_min')
>>> ax.axvline(cmax, color='r')
>>> plt.show()
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = _stats_py.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack,
args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
    distributions. It cannot be used for distributions without shape
    parameters (like the normal distribution) or with multiple shape
    parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore
particularly useful in practice.
Parameters
----------
x : array_like
Input array.
a, b : scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See Also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Weibull distribution
with shape parameter 2.5, and plot the histogram of the data:
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> c = 2.5
>>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng)
Take a look at the histogram of the data.
>>> fig1, ax = plt.subplots(figsize=(9, 4))
>>> ax.hist(x, bins=50)
>>> ax.set_title('Histogram of x')
>>> plt.show()
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter ``c``
used above):
>>> fig2 = plt.figure(figsize=(12, 4))
>>> ax1 = fig2.add_subplot(1, 3, 1)
>>> ax2 = fig2.add_subplot(1, 3, 2)
>>> ax3 = fig2.add_subplot(1, 3, 3)
>>> res = stats.probplot(x, plot=ax1)
>>> res = stats.boxcox_normplot(x, -4, 4, plot=ax2)
>>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax3)
>>> ax3.axvline(c, color='r')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
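
    For a fixed ``lmb``, the value can be reproduced by hand (an
    illustrative check, using the biased variance as in the formula above):

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.array([1., 2., 3., 4.])
    >>> lmb = 0.5
    >>> y = stats.boxcox(x, lmbda=lmb)
    >>> llf = (lmb - 1) * np.log(x).sum() - len(x) / 2 * np.log(y.var())
    >>> np.allclose(llf, stats.boxcox_llf(lmb, x))
    True
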
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> rng = np.random.default_rng()
>>> x = stats.loggamma.rvs(5, loc=10, size=1000, random_state=rng)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
logdata = np.log(data)
# Compute the variance of the transformed data.
if lmb == 0:
variance = np.var(logdata, axis=0)
else:
# Transform without the constant offset 1/lmb. The offset does
        # not affect the variance, and the subtraction of the offset can
# lead to loss of precision.
variance = np.var(data**lmb / lmb, axis=0)
return (lmb - 1) * np.sum(logdata, axis=0) - N/2 * np.log(variance)
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None, optimizer=None):
r"""Return a dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array to be transformed.
If `lmbda` is not None, this is an alias of
`scipy.special.boxcox`.
Returns nan if ``x < 0``; returns -inf if ``x == 0 and lmbda < 0``.
If `lmbda` is None, array must be positive, 1-dimensional, and
non-constant.
lmbda : scalar, optional
If `lmbda` is None (default), find the value of `lmbda` that maximizes
the log-likelihood function and return it as the second output
argument.
If `lmbda` is not None, do the transformation for that value.
alpha : float, optional
If `lmbda` is None and `alpha` is not None (default), return the
``100 * (1-alpha)%`` confidence interval for `lmbda` as the third
output argument. Must be between 0.0 and 1.0.
If `lmbda` is not None, `alpha` is ignored.
optimizer : callable, optional
If `lmbda` is None, `optimizer` is the scalar optimizer used to find
the value of `lmbda` that minimizes the negative log-likelihood
function. `optimizer` is a callable that accepts one argument:
fun : callable
The objective function, which evaluates the negative
log-likelihood function at a provided value of `lmbda`
and returns an object, such as an instance of
`scipy.optimize.OptimizeResult`, which holds the optimal value of
`lmbda` in an attribute `x`.
See the example in `boxcox_normmax` or the documentation of
`scipy.optimize.minimize_scalar` for more information.
If `lmbda` is not None, `optimizer` is ignored.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the `lmbda` that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and `alpha` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given `alpha`.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
        y = (x**lmbda - 1) / lmbda,  for lmbda != 0
            log(x),                  for lmbda = 0
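
    For a fixed ``lmbda``, the transform can be verified directly (a small
    illustrative check):

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.array([1., 2., 3.])
    >>> np.allclose(stats.boxcox(x, lmbda=2), (x**2 - 1) / 2)
    True
    >>> np.allclose(stats.boxcox(x, lmbda=0), np.log(x))
    True
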
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when `alpha` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
if x.ndim != 1:
raise ValueError("Data must be 1-dimensional.")
if x.size == 0:
return x
if np.all(x == x[0]):
raise ValueError("Data must not be constant.")
if np.any(x <= 0):
raise ValueError("Data must be positive.")
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle', optimizer=optimizer)
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=None, method='pearsonr', optimizer=None):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array. All entries must be positive, finite, real numbers.
brack : 2-tuple, optional, default (-2.0, 2.0)
The starting interval for a downhill bracket search for the default
`optimize.brent` solver. Note that this is in most cases not
critical; the final result is allowed to be outside this bracket.
If `optimizer` is passed, `brack` must be None.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Maximizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
optimizer : callable, optional
`optimizer` is a callable that accepts one argument:
fun : callable
The objective function to be minimized. `fun` accepts one argument,
the Box-Cox transform parameter `lmbda`, and returns the value of
the function (e.g., the negative log-likelihood) at the provided
argument. The job of `optimizer` is to find the value of `lmbda`
that *minimizes* `fun`.
and returns an object, such as an instance of
`scipy.optimize.OptimizeResult`, which holds the optimal value of
`lmbda` in an attribute `x`.
See the example below or the documentation of
`scipy.optimize.minimize_scalar` for more information.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot, scipy.optimize.minimize_scalar
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We can generate some data and determine the optimal ``lmbda`` in various
ways:
>>> rng = np.random.default_rng()
>>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
2.217563431465757
>>> lmax_pearsonr
2.238318660200961
>>> stats.boxcox_normmax(x, method='all')
array([2.23831866, 2.21756343])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
Alternatively, we can define our own `optimizer` function. Suppose we
are only interested in values of `lmbda` on the interval [6, 7], we
want to use `scipy.optimize.minimize_scalar` with ``method='bounded'``,
and we want to use tighter tolerances when optimizing the log-likelihood
function. To do this, we define a function that accepts positional argument
`fun` and uses `scipy.optimize.minimize_scalar` to minimize `fun` subject
to the provided bounds and tolerances:
>>> from scipy import optimize
>>> options = {'xatol': 1e-12} # absolute tolerance on `x`
>>> def optimizer(fun):
... return optimize.minimize_scalar(fun, bounds=(6, 7),
... method="bounded", options=options)
>>> stats.boxcox_normmax(x, optimizer=optimizer)
6.000...
"""
# If optimizer is not given, define default 'brent' optimizer.
if optimizer is None:
# Set default value for `brack`.
if brack is None:
brack = (-2.0, 2.0)
def _optimizer(func, args):
return optimize.brent(func, args=args, brack=brack)
# Otherwise check optimizer.
else:
if not callable(optimizer):
raise ValueError("`optimizer` must be a callable")
if brack is not None:
raise ValueError("`brack` must be None if `optimizer` is given")
        # `optimizer` is expected to return an `OptimizeResult` object; here
        # we extract the solution of the optimization problem from it.
def _optimizer(func, args):
def func_wrapped(x):
return func(x, *args)
return getattr(optimizer(func_wrapped), 'x', None)
def _pearsonr(x):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = _stats_py.pearsonr(xvals, yvals)
return 1 - r
return _optimizer(_eval_pearsonr, args=(xvals, x))
def _mle(x):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return _optimizer(_eval_mle, args=(x,))
def _all(x):
maxlog = np.empty(2, dtype=float)
maxlog[0] = _pearsonr(x)
maxlog[1] = _mle(x)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
try:
res = optimfunc(x)
except ValueError as e:
if "infs or NaNs" in str(e):
message = ("The `x` argument of `boxcox_normmax` must contain "
"only positive, finite, real numbers.")
raise ValueError(message) from e
else:
raise e
if res is None:
message = ("The `optimizer` argument of `boxcox_normmax` must return "
"an object containing the optimal `lmbda` in attribute `x`.")
raise ValueError(message)
return res
def _normplot(method, x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox or Yeo-Johnson normality plot,
optionally show it.
See `boxcox_normplot` or `yeojohnson_normplot` for details.
"""
if method == 'boxcox':
title = 'Box-Cox Normality Plot'
transform_func = boxcox
else:
title = 'Yeo-Johnson Normality Plot'
transform_func = yeojohnson
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
if method == 'boxcox' and np.any(x <= 0):
raise ValueError("Data must be positive.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the square root of correlation coefficient
# of transformed x
z = transform_func(x, lmbda=val)
_, (_, _, r) = probplot(z, dist='norm', fit=True)
ppcc[i] = r
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='$\\lambda$',
ylabel='Prob Plot Corr. Coef.',
title=title)
return lmbdas, ppcc
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `boxcox_normplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
return _normplot('boxcox', x, la, lb, plot, N)
def yeojohnson(x, lmbda=None):
r"""Return a dataset transformed by a Yeo-Johnson power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : float, optional
If ``lmbda`` is ``None``, find the lambda that maximizes the
log-likelihood function and return it as the second output argument.
Otherwise the transformation is done for the given value.
Returns
-------
yeojohnson: ndarray
Yeo-Johnson power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
See Also
--------
probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox
Notes
-----
The Yeo-Johnson transform is given by::
        y = ((x + 1)**lmbda - 1) / lmbda,                for x >= 0, lmbda != 0
            log(x + 1),                                  for x >= 0, lmbda = 0
            -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda),  for x < 0, lmbda != 2
            -log(-x + 1),                                for x < 0, lmbda = 2
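
    For a fixed ``lmbda``, both branches can be verified directly (a small
    illustrative check):

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.array([-1., 0.5, 2.])
    >>> lmbda = 1.5
    >>> xt = stats.yeojohnson(x, lmbda=lmbda)
    >>> np.allclose(xt[x >= 0], ((x[x >= 0] + 1)**lmbda - 1) / lmbda)
    True
    >>> np.allclose(xt[x < 0],
    ...             -((-x[x < 0] + 1)**(2 - lmbda) - 1) / (2 - lmbda))
    True
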
Unlike `boxcox`, `yeojohnson` does not require the input data to be
positive.
.. versionadded:: 1.2.0
References
----------
I. Yeo and R.A. Johnson, "A New Family of Power Transformations to
    Improve Normality or Symmetry", Biometrika 87.4 (2000): 954-959.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `yeojohnson` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, lmbda = stats.yeojohnson(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Yeo-Johnson transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError('Yeo-Johnson transformation is not defined for '
'complex numbers.')
if np.issubdtype(x.dtype, np.integer):
x = x.astype(np.float64, copy=False)
if lmbda is not None:
return _yeojohnson_transform(x, lmbda)
# if lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = yeojohnson_normmax(x)
y = _yeojohnson_transform(x, lmax)
return y, lmax
def _yeojohnson_transform(x, lmbda):
"""Returns `x` transformed by the Yeo-Johnson power transform with given
parameter `lmbda`.
"""
out = np.zeros_like(x)
pos = x >= 0 # binary mask
# when x >= 0
if abs(lmbda) < np.spacing(1.):
out[pos] = np.log1p(x[pos])
else: # lmbda != 0
out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda
# when x < 0
if abs(lmbda - 2) > np.spacing(1.):
out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
else: # lmbda == 2
out[~pos] = -np.log1p(-x[~pos])
return out
def yeojohnson_llf(lmb, data):
r"""The yeojohnson log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Yeo-Johnson transformation. See `yeojohnson` for
details.
data : array_like
Data to calculate Yeo-Johnson log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
    llf : float or ndarray
        Yeo-Johnson log-likelihood of `data` given `lmb`; an array if
        `data` is multi-dimensional.
See Also
--------
yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax
Notes
-----
The Yeo-Johnson log-likelihood function is defined here as
.. math::
llf = -N/2 \log(\hat{\sigma}^2) + (\lambda - 1)
\sum_i \text{ sign }(x_i)\log(|x_i| + 1)
    where :math:`\hat{\sigma}^2` is the estimated variance of the Yeo-Johnson
transformed input data ``x``.
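
    For a fixed ``lmb``, the value can be reproduced by hand (an
    illustrative check, using the biased variance of the transformed data):

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.array([-2., -1., 1., 3.])
    >>> lmb = 0.5
    >>> xt = stats.yeojohnson(x, lmbda=lmb)
    >>> llf = (-len(x) / 2 * np.log(xt.var())
    ...        + (lmb - 1) * np.sum(np.sign(x) * np.log(np.abs(x) + 1)))
    >>> np.allclose(llf, stats.yeojohnson_llf(lmb, x))
    True
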
.. versionadded:: 1.2.0
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
Generate some random variates and calculate Yeo-Johnson log-likelihood
values for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.yeojohnson_llf(lmbda, x)
Also find the optimal lmbda value with `yeojohnson`:
>>> x_most_normal, lmbda_optimal = stats.yeojohnson(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Yeo-Johnson log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `yeojohnson` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.yeojohnson(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
n_samples = data.shape[0]
if n_samples == 0:
return np.nan
trans = _yeojohnson_transform(data, lmb)
trans_var = trans.var(axis=0)
loglike = np.empty_like(trans_var)
# Avoid RuntimeWarning raised by np.log when the variance is too low
tiny_variance = trans_var < np.finfo(trans_var.dtype).tiny
loglike[tiny_variance] = np.inf
loglike[~tiny_variance] = (
-n_samples / 2 * np.log(trans_var[~tiny_variance]))
loglike[~tiny_variance] += (
(lmb - 1) * (np.sign(data) * np.log(np.abs(data) + 1)).sum(axis=0))
return loglike
def yeojohnson_normmax(x, brack=(-2, 2)):
"""Compute optimal Yeo-Johnson transform parameter.
Compute optimal Yeo-Johnson transform parameter for input data, using
maximum likelihood estimation.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
Returns
-------
maxlog : float
The optimal transform parameter found.
See Also
--------
yeojohnson, yeojohnson_llf, yeojohnson_normplot
Notes
-----
.. versionadded:: 1.2.0
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some data and determine optimal ``lmbda``
>>> rng = np.random.default_rng()
>>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5
>>> lmax = stats.yeojohnson_normmax(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax, color='r')
>>> plt.show()
"""
def _neg_llf(lmbda, data):
llf = yeojohnson_llf(lmbda, data)
# reject likelihoods that are inf which are likely due to small
# variance in the transformed space
llf[np.isinf(llf)] = -np.inf
return -llf
with np.errstate(invalid='ignore'):
return optimize.brent(_neg_llf, brack=brack, args=(x,))
def yeojohnson_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Yeo-Johnson normality plot, optionally show it.
A Yeo-Johnson normality plot shows graphically what the best
transformation parameter is to use in `yeojohnson` to obtain a
distribution that is close to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to
`yeojohnson` for Yeo-Johnson transformations. These are also the
limits of the horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Yeo-Johnson transform was done.
ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
        when fitting the Yeo-Johnson transformed input `x` against a normal
distribution.
See Also
--------
probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
    `yeojohnson_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `yeojohnson_normplot`.
.. versionadded:: 1.2.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Yeo-Johnson plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.yeojohnson(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
return _normplot('yeojohnson', x, la, lb, plot, N)
ShapiroResult = namedtuple('ShapiroResult', ('statistic', 'pvalue'))
def shapiro(x):
r"""Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
Returns
-------
statistic : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
See Also
--------
anderson : The Anderson-Darling test for normality
kstest : The Kolmogorov-Smirnov test for goodness of fit.
Notes
-----
The algorithm used is described in [4]_ but censoring parameters as
described are not implemented. For N > 5000 the W test statistic is
accurate, but the p-value may not be.
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
:doi:`10.18434/M32189`
.. [2] Shapiro, S. S. & Wilk, M.B, "An analysis of variance test for
normality (complete samples)", Biometrika, 1965, Vol. 52,
pp. 591-611, :doi:`10.2307/2333709`
.. [3] Razali, N. M. & Wah, Y. B., "Power comparisons of Shapiro-Wilk,
Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests", Journal
of Statistical Modeling and Analytics, 2011, Vol. 2, pp. 21-33.
.. [4] Royston P., "Remark AS R94: A Remark on Algorithm AS 181: The
W-test for Normality", 1995, Applied Statistics, Vol. 44,
:doi:`10.2307/2986146`
.. [5] Phipson B., and Smyth, G. K., "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn", Statistical Applications in Genetics and Molecular Biology,
2010, Vol.9, :doi:`10.2202/1544-6115.1585`
.. [6] Panagiotakos, D. B., "The value of p-value in biomedical
research", The Open Cardiovascular Medicine Journal, 2008, Vol.2,
pp. 97-99, :doi:`10.2174/1874192400802010097`
Examples
--------
Suppose we wish to infer from measurements whether the weights of adult
human males in a medical study are not normally distributed [2]_.
The weights (lbs) are recorded in the array ``x`` below.
>>> import numpy as np
>>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236])
The normality test of [1]_ and [2]_ begins by computing a statistic based
on the relationship between the observations and the expected order
statistics of a normal distribution.
>>> from scipy import stats
>>> res = stats.shapiro(x)
>>> res.statistic
0.7888147830963135
The value of this statistic tends to be high (close to 1) for samples drawn
from a normal distribution.
The test is performed by comparing the observed value of the statistic
against the null distribution: the distribution of statistic values formed
under the null hypothesis that the weights were drawn from a normal
distribution. For this normality test, the null distribution is not easy to
calculate exactly, so it is usually approximated by Monte Carlo methods,
that is, drawing many samples of the same size as ``x`` from a normal
distribution and computing the values of the statistic for each.
>>> def statistic(x):
... # Get only the `shapiro` statistic; ignore its p-value
... return stats.shapiro(x).statistic
>>> ref = stats.monte_carlo_test(x, stats.norm.rvs, statistic,
... alternative='less')
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> bins = np.linspace(0.65, 1, 50)
>>> def plot(ax): # we'll re-use this
... ax.hist(ref.null_distribution, density=True, bins=bins)
... ax.set_title("Shapiro-Wilk Test Null Distribution \n"
... "(Monte Carlo Approximation, 11 Observations)")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
>>> plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution less than or equal to the observed value of the
statistic.
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> annotation = (f'p-value={res.pvalue:.6f}\n(highlighted area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (0.75, 0.1), (0.68, 0.7), arrowprops=props)
>>> i_extreme = np.where(bins <= res.statistic)[0]
>>> for i in i_extreme:
... ax.patches[i].set_color('C1')
>>> plt.xlim(0.65, 0.9)
>>> plt.ylim(0, 4)
    >>> plt.show()
>>> res.pvalue
0.006703833118081093
If the p-value is "small" - that is, if there is a low probability of
sampling data from a normally distributed population that produces such an
extreme value of the statistic - this may be taken as evidence against
the null hypothesis in favor of the alternative: the weights were not
drawn from a normal distribution. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence *for* the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [5]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
"""
x = np.ravel(x).astype(np.float64)
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
a = zeros(N//2, dtype=np.float64)
init = 0
y = sort(x)
y -= x[N//2] # subtract the median (or a nearby value); see gh-15777
w, pw, ifault = swilk(y, a, init)
if ifault not in [0, 2]:
warnings.warn("scipy.stats.shapiro: Input data has range zero. The"
" results may not be accurate.", stacklevel=2)
if N > 5000:
warnings.warn("scipy.stats.shapiro: For N > 5000, computed p-value "
f"may not be accurate. Current N is {N}.",
stacklevel=2)
return ShapiroResult(w, pw)
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
# From Richard A. Lockhart and Michael A. Stephens "Estimation and Tests of
# Fit for the Three-Parameter Weibull Distribution"
# Journal of the Royal Statistical Society.Series B(Methodological)
# Vol. 56, No. 3 (1994), pp. 491-500, table 1. Keys are c*100
_Avals_weibull = [[0.292, 0.395, 0.467, 0.522, 0.617, 0.711, 0.836, 0.931],
[0.295, 0.399, 0.471, 0.527, 0.623, 0.719, 0.845, 0.941],
[0.298, 0.403, 0.476, 0.534, 0.631, 0.728, 0.856, 0.954],
[0.301, 0.408, 0.483, 0.541, 0.640, 0.738, 0.869, 0.969],
[0.305, 0.414, 0.490, 0.549, 0.650, 0.751, 0.885, 0.986],
[0.309, 0.421, 0.498, 0.559, 0.662, 0.765, 0.902, 1.007],
[0.314, 0.429, 0.508, 0.570, 0.676, 0.782, 0.923, 1.030],
[0.320, 0.438, 0.519, 0.583, 0.692, 0.802, 0.947, 1.057],
[0.327, 0.448, 0.532, 0.598, 0.711, 0.824, 0.974, 1.089],
[0.334, 0.469, 0.547, 0.615, 0.732, 0.850, 1.006, 1.125],
[0.342, 0.472, 0.563, 0.636, 0.757, 0.879, 1.043, 1.167]]
_Avals_weibull = np.array(_Avals_weibull)
_cvals_weibull = np.linspace(0, 0.5, 11)
_get_As_weibull = interpolate.interp1d(_cvals_weibull, _Avals_weibull.T,
kind='linear', bounds_error=False,
fill_value=_Avals_weibull[-1])
def _weibull_fit_check(params, x):
# Refine the fit returned by `weibull_min.fit` to ensure that the first
# order necessary conditions are satisfied. If not, raise an error.
# Here, use `m` for the shape parameter to be consistent with [7]
# and avoid confusion with `c` as defined in [7].
n = len(x)
m, u, s = params
def dnllf_dm(m, u):
# Partial w.r.t. shape w/ optimal scale. See [7] Equation 5.
xu = x-u
return (1/m - (xu**m*np.log(xu)).sum()/(xu**m).sum()
+ np.log(xu).sum()/n)
def dnllf_du(m, u):
# Partial w.r.t. loc w/ optimal scale. See [7] Equation 6.
xu = x-u
return (m-1)/m*(xu**-1).sum() - n*(xu**(m-1)).sum()/(xu**m).sum()
def get_scale(m, u):
# Partial w.r.t. scale solved in terms of shape and location.
# See [7] Equation 7.
return ((x-u)**m/n).sum()**(1/m)
def dnllf(params):
# Partial derivatives of the NLLF w.r.t. parameters, i.e.
# first order necessary conditions for MLE fit.
return [dnllf_dm(*params), dnllf_du(*params)]
suggestion = ("Maximum likelihood estimation is known to be challenging "
"for the three-parameter Weibull distribution. Consider "
"performing a custom goodness-of-fit test using "
"`scipy.stats.monte_carlo_test`.")
if np.allclose(u, np.min(x)) or m < 1:
# The critical values provided by [7] don't seem to control the
# Type I error rate in this case. Error out.
        message = ("Maximum likelihood estimation has converged to "
                   "a solution in which the location is equal to the minimum "
                   "of the data, the shape parameter is less than one, or "
                   "both. The table of critical values in [7] does not "
                   "include this case. " + suggestion)
raise ValueError(message)
try:
# Refine the MLE / verify that first-order necessary conditions are
# satisfied. If so, the critical values provided in [7] seem reliable.
with np.errstate(over='raise', invalid='raise'):
res = optimize.root(dnllf, params[:-1])
message = ("Solution of MLE first-order conditions failed: "
f"{res.message}. `anderson` cannot continue. " + suggestion)
if not res.success:
raise ValueError(message)
except (FloatingPointError, ValueError) as e:
message = ("An error occurred while fitting the Weibull distribution "
"to the data, so `anderson` cannot continue. " + suggestion)
raise ValueError(message) from e
m, u = res.x
s = get_scale(m, u)
return m, u, s
AndersonResult = _make_tuple_bunch('AndersonResult',
['statistic', 'critical_values',
'significance_level'], ['fit_result'])
def anderson(x, dist='norm'):
"""Anderson-Darling test for data coming from a particular distribution.
The Anderson-Darling test tests the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, weibull_min, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
Array of sample data.
dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1', 'weibull_min'}, optional
The type of distribution to test against. The default is 'norm'.
The names 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the
same distribution.
Returns
-------
result : AndersonResult
An object with the following attributes:
statistic : float
The Anderson-Darling test statistic.
critical_values : list
The critical values for this distribution.
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
fit_result : `~scipy.stats._result_classes.FitResult`
An object containing the results of fitting the distribution to
the data.
See Also
--------
kstest : The Kolmogorov-Smirnov test for goodness-of-fit.
Notes
-----
Critical values provided are for the following significance levels:
normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
gumbel_l / gumbel_r
25%, 10%, 5%, 2.5%, 1%
weibull_min
50%, 25%, 15%, 10%, 5%, 2.5%, 1%, 0.5%
If the returned statistic is larger than these critical values then
for the corresponding significance level, the null hypothesis that
the data come from the chosen distribution can be rejected.
The returned statistic is referred to as 'A2' in the references.
For `weibull_min`, maximum likelihood estimation is known to be
challenging. If the test returns successfully, then the first order
    conditions for a maximum likelihood estimate have been verified and
the critical values correspond relatively well to the significance levels,
provided that the sample is sufficiently large (>10 observations [7]).
However, for some data - especially data with no left tail - `anderson`
is likely to result in an error message. In this case, consider
performing a custom goodness of fit test using
`scipy.stats.monte_carlo_test`.
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality, Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
.. [7] Richard A. Lockhart and Michael A. Stephens "Estimation and Tests of
Fit for the Three-Parameter Weibull Distribution"
           Journal of the Royal Statistical Society. Series B (Methodological),
Vol. 56, No. 3 (1994), pp. 491-500, Table 0.
Examples
--------
Test the null hypothesis that a random sample was drawn from a normal
distribution (with unspecified mean and standard deviation).
>>> import numpy as np
>>> from scipy.stats import anderson
>>> rng = np.random.default_rng()
>>> data = rng.random(size=35)
>>> res = anderson(data)
>>> res.statistic
0.8398018749744764
>>> res.critical_values
array([0.527, 0.6 , 0.719, 0.839, 0.998])
>>> res.significance_level
array([15. , 10. , 5. , 2.5, 1. ])
The value of the statistic (barely) exceeds the critical value associated
with a significance level of 2.5%, so the null hypothesis may be rejected
at a significance level of 2.5%, but not at a significance level of 1%.
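
    The significance levels at which the null hypothesis would be rejected
    can also be read off programmatically (the exact values may vary with
    the unseeded random data above):

    >>> res.significance_level[res.statistic > res.critical_values]
    array([15. , 10. ,  5. ,  2.5])  # may vary
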
""" # noqa
dist = dist.lower()
if dist in {'extreme1', 'gumbel'}:
dist = 'gumbel_l'
dists = {'norm', 'expon', 'gumbel_l',
'gumbel_r', 'logistic', 'weibull_min'}
if dist not in dists:
raise ValueError(f"Invalid distribution; dist must be in {dists}.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
fit_params = xbar, s
logcdf = distributions.norm.logcdf(w)
logsf = distributions.norm.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
fit_params = 0, xbar
logcdf = distributions.expon.logcdf(w)
logsf = distributions.expon.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
fit_params = sol
logcdf = distributions.logistic.logcdf(w)
logsf = distributions.logistic.logsf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
elif dist == 'gumbel_r':
xbar, s = distributions.gumbel_r.fit(x)
w = (y - xbar) / s
fit_params = xbar, s
logcdf = distributions.gumbel_r.logcdf(w)
logsf = distributions.gumbel_r.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
elif dist == 'gumbel_l':
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
fit_params = xbar, s
logcdf = distributions.gumbel_l.logcdf(w)
logsf = distributions.gumbel_l.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
elif dist == 'weibull_min':
message = ("Critical values of the test statistic are given for the "
"asymptotic distribution. These may not be accurate for "
"samples with fewer than 10 observations. Consider using "
"`scipy.stats.monte_carlo_test`.")
if N < 10:
warnings.warn(message, stacklevel=2)
# [7] writes our 'c' as 'm', and they write `c = 1/m`. Use their names.
m, loc, scale = distributions.weibull_min.fit(y)
m, loc, scale = _weibull_fit_check((m, loc, scale), y)
fit_params = m, loc, scale
logcdf = stats.weibull_min(*fit_params).logcdf(y)
logsf = stats.weibull_min(*fit_params).logsf(y)
c = 1 / m # m and c are as used in [7]
        # significance levels in percents, consistent with the other branches
        sig = array([50, 25, 15, 10, 5, 2.5, 1, 0.5])
critical = _get_As_weibull(c)
# Goodness-of-fit tests should only be used to provide evidence
# _against_ the null hypothesis. Be conservative and round up.
critical = np.round(critical + 0.0005, decimals=3)
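    # Anderson-Darling statistic: with w_i the standardized order statistics
    # and F the hypothesized CDF,
    #     A2 = -N - sum_{i=1}^N (2i - 1)/N * [log F(w_i) + log(1 - F(w_{N+1-i}))]
    # evaluated via the log-CDF and the reversed log-SF for numerical
    # stability.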
i = arange(1, N + 1)
A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)
# FitResult initializer expects an optimize result, so let's work with it
message = '`anderson` successfully fit the distribution to the data.'
res = optimize.OptimizeResult(success=True, message=message)
res.x = np.array(fit_params)
fit_result = FitResult(getattr(distributions, dist), y,
discrete=False, res=res)
return AndersonResult(A2, critical, sig, fit_result=fit_result)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
    A2akN : float
        The A2akN statistic of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
    A2kN : float
        The A2kN statistic of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = _make_tuple_bunch(
'Anderson_ksampResult',
['statistic', 'critical_values', 'pvalue'], []
)
def anderson_ksamp(samples, midrank=True, *, method=None):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
        Arrays of sample data, one per sample.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
method : PermutationMethod, optional
Defines the method used to compute the p-value. If `method` is an
instance of `PermutationMethod`, the p-value is computed using
`scipy.stats.permutation_test` with the provided configuration options
and other appropriate settings. Otherwise, the p-value is interpolated
from tabulated values.
Returns
-------
res : Anderson_ksampResult
An object containing attributes:
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%,
0.5%, 0.1%.
pvalue : float
The approximate p-value of the test. If `method` is not
provided, the value is floored / capped at 0.1% / 25%.
Raises
------
ValueError
If fewer than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
The critical values corresponding to the significance levels from 0.01
to 0.25 are taken from [1]_. p-values are floored / capped
at 0.1% / 25%. Since the range of critical values might be extended in
future releases, it is recommended not to test ``p == 0.25``, but rather
``p >= 0.25`` (analogously for the lower bound).
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> res = stats.anderson_ksamp([rng.normal(size=50),
... rng.normal(loc=0.5, size=30)])
>>> res.statistic, res.pvalue
(1.974403288713695, 0.04991293614572478)
>>> res.critical_values
array([0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546])
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
p-value of 4.99%.
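
    The rejection decision at each tabulated significance level (25%, 10%,
    5%, 2.5%, 1%, 0.5%, 0.1%) can also be read off by comparing the
    statistic with the critical values directly:

    >>> res.statistic > res.critical_values
    array([ True,  True,  True, False, False, False, False])  # may vary
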
>>> samples = [rng.normal(size=50), rng.normal(size=30),
... rng.normal(size=20)]
>>> res = stats.anderson_ksamp(samples)
>>> res.statistic, res.pvalue
(-0.29103725200789504, 0.25)
>>> res.critical_values
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856,
4.07210043, 5.56419101])
The null hypothesis cannot be rejected for three samples from an
identical distribution. The reported p-value (25%) has been capped and
may not be very accurate (since it corresponds to the value 0.449
whereas the statistic is -0.291).
In such cases where the p-value is capped or when sample sizes are
small, a permutation test may be more accurate.
>>> method = stats.PermutationMethod(n_resamples=9999, random_state=rng)
>>> res = stats.anderson_ksamp(samples, method=method)
>>> res.pvalue
0.5254
""" # noqa
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if np.any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN_fun = _anderson_ksamp_midrank
else:
A2kN_fun = _anderson_ksamp_right
A2kN = A2kN_fun(samples, Z, Zstar, k, n, N)
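    # The pooled observations (and hence Z, Zstar, n, and N) are invariant
    # under permutation of observations among samples, so they can be reused
    # by the permutation-test statistic below.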
def statistic(*samples):
return A2kN_fun(samples, Z, Zstar, k, n, N)
if method is not None:
res = stats.permutation_test(samples, statistic, **method._asdict(),
alternative='greater')
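    # The mean of A2kN under the null hypothesis is k - 1; its variance
    # sigmasq is computed from the quantities H, h, and g defined in
    # Scholz & Stephens (1987).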
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154])
critical = b0 + b1 / math.sqrt(m) + b2 / m
sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001])
if A2 < critical.min() and method is None:
p = sig.max()
msg = (f"p-value capped: true value larger than {p}. Consider "
"specifying `method` "
"(e.g. `method=stats.PermutationMethod()`.)")
warnings.warn(msg, stacklevel=2)
elif A2 > critical.max() and method is None:
p = sig.min()
msg = (f"p-value floored: true value smaller than {p}. Consider "
"specifying `method` "
"(e.g. `method=stats.PermutationMethod()`.)")
warnings.warn(msg, stacklevel=2)
elif method is None:
        # interpolate the log of the significance level at the statistic
pf = np.polyfit(critical, log(sig), 2)
p = math.exp(np.polyval(pf, A2))
    else:
        # `method` is not None here, so the permutation test was performed
        p = res.pvalue
# create result object with alias for backward compatibility
res = Anderson_ksampResult(A2, critical, p)
res.significance_level = p
return res
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
class _ABW:
"""Distribution of Ansari-Bradley W-statistic under the null hypothesis."""
# TODO: calculate exact distribution considering ties
# We could avoid summing over more than half the frequencies,
    # but initially it doesn't seem worth the extra complexity
def __init__(self):
"""Minimal initializer."""
self.m = None
self.n = None
self.astart = None
self.total = None
self.freqs = None
def _recalc(self, n, m):
"""When necessary, recalculate exact distribution."""
if n != self.n or m != self.m:
self.n, self.m = n, m
# distribution is NOT symmetric when m + n is odd
            # n is len(x), m is len(y); the ratio of scales is defined as x/y
astart, a1, _ = gscale(n, m)
self.astart = astart # minimum value of statistic
# Exact distribution of test statistic under null hypothesis
# expressed as frequencies/counts/integers to maintain precision.
# Stored as floats to avoid overflow of sums.
self.freqs = a1.astype(np.float64)
self.total = self.freqs.sum() # could calculate from m and n
# probability mass is self.freqs / self.total;
def pmf(self, k, n, m):
"""Probability mass function."""
self._recalc(n, m)
# The convention here is that PMF at k = 12.5 is the same as at k = 12,
# -> use `floor` in case of ties.
ind = np.floor(k - self.astart).astype(int)
return self.freqs[ind] / self.total
def cdf(self, k, n, m):
"""Cumulative distribution function."""
self._recalc(n, m)
        # Null distribution derived without considering ties is
        # approximate. Round the index up (`ceil`) so that the p-value is
        # conservative, avoiding Type I error.
        ind = np.ceil(k - self.astart).astype(int)
return self.freqs[:ind+1].sum() / self.total
def sf(self, k, n, m):
"""Survival function."""
self._recalc(n, m)
        # Null distribution derived without considering ties is
        # approximate. Round the index down (`floor`) so that the p-value is
        # conservative, avoiding Type I error.
        ind = np.floor(k - self.astart).astype(int)
return self.freqs[ind:].sum() / self.total
# Maintain state for faster repeat calls to ansari w/ method='exact'
_abw_state = _ABW()
def ansari(x, y, alternative='two-sided'):
"""Perform the Ansari-Bradley test for equal scale parameters.
The Ansari-Bradley test ([1]_, [2]_) is a non-parametric test
for the equality of the scale parameter of the distributions
from which two samples were drawn. The null hypothesis states that
the ratio of the scale of the distribution underlying `x` to the scale
of the distribution underlying `y` is 1.
Parameters
----------
x, y : array_like
Arrays of sample data.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the ratio of scales is not equal to 1.
* 'less': the ratio of scales is less than 1.
* 'greater': the ratio of scales is greater than 1.
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The Ansari-Bradley test statistic.
pvalue : float
The p-value of the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Ansari, A. R. and Bradley, R. A. (1960) Rank-sum tests for
dispersions, Annals of Mathematical Statistics, 31, 1174-1189.
.. [2] Sprent, Peter and N.C. Smeeton. Applied nonparametric
statistical methods. 3rd ed. Chapman and Hall/CRC. 2001.
Section 5.8.2.
.. [3] Nathaniel E. Helwig "Nonparametric Dispersion and Equality
Tests" at http://users.stat.umn.edu/~helwig/notes/npde-Notes.pdf
Examples
--------
>>> import numpy as np
>>> from scipy.stats import ansari
>>> rng = np.random.default_rng()
For these examples, we'll create three random data sets. The first
two, with sizes 35 and 25, are drawn from a normal distribution with
mean 0 and standard deviation 2. The third data set has size 25 and
is drawn from a normal distribution with standard deviation 1.25.
>>> x1 = rng.normal(loc=0, scale=2, size=35)
>>> x2 = rng.normal(loc=0, scale=2, size=25)
>>> x3 = rng.normal(loc=0, scale=1.25, size=25)
First we apply `ansari` to `x1` and `x2`. These samples are drawn
from the same distribution, so we expect the Ansari-Bradley test
should not lead us to conclude that the scales of the distributions
are different.
>>> ansari(x1, x2)
AnsariResult(statistic=541.0, pvalue=0.9762532927399098)
With a p-value close to 1, we cannot conclude that there is a
significant difference in the scales (as expected).
Now apply the test to `x1` and `x3`:
>>> ansari(x1, x3)
AnsariResult(statistic=425.0, pvalue=0.0003087020407974518)
The probability of observing such an extreme value of the statistic
under the null hypothesis of equal scales is only 0.03087%. We take this
as evidence against the null hypothesis in favor of the alternative:
the scales of the distributions from which the samples were drawn
are not equal.
We can use the `alternative` parameter to perform a one-tailed test.
In the above example, the scale of `x1` is greater than `x3` and so
the ratio of scales of `x1` and `x3` is greater than 1. This means
that the p-value when ``alternative='greater'`` should be near 0 and
hence we should be able to reject the null hypothesis:
>>> ansari(x1, x3, alternative='greater')
AnsariResult(statistic=425.0, pvalue=0.0001543510203987259)
As we can see, the p-value is indeed quite low. Use of
``alternative='less'`` should thus yield a large p-value:
>>> ansari(x1, x3, alternative='less')
AnsariResult(statistic=425.0, pvalue=0.9998643258449039)
"""
if alternative not in {'two-sided', 'greater', 'less'}:
raise ValueError("'alternative' must be 'two-sided',"
" 'greater', or 'less'.")
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = _stats_py.rankdata(xy)
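    # Ansari-Bradley scores count inward from both ends of the ordered
    # pooled sample (1, 2, ..., ~N/2, ..., 2, 1); AB is the sum of the
    # scores assigned to the observations in `x`.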
symrank = amin(array((rank, N - rank + 1)), 0)
AB = np.sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
if alternative == 'two-sided':
pval = 2.0 * np.minimum(_abw_state.cdf(AB, n, m),
_abw_state.sf(AB, n, m))
elif alternative == 'greater':
# AB statistic is _smaller_ when ratio of scales is larger,
# so this is the opposite of the usual calculation
pval = _abw_state.cdf(AB, n, m)
else:
pval = _abw_state.sf(AB, n, m)
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute np.sum(tj * rj**2,axis=0)
fac = np.sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
# Small values of AB indicate larger dispersion for the x sample.
# Large values of AB indicate larger dispersion for the y sample.
    # This is opposite to the way we define the ratio of scales. See [1]_.
z = (mnAB - AB) / sqrt(varAB)
z, pval = _normtest_finish(z, alternative)
return AnsariResult(AB, pval)
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*samples):
r"""Perform Bartlett's test for equal variances.
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2, ... : array_like
        Arrays of sample data. Only 1-D arrays are accepted; they may have
        different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
levene : A robust parametric test for equality of k variances
Notes
-----
    Conover et al. (1981) examined many of the existing parametric and
    nonparametric tests through extensive simulations and concluded that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to
    be superior in terms of robustness to departures from normality and in
    terms of power ([3]_).
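
    As implemented here (cf. [1]_ and [4]_), the test statistic is

    .. math::

       T = \frac{(N - k) \ln s_p^2 - \sum_{i=1}^k (n_i - 1) \ln s_i^2}
                {1 + \frac{1}{3(k - 1)} \left(\sum_{i=1}^k \frac{1}{n_i - 1}
                 - \frac{1}{N - k}\right)}

    where :math:`n_i` is the size and :math:`s_i^2` the unbiased sample
    variance of the :math:`i`-th sample, :math:`N = \sum_i n_i`, and
    :math:`s_p^2 = \sum_i (n_i - 1) s_i^2 / (N - k)` is the pooled variance.
    Under the null hypothesis, :math:`T` approximately follows the
    chi-square distribution with :math:`k - 1` degrees of freedom.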
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
Tests. Proceedings of the Royal Society of London. Series A,
Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
.. [5] C.I. BLISS (1952), The Statistics of Bioassay: With Special
Reference to the Vitamins, pp 499-503,
:doi:`10.1016/C2013-0-12584-6`.
.. [6] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn." Statistical Applications in Genetics and Molecular Biology
9.1 (2010).
.. [7] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are
superior to t and F tests in biomedical research. The American
Statistician, 52(2), 127-132.
Examples
--------
In [5]_, the influence of vitamin C on the tooth growth of guinea pigs
was investigated. In a control study, 60 subjects were divided into
small dose, medium dose, and large dose groups that received
daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively.
After 42 days, the tooth growth was measured.
The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record
tooth growth measurements of the three groups in microns.
>>> import numpy as np
>>> small_dose = np.array([
... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7,
... 15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7
... ])
>>> medium_dose = np.array([
... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5,
... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3
... ])
>>> large_dose = np.array([
... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5,
... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23
... ])
The `bartlett` statistic is sensitive to differences in variances
between the samples.
>>> from scipy import stats
>>> res = stats.bartlett(small_dose, medium_dose, large_dose)
>>> res.statistic
0.6654670663030519
The value of the statistic tends to be high when there is a large
difference in variances.
We can test for inequality of variance among the groups by comparing the
observed value of the statistic against the null distribution: the
distribution of statistic values derived under the null hypothesis that
the population variances of the three groups are equal.
For this test, the null distribution follows the chi-square distribution
as shown below.
>>> import matplotlib.pyplot as plt
>>> k = 3 # number of samples
>>> dist = stats.chi2(df=k-1)
>>> val = np.linspace(0, 5, 100)
>>> pdf = dist.pdf(val)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def plot(ax): # we'll re-use this
... ax.plot(val, pdf, color='C0')
... ax.set_title("Bartlett Test Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
... ax.set_xlim(0, 5)
... ax.set_ylim(0, 1)
>>> plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution greater than or equal to the observed value of the
statistic.
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> pvalue = dist.sf(res.statistic)
>>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props)
>>> i = val >= res.statistic
>>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0')
>>> plt.show()
>>> res.pvalue
0.71696121509966
If the p-value is "small" - that is, if there is a low probability of
sampling data from distributions with identical variances that produces
such an extreme value of the statistic - this may be taken as evidence
against the null hypothesis in favor of the alternative: the variances of
the groups are not equal. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [6]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
- Small p-values are not evidence for a *large* effect; rather, they can
only provide evidence for a "significant" effect, meaning that they are
unlikely to have occurred under the null hypothesis.
Note that the chi-square distribution provides the null distribution
when the observations are normally distributed. For small samples
drawn from non-normal populations, it may be more appropriate to
perform a
permutation test: Under the null hypothesis that all three samples were
drawn from the same population, each of the measurements is equally likely
to have been observed in any of the three samples. Therefore, we can form
a randomized null distribution by calculating the statistic under many
randomly-generated partitionings of the observations into the three
samples.
>>> def statistic(*samples):
... return stats.bartlett(*samples).statistic
>>> ref = stats.permutation_test(
... (small_dose, medium_dose, large_dose), statistic,
... permutation_type='independent', alternative='greater'
... )
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> bins = np.linspace(0, 5, 25)
>>> ax.hist(
... ref.null_distribution, bins=bins, density=True, facecolor="C1"
... )
    >>> ax.legend(['asymptotic approximation\n(many observations)',
... 'randomized null distribution'])
>>> plot(ax)
>>> plt.show()
>>> ref.pvalue # randomized test p-value
0.5387 # may vary
Note that there is significant disagreement between the p-value calculated
here and the asymptotic approximation returned by `bartlett` above.
The statistical inferences that can be drawn rigorously from a permutation
test are limited; nonetheless, they may be the preferred approach in many
circumstances [7]_.
Following is another generic example where the null hypothesis would be
rejected.
Test whether the lists `a`, `b` and `c` come from populations
with equal variances.
>>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
>>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
>>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
>>> stat, p = stats.bartlett(a, b, c)
>>> p
1.1254782518834628e-05
The very small p-value suggests that the populations do not have equal
variances.
This is not surprising, given that the sample variance of `b` is much
larger than that of `a` and `c`:
>>> [np.var(x, ddof=1) for x in [a, b, c]]
[0.007054444444444413, 0.13073888888888888, 0.008890000000000002]
"""
# Handle empty input and input that is not 1d
for sample in samples:
if np.asanyarray(sample).size == 0:
return BartlettResult(np.nan, np.nan)
if np.asanyarray(sample).ndim > 1:
raise ValueError('Samples must be one-dimensional.')
k = len(samples)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = np.empty(k)
ssq = np.empty(k, 'd')
for j in range(k):
Ni[j] = len(samples[j])
ssq[j] = np.var(samples[j], ddof=1)
Ntot = np.sum(Ni, axis=0)
spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*samples, center='median', proportiontocut=0.05):
r"""Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths. Only one-dimensional
samples are accepted.
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
bartlett : A parametric test for equality of k variances in normal samples
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
    * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
The test version using the mean was proposed in the original article
of Levene ([2]_) while the median and trimmed mean have been studied by
Brown and Forsythe ([3]_), sometimes also referred to as Brown-Forsythe
test.
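
    Concretely, with :math:`Z_{ij} = |X_{ij} - \tilde{X}_i|` (where
    :math:`\tilde{X}_i` is the chosen center of the :math:`i`-th sample),
    the statistic computed here is

    .. math::

       W = \frac{N - k}{k - 1} \cdot
           \frac{\sum_{i=1}^k n_i (\bar{Z}_{i.} - \bar{Z}_{..})^2}
                {\sum_{i=1}^k \sum_{j=1}^{n_i} (Z_{ij} - \bar{Z}_{i.})^2}

    which is referred to the F distribution with :math:`k - 1` and
    :math:`N - k` degrees of freedom.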
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
.. [4] C.I. BLISS (1952), The Statistics of Bioassay: With Special
Reference to the Vitamins, pp 499-503,
:doi:`10.1016/C2013-0-12584-6`.
.. [5] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn." Statistical Applications in Genetics and Molecular Biology
9.1 (2010).
.. [6] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are
superior to t and F tests in biomedical research. The American
Statistician, 52(2), 127-132.
Examples
--------
In [4]_, the influence of vitamin C on the tooth growth of guinea pigs
was investigated. In a control study, 60 subjects were divided into
small dose, medium dose, and large dose groups that received
daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively.
After 42 days, the tooth growth was measured.
The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record
tooth growth measurements of the three groups in microns.
>>> import numpy as np
>>> small_dose = np.array([
... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7,
... 15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7
... ])
>>> medium_dose = np.array([
... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5,
... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3
... ])
>>> large_dose = np.array([
... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5,
... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23
... ])
The `levene` statistic is sensitive to differences in variances
between the samples.
>>> from scipy import stats
>>> res = stats.levene(small_dose, medium_dose, large_dose)
>>> res.statistic
0.6457341109631506
The value of the statistic tends to be high when there is a large
difference in variances.
We can test for inequality of variance among the groups by comparing the
observed value of the statistic against the null distribution: the
distribution of statistic values derived under the null hypothesis that
the population variances of the three groups are equal.
For this test, the null distribution follows the F distribution as shown
below.
>>> import matplotlib.pyplot as plt
>>> k, n = 3, 60 # number of samples, total number of observations
>>> dist = stats.f(dfn=k-1, dfd=n-k)
>>> val = np.linspace(0, 5, 100)
>>> pdf = dist.pdf(val)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def plot(ax): # we'll re-use this
... ax.plot(val, pdf, color='C0')
... ax.set_title("Levene Test Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
... ax.set_xlim(0, 5)
... ax.set_ylim(0, 1)
>>> plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution greater than or equal to the observed value of the
statistic.
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> pvalue = dist.sf(res.statistic)
>>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props)
>>> i = val >= res.statistic
>>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0')
>>> plt.show()
>>> res.pvalue
0.5280694573759905
If the p-value is "small" - that is, if there is a low probability of
sampling data from distributions with identical variances that produces
such an extreme value of the statistic - this may be taken as evidence
against the null hypothesis in favor of the alternative: the variances of
the groups are not equal. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [5]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
- Small p-values are not evidence for a *large* effect; rather, they can
only provide evidence for a "significant" effect, meaning that they are
unlikely to have occurred under the null hypothesis.
Note that the F distribution provides an asymptotic approximation of the
null distribution.
For small samples, it may be more appropriate to perform a permutation
test: Under the null hypothesis that all three samples were drawn from
the same population, each of the measurements is equally likely to have
been observed in any of the three samples. Therefore, we can form a
randomized null distribution by calculating the statistic under many
randomly-generated partitionings of the observations into the three
samples.
>>> def statistic(*samples):
... return stats.levene(*samples).statistic
>>> ref = stats.permutation_test(
... (small_dose, medium_dose, large_dose), statistic,
... permutation_type='independent', alternative='greater'
... )
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> bins = np.linspace(0, 5, 25)
>>> ax.hist(
... ref.null_distribution, bins=bins, density=True, facecolor="C1"
... )
    >>> ax.legend(['asymptotic approximation\n(many observations)',
... 'randomized null distribution'])
>>> plot(ax)
>>> plt.show()
>>> ref.pvalue # randomized test p-value
0.4559 # may vary
Note that there is significant disagreement between the p-value calculated
here and the asymptotic approximation returned by `levene` above.
The statistical inferences that can be drawn rigorously from a permutation
test are limited; nonetheless, they may be the preferred approach in many
circumstances [6]_.
Following is another generic example where the null hypothesis would be
rejected.
Test whether the lists `a`, `b` and `c` come from populations
with equal variances.
>>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
>>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
>>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
>>> stat, p = stats.levene(a, b, c)
>>> p
0.002431505967249681
The small p-value suggests that the populations do not have equal
variances.
This is not surprising, given that the sample variance of `b` is much
larger than that of `a` and `c`:
>>> [np.var(x, ddof=1) for x in [a, b, c]]
[0.007054444444444413, 0.13073888888888888, 0.008890000000000002]
"""
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("center must be 'mean', 'median' or 'trimmed'.")
k = len(samples)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
# check for 1d input
for j in range(k):
if np.asanyarray(samples[j]).ndim > 1:
raise ValueError('Samples must be one-dimensional.')
Ni = np.empty(k)
Yci = np.empty(k, 'd')
if center == 'median':
def func(x):
return np.median(x, axis=0)
elif center == 'mean':
def func(x):
return np.mean(x, axis=0)
else: # center == 'trimmed'
samples = tuple(_stats_py.trimboth(np.sort(sample), proportiontocut)
for sample in samples)
def func(x):
return np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(samples[j])
Yci[j] = func(samples[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(samples[i]) - Yci[i])
# compute Zbari
Zbari = np.empty(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
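    # e.g. _apply_func([1, 2, 3, 4], [2], np.sum) -> array([3, 7])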
g = unique(r_[0, g, len(x)])
output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)]
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*samples, center='median', proportiontocut=0.05):
r"""Perform Fligner-Killeen test for equality of variance.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner-Killeen's test is
distribution free when populations are identical [2]_.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
See Also
--------
bartlett : A parametric test for equality of k variances in normal samples
levene : A robust parametric test for equality of k variances
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
    Conover et al. (1981) examined many of the existing parametric and
    nonparametric tests through extensive simulations and concluded that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to
    be superior in terms of robustness to departures from normality and in
    terms of power [3]_.
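
    Concretely, this implementation ranks :math:`Z_{ij} = |X_{ij} -
    \tilde{X}_i|` (with :math:`\tilde{X}_i` the chosen center of the
    :math:`i`-th sample) across all samples jointly, converts each rank
    :math:`r` to the normal score :math:`a_{N,r} = \Phi^{-1}(1/2 +
    r / (2(N + 1)))`, and forms

    .. math::

       X^2 = \frac{\sum_{i=1}^k n_i (\bar{a}_i - \bar{a})^2}{V^2}

    where :math:`\bar{a}_i` is the mean score within sample :math:`i`,
    :math:`\bar{a}` is the grand mean score, and :math:`V^2` is the sample
    variance of all scores. :math:`X^2` is referred to the chi-square
    distribution with :math:`k - 1` degrees of freedom.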
References
----------
.. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf bidding data.
Technometrics, 23(4), 351-361.
.. [5] C.I. BLISS (1952), The Statistics of Bioassay: With Special
Reference to the Vitamins, pp 499-503,
:doi:`10.1016/C2013-0-12584-6`.
.. [6] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn." Statistical Applications in Genetics and Molecular Biology
9.1 (2010).
.. [7] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are
superior to t and F tests in biomedical research. The American
Statistician, 52(2), 127-132.
Examples
--------
In [5]_, the influence of vitamin C on the tooth growth of guinea pigs
was investigated. In a control study, 60 subjects were divided into
small dose, medium dose, and large dose groups that received
daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively.
After 42 days, the tooth growth was measured.
The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record
tooth growth measurements of the three groups in microns.
>>> import numpy as np
>>> small_dose = np.array([
... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7,
... 15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7
... ])
>>> medium_dose = np.array([
... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5,
... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3
... ])
>>> large_dose = np.array([
... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5,
... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23
... ])
The `fligner` statistic is sensitive to differences in variances
between the samples.
>>> from scipy import stats
>>> res = stats.fligner(small_dose, medium_dose, large_dose)
>>> res.statistic
1.3878943408857916
The value of the statistic tends to be high when there is a large
difference in variances.
We can test for inequality of variance among the groups by comparing the
observed value of the statistic against the null distribution: the
distribution of statistic values derived under the null hypothesis that
the population variances of the three groups are equal.
For this test, the null distribution follows the chi-square distribution
as shown below.
>>> import matplotlib.pyplot as plt
>>> k = 3 # number of samples
>>> dist = stats.chi2(df=k-1)
>>> val = np.linspace(0, 8, 100)
>>> pdf = dist.pdf(val)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def plot(ax): # we'll re-use this
... ax.plot(val, pdf, color='C0')
... ax.set_title("Fligner Test Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
... ax.set_xlim(0, 8)
... ax.set_ylim(0, 0.5)
>>> plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution greater than or equal to the observed value of the
statistic.
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> pvalue = dist.sf(res.statistic)
>>> annotation = (f'p-value={pvalue:.4f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props)
>>> i = val >= res.statistic
>>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0')
>>> plt.show()
>>> res.pvalue
0.49960016501182125
If the p-value is "small" - that is, if there is a low probability of
sampling data from distributions with identical variances that produces
such an extreme value of the statistic - this may be taken as evidence
against the null hypothesis in favor of the alternative: the variances of
the groups are not equal. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [6]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
- Small p-values are not evidence for a *large* effect; rather, they can
only provide evidence for a "significant" effect, meaning that they are
unlikely to have occurred under the null hypothesis.
Note that the chi-square distribution provides an asymptotic approximation
of the null distribution.
For small samples, it may be more appropriate to perform a
permutation test: Under the null hypothesis that all three samples were
drawn from the same population, each of the measurements is equally likely
to have been observed in any of the three samples. Therefore, we can form
a randomized null distribution by calculating the statistic under many
randomly-generated partitionings of the observations into the three
samples.
>>> def statistic(*samples):
... return stats.fligner(*samples).statistic
>>> ref = stats.permutation_test(
... (small_dose, medium_dose, large_dose), statistic,
... permutation_type='independent', alternative='greater'
... )
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> bins = np.linspace(0, 8, 25)
>>> ax.hist(
... ref.null_distribution, bins=bins, density=True, facecolor="C1"
... )
    >>> ax.legend(['asymptotic approximation\n(many observations)',
... 'randomized null distribution'])
>>> plot(ax)
>>> plt.show()
>>> ref.pvalue # randomized test p-value
0.4332 # may vary
Note that there is significant disagreement between the p-value calculated
here and the asymptotic approximation returned by `fligner` above.
The statistical inferences that can be drawn rigorously from a permutation
test are limited; nonetheless, they may be the preferred approach in many
circumstances [7]_.
Following is another generic example where the null hypothesis would be
rejected.
Test whether the lists `a`, `b` and `c` come from populations
with equal variances.
>>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
>>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
>>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
>>> stat, p = stats.fligner(a, b, c)
>>> p
0.00450826080004775
The small p-value suggests that the populations do not have equal
variances.
This is not surprising, given that the sample variance of `b` is much
larger than that of `a` and `c`:
>>> [np.var(x, ddof=1) for x in [a, b, c]]
[0.007054444444444413, 0.13073888888888888, 0.008890000000000002]
"""
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("center must be 'mean', 'median' or 'trimmed'.")
# Handle empty input
for sample in samples:
if np.asanyarray(sample).size == 0:
return FlignerResult(np.nan, np.nan)
k = len(samples)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center == 'median':
def func(x):
return np.median(x, axis=0)
elif center == 'mean':
def func(x):
return np.mean(x, axis=0)
else: # center == 'trimmed'
samples = tuple(_stats_py.trimboth(sample, proportiontocut)
for sample in samples)
def func(x):
return np.mean(x, axis=0)
Ni = asarray([len(samples[j]) for j in range(k)])
Yci = asarray([func(samples[j]) for j in range(k)])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(samples[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = _stats_py.rankdata(allZij)
sample = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(sample, g, np.sum) / Ni
anbar = np.mean(sample, axis=0)
varsq = np.var(sample, axis=0, ddof=1)
Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return FlignerResult(Xsq, pval)
@_axis_nan_policy_factory(lambda x1: (x1,), n_samples=4, n_outputs=1)
def _mood_inner_lc(xy, x, diffs, sorted_xy, n, m, N) -> float:
# Obtain the unique values and their frequencies from the pooled samples.
# "a_j, + b_j, = t_j, for j = 1, ... k" where `k` is the number of unique
# classes, and "[t]he number of values associated with the x's and y's in
# the jth class will be denoted by a_j, and b_j respectively."
# (Mielke, 312)
# Reuse previously computed sorted array and `diff` arrays to obtain the
# unique values and counts. Prepend `diffs` with a non-zero to indicate
# that the first element should be marked as not matching what preceded it.
diffs_prep = np.concatenate(([1], diffs))
    # Unique elements are where there was a difference between elements in the
# sorted array
uniques = sorted_xy[diffs_prep != 0]
# The count of each element is the bin size for each set of consecutive
# differences where the difference is zero. Replace nonzero differences
# with 1 and then use the cumulative sum to count the indices.
t = np.bincount(np.cumsum(np.asarray(diffs_prep != 0, dtype=int)))[1:]
k = len(uniques)
js = np.arange(1, k + 1, dtype=int)
# the `b` array mentioned in the paper is not used, outside of the
# calculation of `t`, so we do not need to calculate it separately. Here
# we calculate `a`. In plain language, `a[j]` is the number of values in
# `x` that equal `uniques[j]`.
sorted_xyx = np.sort(np.concatenate((xy, x)))
diffs = np.diff(sorted_xyx)
diffs_prep = np.concatenate(([1], diffs))
    diff_is_nonzero = np.asarray(diffs_prep != 0, dtype=int)
    xyx_counts = np.bincount(np.cumsum(diff_is_nonzero))[1:]
a = xyx_counts - t
# "Define .. a_0 = b_0 = t_0 = S_0 = 0" (Mielke 312) so we shift `a`
# and `t` arrays over 1 to allow a first element of 0 to accommodate this
# indexing.
t = np.concatenate(([0], t))
a = np.concatenate(([0], a))
# S is built from `t`, so it does not need a preceding zero added on.
S = np.cumsum(t)
# define a copy of `S` with a prepending zero for later use to avoid
# the need for indexing.
S_i_m1 = np.concatenate(([0], S[:-1]))
# Psi, as defined by the 6th unnumbered equation on page 313 (Mielke).
# Note that in the paper there is an error where the denominator `2` is
# squared when it should be the entire equation.
def psi(indicator):
return (indicator - (N + 1)/2)**2
# define summation range for use in calculation of phi, as seen in sum
# in the unnumbered equation on the bottom of page 312 (Mielke).
s_lower = S[js - 1] + 1
s_upper = S[js] + 1
phi_J = [np.arange(s_lower[idx], s_upper[idx]) for idx in range(k)]
# for every range in the above array, determine the sum of psi(I) for
# every element in the range. Divide all the sums by `t`. Following the
# last unnumbered equation on page 312.
phis = [np.sum(psi(I_j)) for I_j in phi_J] / t[js]
# `T` is equal to a[j] * phi[j], per the first unnumbered equation on
# page 312. `phis` is already in the order based on `js`, so we index
# into `a` with `js` as well.
T = sum(phis * a[js])
    # Standardize T by its null mean E_0_T and variance varM to obtain the
    # approximate (large-sample) statistic.
E_0_T = n * (N * N - 1) / 12
varM = (m * n * (N + 1.0) * (N ** 2 - 4) / 180 -
m * n / (180 * N * (N - 1)) * np.sum(
t * (t**2 - 1) * (t**2 - 4 + (15 * (N - S - S_i_m1) ** 2))
))
return ((T - E_0_T) / np.sqrt(varM),)
def mood(x, y, axis=0, alternative="two-sided"):
"""Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the scales of the distributions underlying `x` and `y`
are different.
* 'less': the scale of the distribution underlying `x` is less than
the scale of the distribution underlying `y`.
* 'greater': the scale of the distribution underlying `x` is greater
than the scale of the distribution underlying `y`.
.. versionadded:: 1.7.0
Returns
-------
res : SignificanceResult
An object containing attributes:
statistic : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
        pvalue : scalar or ndarray
            The p-value for the hypothesis test.
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
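
    In the absence of ties, the statistic is based on
    ``M = sum((R_i - (N + 1)/2)**2)``, where the ``R_i`` are the ranks of
    the `x` observations in the pooled sample, ``n = len(x)``,
    ``m = len(y)``, and ``N = m + n``. ``M`` is standardized by its null
    mean ``n*(N**2 - 1)/12`` and null variance
    ``m*n*(N + 1)*(N**2 - 4)/180`` to obtain the reported z-score. When
    ties are present, a generalization due to Mielke [1]_ is used instead.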
References
----------
    .. [1] Mielke, Paul W. "Note on Some Squared Rank Tests with Existing
           Ties." Technometrics, vol. 9, no. 2, 1967, pp. 312-14. JSTOR,
           https://doi.org/10.2307/1266427. Accessed 18 May 2022.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x2 = rng.standard_normal((2, 45, 6, 7))
>>> x1 = rng.standard_normal((2, 30, 6, 7))
>>> res = stats.mood(x1, x2, axis=1)
>>> res.pvalue.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (res.pvalue > 0.1).sum()
78
Perform the test with different scales:
>>> x1 = rng.standard_normal((2, 30))
>>> x2 = rng.standard_normal((2, 35)) * 10.0
>>> stats.mood(x1, x2, axis=1)
SignificanceResult(statistic=array([-5.76174136, -6.12650783]),
pvalue=array([8.32505043e-09, 8.98287869e-10]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
if axis < 0:
axis = x.ndim + axis
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
# determine if any of the samples contain ties
sorted_xy = np.sort(xy, axis=axis)
diffs = np.diff(sorted_xy, axis=axis)
if 0 in diffs:
z = np.asarray(_mood_inner_lc(xy, x, diffs, sorted_xy, n, m, N,
axis=axis))
else:
if axis != 0:
xy = np.moveaxis(xy, axis, 0)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument,
# and using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.empty_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = _stats_py.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = np.sum((Ri - (N + 1.0) / 2) ** 2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
z, pval = _normtest_finish(z, alternative)
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return SignificanceResult(z, pval)
WilcoxonResult = _make_tuple_bunch('WilcoxonResult', ['statistic', 'pvalue'])
def wilcoxon_result_unpacker(res):
if hasattr(res, 'zstatistic'):
return res.statistic, res.pvalue, res.zstatistic
else:
return res.statistic, res.pvalue
def wilcoxon_result_object(statistic, pvalue, zstatistic=None):
res = WilcoxonResult(statistic, pvalue)
if zstatistic is not None:
res.zstatistic = zstatistic
return res
def wilcoxon_outputs(kwds):
method = kwds.get('method', 'auto')
if method == 'approx':
return 3
return 2
@_rename_parameter("mode", "method")
@_axis_nan_policy_factory(
wilcoxon_result_object, paired=True,
n_samples=lambda kwds: 2 if kwds.get('y', None) is not None else 1,
result_to_tuple=wilcoxon_result_unpacker, n_outputs=wilcoxon_outputs,
)
def wilcoxon(x, y=None, zero_method="wilcox", correction=False,
alternative="two-sided", method='auto'):
"""Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences ``x - y`` is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
Either the first set of measurements (in which case ``y`` is the second
set of measurements), or the differences between two sets of
measurements (in which case ``y`` is not to be specified.) Must be
one-dimensional.
y : array_like, optional
Either the second set of measurements (if ``x`` is the first set of
measurements), or not specified (if ``x`` is the differences between
two sets of measurements.) Must be one-dimensional.
.. warning::
When `y` is provided, `wilcoxon` calculates the test statistic
based on the ranks of the absolute values of ``d = x - y``.
Roundoff error in the subtraction can result in elements of ``d``
being assigned different ranks even when they would be tied with
exact arithmetic. Rather than passing `x` and `y` separately,
consider computing the difference ``x - y``, rounding as needed to
ensure that only truly unique elements are numerically distinct,
and passing the result as `x`, leaving `y` at the default (None).
zero_method : {"wilcox", "pratt", "zsplit"}, optional
There are different conventions for handling pairs of observations
with equal values ("zero-differences", or "zeros").
* "wilcox": Discards all zero-differences (default); see [4]_.
* "pratt": Includes zero-differences in the ranking process,
but drops the ranks of the zeros (more conservative); see [3]_.
In this case, the normal approximation is adjusted as in [5]_.
* "zsplit": Includes zero-differences in the ranking process and
splits the zero rank between positive and negative ones.
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic if a normal approximation is used. Default is False.
alternative : {"two-sided", "greater", "less"}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
In the following, let ``d`` represent the difference between the paired
samples: ``d = x - y`` if both ``x`` and ``y`` are provided, or
``d = x`` otherwise.
* 'two-sided': the distribution underlying ``d`` is not symmetric
about zero.
* 'less': the distribution underlying ``d`` is stochastically less
than a distribution symmetric about zero.
* 'greater': the distribution underlying ``d`` is stochastically
greater than a distribution symmetric about zero.
method : {"auto", "exact", "approx"}, optional
Method to calculate the p-value, see Notes. Default is "auto".
Returns
-------
An object with the following attributes.
statistic : array_like
If `alternative` is "two-sided", the sum of the ranks of the
differences above or below zero, whichever is smaller.
Otherwise the sum of the ranks of the differences above zero.
pvalue : array_like
The p-value for the test depending on `alternative` and `method`.
zstatistic : array_like
When ``method = 'approx'``, this is the normalized z-statistic::
z = (T - mn - d) / se
where ``T`` is `statistic` as defined above, ``mn`` is the mean of the
distribution under the null hypothesis, ``d`` is a continuity
correction, and ``se`` is the standard error.
When ``method != 'approx'``, this attribute is not available.
See Also
--------
kruskal, mannwhitneyu
Notes
-----
In the following, let ``d`` represent the difference between the paired
samples: ``d = x - y`` if both ``x`` and ``y`` are provided, or ``d = x``
otherwise. Assume that all elements of ``d`` are independent and
identically distributed observations, and all are distinct and nonzero.
- When ``len(d)`` is sufficiently large, the null distribution of the
normalized test statistic (`zstatistic` above) is approximately normal,
and ``method = 'approx'`` can be used to compute the p-value.
- When ``len(d)`` is small, the normal approximation may not be accurate,
and ``method='exact'`` is preferred (at the cost of additional
execution time).
- The default, ``method='auto'``, selects between the two: when
``len(d) <= 50``, the exact method is used; otherwise, the approximate
method is used.
The presence of "ties" (i.e. not all elements of ``d`` are unique) and
"zeros" (i.e. elements of ``d`` are zero) changes the null distribution
of the test statistic, and ``method='exact'`` no longer calculates
the exact p-value. If ``method='approx'``, the z-statistic is adjusted
for more accurate comparison against the standard normal, but still,
for finite sample sizes, the standard normal is only an approximation of
the true null distribution of the z-statistic. There is no clear
consensus among references on which method most accurately approximates
the p-value for small samples in the presence of zeros and/or ties. In any
case, this is the behavior of `wilcoxon` when ``method='auto'``:
``method='exact'`` is used when ``len(d) <= 50`` *and there are no zeros*;
otherwise, ``method='approx'`` is used.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
.. [2] Conover, W.J., Practical Nonparametric Statistics, 1971.
.. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed
Rank Procedures, Journal of the American Statistical Association,
Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`
.. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods,
Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`
.. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank
Sampling Distribution When Zero Differences are Present,
Journal of the American Statistical Association, Vol. 62, 1967,
pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`
Examples
--------
In [4]_, the differences in height between cross- and self-fertilized
corn plants are given as follows:
>>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]
Cross-fertilized plants appear to be higher. To test the null
hypothesis that there is no height difference, we can apply the
two-sided test:
>>> from scipy.stats import wilcoxon
>>> res = wilcoxon(d)
>>> res.statistic, res.pvalue
(24.0, 0.041259765625)
Hence, we would reject the null hypothesis at a confidence level of 5%,
concluding that there is a difference in height between the groups.
To confirm that the median of the differences can be assumed to be
positive, we use:
>>> res = wilcoxon(d, alternative='greater')
>>> res.statistic, res.pvalue
(96.0, 0.0206298828125)
This shows that the null hypothesis that the median is negative can be
rejected at a confidence level of 5% in favor of the alternative that
the median is greater than zero. The p-values above are exact. Using the
normal approximation gives very similar values:
>>> res = wilcoxon(d, method='approx')
>>> res.statistic, res.pvalue
(24.0, 0.04088813291185591)
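When ``method='approx'``, the normalized statistic used for the
approximation is also exposed as the ``zstatistic`` attribute (rounded
here for display):
>>> round(res.zstatistic, 3)
-2.045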
Note that the statistic changed to 96 in the one-sided case (the sum
of ranks of positive differences) whereas it is 24 in the two-sided
case (the minimum of sum of ranks above and below zero).
In the example above, the differences in height between paired plants are
provided to `wilcoxon` directly. Alternatively, `wilcoxon` accepts two
samples of equal length, calculates the differences between paired
elements, then performs the test. Consider the samples ``x`` and ``y``:
>>> import numpy as np
>>> x = np.array([0.5, 0.825, 0.375, 0.5])
>>> y = np.array([0.525, 0.775, 0.325, 0.55])
>>> res = wilcoxon(x, y, alternative='greater')
>>> res
WilcoxonResult(statistic=5.0, pvalue=0.5625)
Note that had we calculated the differences by hand, the test would have
produced different results:
>>> d = [-0.025, 0.05, 0.05, -0.05]
>>> ref = wilcoxon(d, alternative='greater')
>>> ref
WilcoxonResult(statistic=6.0, pvalue=0.4375)
The substantial difference is due to roundoff error in the results of
``x-y``:
>>> d - (x-y)
array([2.08166817e-17, 6.93889390e-17, 1.38777878e-17, 4.16333634e-17])
Even though we expected all the elements of ``(x-y)[1:]`` to have the same
magnitude ``0.05``, they have slightly different magnitudes in practice,
and therefore are assigned different ranks in the test. Before performing
the test, consider calculating ``d`` and adjusting it as necessary to
ensure that theoretically identical values are not numerically distinct.
For example:
>>> d2 = np.around(x - y, decimals=3)
>>> wilcoxon(d2, alternative='greater')
WilcoxonResult(statistic=6.0, pvalue=0.4375)
"""
mode = method
if mode not in ["auto", "approx", "exact"]:
raise ValueError("mode must be either 'auto', 'approx' or 'exact'")
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method must be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if alternative not in ["two-sided", "less", "greater"]:
raise ValueError("Alternative must be either 'two-sided', "
"'greater' or 'less'")
if y is None:
d = asarray(x)
if d.ndim > 1:
raise ValueError('Sample x must be one-dimensional.')
else:
x, y = map(asarray, (x, y))
if x.ndim > 1 or y.ndim > 1:
raise ValueError('Samples x and y must be one-dimensional.')
if len(x) != len(y):
raise ValueError('The samples x and y must have the same length.')
# Future enhancement: consider warning when elements of `d` appear to
# be tied but are numerically distinct.
d = x - y
if len(d) == 0:
NaN = _get_nan(d)
res = WilcoxonResult(NaN, NaN)
if method == 'approx':
res.zstatistic = NaN
return res
if mode == "auto":
if len(d) <= 50:
mode = "exact"
else:
mode = "approx"
n_zero = np.sum(d == 0)
if n_zero > 0 and mode == "exact":
mode = "approx"
warnings.warn("Exact p-value calculation does not work if there are "
"zeros. Switching to normal approximation.")
if mode == "approx":
if zero_method in ["wilcox", "pratt"]:
if n_zero == len(d):
raise ValueError("zero_method 'wilcox' and 'pratt' do not "
"work if x - y is zero for all elements.")
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d)
count = len(d)
if count < 10 and mode == "approx":
warnings.warn("Sample size too small for normal approximation.")
r = _stats_py.rankdata(abs(d))
r_plus = np.sum((d > 0) * r)
r_minus = np.sum((d < 0) * r)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
# return min for two-sided test, but r_plus for one-sided test
# the literature is not consistent here
# r_plus is more informative since r_plus + r_minus = count*(count+1)/2,
# i.e. the sum of the ranks, so r_minus and the min can be inferred
# (If zero_method='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.)
# [3] uses the r_plus for the one-sided test, keep min for two-sided test
# to keep backwards compatibility
if alternative == "two-sided":
T = min(r_plus, r_minus)
else:
T = r_plus
if mode == "approx":
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
# normal approximation needs to be adjusted, see Cureton (1967)
mn -= n_zero * (n_zero + 1.) * 0.25
se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
# apply continuity correction if applicable
d = 0
if correction:
if alternative == "two-sided":
d = 0.5 * np.sign(T - mn)
elif alternative == "less":
d = -0.5
else:
d = 0.5
# compute statistic and p-value using normal approximation
z = (T - mn - d) / se
if alternative == "two-sided":
prob = 2. * distributions.norm.sf(abs(z))
elif alternative == "greater":
# large T = r_plus indicates x is greater than y; i.e.
# accept alternative in that case and return small p-value (sf)
prob = distributions.norm.sf(z)
else:
prob = distributions.norm.cdf(z)
elif mode == "exact":
# get pmf of the possible positive ranksums r_plus
pmf = _get_wilcoxon_distr(count)
# note: r_plus is int (ties not allowed), need int for slices below
r_plus = int(r_plus)
if alternative == "two-sided":
if r_plus == (len(pmf) - 1) // 2:
# r_plus is the center of the distribution.
prob = 1.0
else:
p_less = np.sum(pmf[:r_plus + 1])
p_greater = np.sum(pmf[r_plus:])
prob = 2*min(p_greater, p_less)
elif alternative == "greater":
prob = np.sum(pmf[r_plus:])
else:
prob = np.sum(pmf[:r_plus + 1])
prob = np.clip(prob, 0, 1)
res = WilcoxonResult(T, prob)
if method == 'approx':
res.zstatistic = z
return res
MedianTestResult = _make_tuple_bunch(
'MedianTestResult',
['statistic', 'pvalue', 'median', 'table'], []
)
def median_test(*samples, ties='below', correction=True, lambda_=1,
nan_policy='propagate'):
"""Perform a Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(samples)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
res : MedianTestResult
An object containing attributes:
statistic : float
The test statistic. The statistic that is returned is determined
by `lambda_`. The default is Pearson's chi-squared statistic.
pvalue : float
The p-value of the test.
median : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table. If ``nan_policy`` is "propagate" and there
are nans in the input, the return value for ``table`` is ``None``.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> res = median_test(g1, g2, g3)
The median is
>>> res.median
34.0
and the contingency table is
>>> res.table
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> res.pvalue
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> res = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> res.pvalue
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> res = median_test(g1, g2, g3, ties="above")
>>> res.pvalue
0.063873276069553273
>>> res.table
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
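The remaining option, ``ties="ignore"``, excludes the values equal to the
grand median from the table entirely:
>>> res = median_test(g1, g2, g3, ties="ignore")
>>> res.table
array([[ 5, 10,  7],
       [11,  4,  8]])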
"""
if len(samples) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '{}'; 'ties' must be one "
"of: {}".format(ties, str(ties_options)[1:-1]))
data = [np.asarray(sample) for sample in samples]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
cdata = np.concatenate(data)
contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
if contains_nan and nan_policy == 'propagate':
return MedianTestResult(np.nan, np.nan, np.nan, None)
grand_median = np.nanmedian(cdata)
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
sample = sample[~np.isnan(sample)]
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.nonzero((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return MedianTestResult(stat, p, grand_median, table)
def _circfuncs_common(samples, high, low, nan_policy='propagate'):
# Ensure samples are array-like and size is not zero
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.asarray(np.nan), np.asarray(np.nan), None
# Recast samples as radians that range between 0 and 2 pi and calculate
# the sine and cosine
sin_samp = sin((samples - low)*2.*pi / (high - low))
cos_samp = cos((samples - low)*2.*pi / (high - low))
# Apply the NaN policy
contains_nan, nan_policy = _contains_nan(samples, nan_policy)
if contains_nan and nan_policy == 'omit':
mask = np.isnan(samples)
# Set the sines and cosines that are NaN to zero
sin_samp[mask] = 0.0
cos_samp[mask] = 0.0
else:
mask = None
return samples, sin_samp, cos_samp, mask
def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
"""Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for the sample range. Default is ``2*pi``.
low : float or int, optional
Low boundary for the sample range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
circmean : float
Circular mean.
See Also
--------
circstd : Circular standard deviation.
circvar : Circular variance.
Examples
--------
For simplicity, all angles are printed out in degrees.
>>> import numpy as np
>>> from scipy.stats import circmean
>>> import matplotlib.pyplot as plt
>>> angles = np.deg2rad(np.array([20, 30, 330]))
>>> circmean_value = circmean(angles)
>>> np.rad2deg(circmean_value)
7.294976657784009
>>> mean = angles.mean()
>>> np.rad2deg(mean)
126.66666666666666
Plot and compare the circular mean against the arithmetic mean.
>>> plt.plot(np.cos(np.linspace(0, 2*np.pi, 500)),
... np.sin(np.linspace(0, 2*np.pi, 500)),
... c='k')
>>> plt.scatter(np.cos(angles), np.sin(angles), c='k')
>>> plt.scatter(np.cos(circmean_value), np.sin(circmean_value), c='b',
... label='circmean')
>>> plt.scatter(np.cos(mean), np.sin(mean), c='r', label='mean')
>>> plt.legend()
>>> plt.axis('equal')
>>> plt.show()
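The ``low`` and ``high`` boundaries allow other units to be used directly;
for example, angles measured in degrees (result rounded for display):
>>> round(circmean([355, 5, 15], high=360), 1)
5.0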
"""
samples, sin_samp, cos_samp, nmask = _circfuncs_common(samples, high, low,
nan_policy=nan_policy)
sin_sum = sin_samp.sum(axis=axis)
cos_sum = cos_samp.sum(axis=axis)
res = arctan2(sin_sum, cos_sum)
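# arctan2 returns angles in (-pi, pi]; shift negative results by 2*pi so
# that the mean lies in [0, 2*pi) before rescaling to [low, high).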
mask_nan = ~np.isnan(res)
if mask_nan.ndim > 0:
mask = res[mask_nan] < 0
else:
mask = res < 0
if mask.ndim > 0:
mask_nan[mask_nan] = mask
res[mask_nan] += 2*pi
elif mask:
res += 2*pi
# Set output to NaN if no samples went into the mean
if nmask is not None:
if nmask.all():
res = np.full(shape=res.shape, fill_value=np.nan)
else:
# Find out if any of the axis that are being averaged consist
# entirely of NaN. If one exists, set the result (res) to NaN
nshape = 0 if axis is None else axis
smask = nmask.shape[nshape] == nmask.sum(axis=axis)
if smask.any():
res[smask] = np.nan
return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
"""Compute the circular variance for samples assumed to be in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for the sample range. Default is ``2*pi``.
low : float or int, optional
Low boundary for the sample range. Default is 0.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
circvar : float
Circular variance.
See Also
--------
circmean : Circular mean.
circstd : Circular standard deviation.
Notes
-----
This uses the following definition of circular variance: ``1-R``, where
``R`` is the length of the mean resultant vector. The
returned value is in the range [0, 1], 0 standing for no variance, and 1
for a large variance. In the limit of small angles, this value is similar
to half the 'linear' variance.
References
----------
.. [1] Fisher, N.I. *Statistical analysis of circular data*. Cambridge
University Press, 1993.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import circvar
>>> import matplotlib.pyplot as plt
>>> samples_1 = np.array([0.072, -0.158, 0.077, 0.108, 0.286,
... 0.133, -0.473, -0.001, -0.348, 0.131])
>>> samples_2 = np.array([0.111, -0.879, 0.078, 0.733, 0.421,
... 0.104, -0.136, -0.867, 0.012, 0.105])
>>> circvar_1 = circvar(samples_1)
>>> circvar_2 = circvar(samples_2)
Plot the samples.
>>> fig, (left, right) = plt.subplots(ncols=2)
>>> for image in (left, right):
... image.plot(np.cos(np.linspace(0, 2*np.pi, 500)),
... np.sin(np.linspace(0, 2*np.pi, 500)),
... c='k')
... image.axis('equal')
... image.axis('off')
>>> left.scatter(np.cos(samples_1), np.sin(samples_1), c='k', s=15)
>>> left.set_title(f"circular variance: {np.round(circvar_1, 2)!r}")
>>> right.scatter(np.cos(samples_2), np.sin(samples_2), c='k', s=15)
>>> right.set_title(f"circular variance: {np.round(circvar_2, 2)!r}")
>>> plt.show()
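For the small angles in these samples, the circular variance is close to
half the 'linear' variance, as noted above (an illustrative check):
>>> np.isclose(circvar_1, np.var(samples_1) / 2, rtol=5e-2)
True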
"""
samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low,
nan_policy=nan_policy)
if mask is None:
sin_mean = sin_samp.mean(axis=axis)
cos_mean = cos_samp.mean(axis=axis)
else:
nsum = np.asarray(np.sum(~mask, axis=axis).astype(float))
nsum[nsum == 0] = np.nan
sin_mean = sin_samp.sum(axis=axis) / nsum
cos_mean = cos_samp.sum(axis=axis) / nsum
# hypot can go slightly above 1 due to rounding errors
with np.errstate(invalid='ignore'):
R = np.minimum(1, hypot(sin_mean, cos_mean))
res = 1. - R
return res
def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *,
normalize=False):
"""
Compute the circular standard deviation for samples assumed to be in the
range ``[low, high]``.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for the sample range. Default is ``2*pi``.
low : float or int, optional
Low boundary for the sample range. Default is 0.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
normalize : boolean, optional
If True, the returned value is equal to ``sqrt(-2*log(R))`` and does
not depend on the variable units. If False (default), the returned
value is scaled by ``((high-low)/(2*pi))``.
Returns
-------
circstd : float
Circular standard deviation.
See Also
--------
circmean : Circular mean.
circvar : Circular variance.
Notes
-----
This uses a definition of circular standard deviation from [1]_.
Essentially, the calculation is as follows.
.. code-block:: python
import numpy as np
C = np.cos(samples).mean()
S = np.sin(samples).mean()
R = np.sqrt(C**2 + S**2)
l = 2*np.pi / (high-low)
circstd = np.sqrt(-2*np.log(R)) / l
In the limit of small angles, it returns a number close to the 'linear'
standard deviation.
References
----------
.. [1] Mardia, K. V. (1972). 2. In *Statistics of Directional Data*
(pp. 18-24). Academic Press. :doi:`10.1016/C2013-0-07425-7`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import circstd
>>> import matplotlib.pyplot as plt
>>> samples_1 = np.array([0.072, -0.158, 0.077, 0.108, 0.286,
... 0.133, -0.473, -0.001, -0.348, 0.131])
>>> samples_2 = np.array([0.111, -0.879, 0.078, 0.733, 0.421,
... 0.104, -0.136, -0.867, 0.012, 0.105])
>>> circstd_1 = circstd(samples_1)
>>> circstd_2 = circstd(samples_2)
Plot the samples.
>>> fig, (left, right) = plt.subplots(ncols=2)
>>> for image in (left, right):
... image.plot(np.cos(np.linspace(0, 2*np.pi, 500)),
... np.sin(np.linspace(0, 2*np.pi, 500)),
... c='k')
... image.axis('equal')
... image.axis('off')
>>> left.scatter(np.cos(samples_1), np.sin(samples_1), c='k', s=15)
>>> left.set_title(f"circular std: {np.round(circstd_1, 2)!r}")
>>> right.scatter(np.cos(samples_2), np.sin(samples_2), c='k', s=15)
>>> right.set_title(f"circular std: {np.round(circstd_2, 2)!r}")
>>> plt.show()
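For small angles, the circular standard deviation is close to the 'linear'
standard deviation (an illustrative check):
>>> np.isclose(circstd_1, np.std(samples_1), rtol=1e-2)
True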
"""
samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low,
nan_policy=nan_policy)
if mask is None:
sin_mean = sin_samp.mean(axis=axis) # [1] (2.2.3)
cos_mean = cos_samp.mean(axis=axis) # [1] (2.2.3)
else:
nsum = np.asarray(np.sum(~mask, axis=axis).astype(float))
nsum[nsum == 0] = np.nan
sin_mean = sin_samp.sum(axis=axis) / nsum
cos_mean = cos_samp.sum(axis=axis) / nsum
# hypot can go slightly above 1 due to rounding errors
with np.errstate(invalid='ignore'):
R = np.minimum(1, hypot(sin_mean, cos_mean)) # [1] (2.2.4)
res = sqrt(-2*log(R))
if not normalize:
res *= (high-low)/(2.*pi) # [1] (2.3.14) w/ (2.3.7)
return res
class DirectionalStats:
def __init__(self, mean_direction, mean_resultant_length):
self.mean_direction = mean_direction
self.mean_resultant_length = mean_resultant_length
def __repr__(self):
return (f"DirectionalStats(mean_direction={self.mean_direction},"
f" mean_resultant_length={self.mean_resultant_length})")
def directional_stats(samples, *, axis=0, normalize=True):
"""
Computes sample statistics for directional data.
Computes the directional mean (also called the mean direction vector) and
mean resultant length of a sample of vectors.
The directional mean is a measure of "preferred direction" of vector data.
It is analogous to the sample mean, but it is for use when the length of
the data is irrelevant (e.g. unit vectors).
The mean resultant length is a value between 0 and 1 used to quantify the
dispersion of directional data: the smaller the mean resultant length, the
greater the dispersion. Several definitions of directional variance
involving the mean resultant length are given in [1]_ and [2]_.
Parameters
----------
samples : array_like
Input array. Must be at least two-dimensional, and the last axis of the
input must correspond with the dimensionality of the vector space.
When the input is exactly two dimensional, this means that each row
of the data is a vector observation.
axis : int, default: 0
Axis along which the directional mean is computed.
normalize: boolean, default: True
If True, normalize the input to ensure that each observation is a
unit vector. If the observations are already unit vectors, consider
setting this to False to avoid unnecessary computation.
Returns
-------
res : DirectionalStats
An object containing attributes:
mean_direction : ndarray
Directional mean.
mean_resultant_length : ndarray
The mean resultant length [1]_.
See Also
--------
circmean: circular mean; i.e. directional mean for 2D *angles*
circvar: circular variance; i.e. directional variance for 2D *angles*
Notes
-----
This uses a definition of directional mean from [1]_.
Assuming the observations are unit vectors, the calculation is as follows.
.. code-block:: python
mean = samples.mean(axis=0)
mean_resultant_length = np.linalg.norm(mean)
mean_direction = mean / mean_resultant_length
This definition is appropriate for *directional* data (i.e. vector data
for which the magnitude of each observation is irrelevant) but not
for *axial* data (i.e. vector data for which the magnitude and *sign* of
each observation is irrelevant).
Several definitions of directional variance involving the mean resultant
length ``R`` have been proposed, including ``1 - R`` [1]_, ``1 - R**2``
[2]_, and ``2 * (1 - R)`` [2]_. Rather than choosing one, this function
returns ``R`` as attribute `mean_resultant_length` so the user can compute
their preferred measure of dispersion.
References
----------
.. [1] Mardia, Jupp. (2000). *Directional Statistics*
(p. 163). Wiley.
.. [2] https://en.wikipedia.org/wiki/Directional_statistics
Examples
--------
>>> import numpy as np
>>> from scipy.stats import directional_stats
>>> data = np.array([[3, 4], # first observation, 2D vector space
... [6, -8]]) # second observation
>>> dirstats = directional_stats(data)
>>> dirstats.mean_direction
array([1., 0.])
In contrast, the regular sample mean of the vectors would be influenced
by the magnitude of each observation. Furthermore, the result would not be
a unit vector.
>>> data.mean(axis=0)
array([4.5, -2.])
An exemplary use case for `directional_stats` is to find a *meaningful*
center for a set of observations on a sphere, e.g. geographical locations.
>>> data = np.array([[0.8660254, 0.5, 0.],
... [0.8660254, -0.5, 0.]])
>>> dirstats = directional_stats(data)
>>> dirstats.mean_direction
array([1., 0., 0.])
The regular sample mean on the other hand yields a result which does not
lie on the surface of the sphere.
>>> data.mean(axis=0)
array([0.8660254, 0., 0.])
The function also returns the mean resultant length, which
can be used to calculate a directional variance. For example, using the
definition ``Var(z) = 1 - R`` from [2]_ where ``R`` is the
mean resultant length, we can calculate the directional variance of the
vectors in the above example as:
>>> 1 - dirstats.mean_resultant_length
0.13397459716167093
"""
samples = np.asarray(samples)
if samples.ndim < 2:
raise ValueError("samples must at least be two-dimensional. "
f"Instead samples has shape: {samples.shape!r}")
samples = np.moveaxis(samples, axis, 0)
if normalize:
vectornorms = np.linalg.norm(samples, axis=-1, keepdims=True)
samples = samples/vectornorms
mean = np.mean(samples, axis=0)
mean_resultant_length = np.linalg.norm(mean, axis=-1, keepdims=True)
mean_direction = mean / mean_resultant_length
return DirectionalStats(mean_direction,
mean_resultant_length.squeeze(-1)[()])
def false_discovery_control(ps, *, axis=0, method='bh'):
"""Adjust p-values to control the false discovery rate.
The false discovery rate (FDR) is the expected proportion of rejected null
hypotheses that are actually true.
If the null hypothesis is rejected when the *adjusted* p-value falls below
a specified level, the false discovery rate is controlled at that level.
Parameters
----------
ps : 1D array_like
The p-values to adjust. Elements must be real numbers between 0 and 1.
axis : int
The axis along which to perform the adjustment. The adjustment is
performed independently along each axis-slice. If `axis` is None, `ps`
is raveled before performing the adjustment.
method : {'bh', 'by'}
The false discovery rate control procedure to apply: ``'bh'`` is for
Benjamini-Hochberg [1]_ (Eq. 1), ``'by'`` is for Benjaminini-Yekutieli
[2]_ (Theorem 1.3). The latter is more conservative, but it is
guaranteed to control the FDR even when the p-values are not from
independent tests.
Returns
-------
ps_adjusted : array_like
The adjusted p-values. If the null hypothesis is rejected where these
fall below a specified level, the false discovery rate is controlled
at that level.
See Also
--------
combine_pvalues
statsmodels.stats.multitest.multipletests
Notes
-----
In multiple hypothesis testing, false discovery control procedures tend to
offer higher power than familywise error rate control procedures (e.g.
Bonferroni correction [1]_).
If the p-values correspond with independent tests (or tests with
"positive regression dependencies" [2]_), rejecting null hypotheses
corresponding with Benjamini-Hochberg-adjusted p-values below :math:`q`
controls the false discovery rate at a level less than or equal to
:math:`q m_0 / m`, where :math:`m_0` is the number of true null hypotheses
and :math:`m` is the total number of null hypotheses tested. The same is
true even for dependent tests when the p-values are adjusted according to
the more conservative Benjamini-Yekutieli procedure.
The adjusted p-values produced by this function are comparable to those
produced by the R function ``p.adjust`` and the statsmodels function
`statsmodels.stats.multitest.multipletests`. Please consider the latter
for more advanced methods of multiple comparison correction.
References
----------
.. [1] Benjamini, Yoav, and Yosef Hochberg. "Controlling the false
discovery rate: a practical and powerful approach to multiple
testing." Journal of the Royal statistical society: series B
(Methodological) 57.1 (1995): 289-300.
.. [2] Benjamini, Yoav, and Daniel Yekutieli. "The control of the false
discovery rate in multiple testing under dependency." Annals of
statistics (2001): 1165-1188.
.. [3] TileStats. FDR - Benjamini-Hochberg explained - Youtube.
https://www.youtube.com/watch?v=rZKa4tW2NKs.
.. [4] Neuhaus, Karl-Ludwig, et al. "Improved thrombolysis in acute
myocardial infarction with front-loaded administration of alteplase:
results of the rt-PA-APSAC patency study (TAPS)." Journal of the
American College of Cardiology 19.5 (1992): 885-891.
Examples
--------
We follow the example from [1]_.
Thrombolysis with recombinant tissue-type plasminogen activator (rt-PA)
and anisoylated plasminogen streptokinase activator (APSAC) in
myocardial infarction has been proved to reduce mortality. [4]_
investigated the effects of a new front-loaded administration of rt-PA
versus those obtained with a standard regimen of APSAC, in a randomized
multicentre trial in 421 patients with acute myocardial infarction.
There were four families of hypotheses tested in the study, the last of
which was "cardiac and other events after the start of thrombolitic
treatment". FDR control may be desired in this family of hypotheses
because it would not be appropriate to conclude that the front-loaded
treatment is better if it is merely equivalent to the previous treatment.
The p-values corresponding with the 15 hypotheses in this family were
>>> ps = [0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344,
... 0.0459, 0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000]
If the chosen significance level is 0.05, we may be tempted to reject the
null hypotheses for the tests corresponding with the first nine p-values,
as the first nine p-values fall below the chosen significance level.
However, this would ignore the problem of "multiplicity": if we fail to
correct for the fact that multiple comparisons are being performed, we
are more likely to incorrectly reject true null hypotheses.
One approach to the multiplicity problem is to control the family-wise
error rate (FWER), that is, the rate at which the null hypothesis is
rejected when it is actually true. A common procedure of this kind is the
Bonferroni correction [1]_. We begin by multiplying the p-values by the
number of hypotheses tested.
>>> import numpy as np
>>> np.array(ps) * len(ps)
array([1.5000e-03, 6.0000e-03, 2.8500e-02, 1.4250e-01, 3.0150e-01,
4.1700e-01, 4.4700e-01, 5.1600e-01, 6.8850e-01, 4.8600e+00,
6.3930e+00, 8.5785e+00, 9.7920e+00, 1.1385e+01, 1.5000e+01])
To control the FWER at 5%, we reject only the hypotheses corresponding
with adjusted p-values less than 0.05. In this case, only the hypotheses
corresponding with the first three p-values can be rejected. According to
[1]_, these three hypotheses concerned "allergic reaction" and "two
different aspects of bleeding."
An alternative approach is to control the false discovery rate: the
expected fraction of rejected null hypotheses that are actually true. The
advantage of this approach is that it typically affords greater power: an
increased rate of rejecting the null hypothesis when it is indeed false. To
control the false discovery rate at 5%, we apply the Benjamini-Hochberg
p-value adjustment.
>>> from scipy import stats
>>> stats.false_discovery_control(ps)
array([0.0015 , 0.003 , 0.0095 , 0.035625 , 0.0603 ,
0.06385714, 0.06385714, 0.0645 , 0.0765 , 0.486 ,
0.58118182, 0.714875 , 0.75323077, 0.81321429, 1. ])
Now, the first *four* adjusted p-values fall below 0.05, so we would reject
the null hypotheses corresponding with these *four* p-values. Rejection
of the fourth null hypothesis was particularly important to the original
study as it led to the conclusion that the new treatment had a
"substantially lower in-hospital mortality rate."
"""
# Input Validation and Special Cases
ps = np.asarray(ps)
ps_in_range = (np.issubdtype(ps.dtype, np.number)
and np.all(ps == np.clip(ps, 0, 1)))
if not ps_in_range:
raise ValueError("`ps` must include only numbers between 0 and 1.")
methods = {'bh', 'by'}
if method.lower() not in methods:
raise ValueError(f"Unrecognized `method` '{method}'."
f"Method must be one of {methods}.")
method = method.lower()
if axis is None:
axis = 0
ps = ps.ravel()
axis = np.asarray(axis)[()]
if not np.issubdtype(axis.dtype, np.integer) or axis.size != 1:
raise ValueError("`axis` must be an integer or `None`")
if ps.size <= 1 or ps.shape[axis] <= 1:
return ps[()]
ps = np.moveaxis(ps, axis, -1)
m = ps.shape[-1]
# Main Algorithm
# Equivalent to the ideas of [1] and [2], except that this adjusts the
# p-values as described in [3]. The results are similar to those produced
# by R's p.adjust.
# "Let [ps] be the ordered observed p-values..."
order = np.argsort(ps, axis=-1)
ps = np.take_along_axis(ps, order, axis=-1) # this copies ps
# Equation 1 of [1] rearranged to reject when p is less than specified q
i = np.arange(1, m+1)
ps *= m / i
# Theorem 1.3 of [2]
if method == 'by':
ps *= np.sum(1 / i)
# accounts for rejecting all null hypotheses i for i < k, where k is
# defined in Eq. 1 of either [1] or [2]. See [3]. Starting with the index j
# of the second to last element, we replace element j with element j+1 if
# the latter is smaller.
np.minimum.accumulate(ps[..., ::-1], out=ps[..., ::-1], axis=-1)
# Restore original order of axes and data
np.put_along_axis(ps, order, values=ps.copy(), axis=-1)
ps = np.moveaxis(ps, -1, axis)
return np.clip(ps, 0, 1)
| 188458
| 36.669198
| 111
|
py
|
scipy
|
scipy-main/scipy/stats/_covariance.py
|
from functools import cached_property
import numpy as np
from scipy import linalg
from scipy.stats import _multivariate
__all__ = ["Covariance"]
class Covariance:
"""
Representation of a covariance matrix
Calculations involving covariance matrices (e.g. data whitening,
multivariate normal function evaluation) are often performed more
efficiently using a decomposition of the covariance matrix instead of the
covariance matrix itself. This class allows the user to construct an
object representing a covariance matrix using any of several
decompositions and perform calculations using a common interface.
.. note::
The `Covariance` class cannot be instantiated directly. Instead, use
one of the factory methods (e.g. `Covariance.from_diagonal`).
Examples
--------
The `Covariance` class is used by calling one of its factory methods to
create a `Covariance` object, then passing that representation of the
covariance matrix as a shape parameter of a multivariate distribution.
For instance, the multivariate normal distribution can accept an array
representing a covariance matrix:
>>> from scipy import stats
>>> import numpy as np
>>> d = [1, 2, 3]
>>> A = np.diag(d) # a diagonal covariance matrix
>>> x = [4, -2, 5] # a point of interest
>>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=A)
>>> dist.pdf(x)
4.9595685102808205e-08
but the calculations are performed in a very generic way that does not
take advantage of any special properties of the covariance matrix. Because
our covariance matrix is diagonal, we can use ``Covariance.from_diagonal``
to create an object representing the covariance matrix, and
`multivariate_normal` can use this to compute the probability density
function more efficiently.
>>> cov = stats.Covariance.from_diagonal(d)
>>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=cov)
>>> dist.pdf(x)
4.9595685102808205e-08
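The explicit covariance matrix can still be recovered from the object
through the `covariance` property:
>>> cov.covariance
array([[1, 0, 0],
       [0, 2, 0],
       [0, 0, 3]])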
"""
def __init__(self):
message = ("The `Covariance` class cannot be instantiated directly. "
"Please use one of the factory methods "
"(e.g. `Covariance.from_diagonal`).")
raise NotImplementedError(message)
@staticmethod
def from_diagonal(diagonal):
r"""
Return a representation of a covariance matrix from its diagonal.
Parameters
----------
diagonal : array_like
The diagonal elements of a diagonal matrix.
Notes
-----
Let the diagonal elements of a diagonal covariance matrix :math:`D` be
stored in the vector :math:`d`.
When all elements of :math:`d` are strictly positive, whitening of a
data point :math:`x` is performed by computing
:math:`x \cdot d^{-1/2}`, where the inverse square root can be taken
element-wise.
:math:`\log\det{D}` is calculated as :math:`-2 \sum(\log{d})`,
where the :math:`\log` operation is performed element-wise.
This `Covariance` class supports singular covariance matrices. When
computing ``_log_pdet``, non-positive elements of :math:`d` are
ignored. Whitening is not well defined when the point to be whitened
does not lie in the span of the columns of the covariance matrix. The
convention taken here is to treat the inverse square root of
non-positive elements of :math:`d` as zeros.
Examples
--------
Prepare a symmetric positive definite covariance matrix ``A`` and a
data point ``x``.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n = 5
>>> A = np.diag(rng.random(n))
>>> x = rng.random(size=n)
Extract the diagonal from ``A`` and create the `Covariance` object.
>>> d = np.diag(A)
>>> cov = stats.Covariance.from_diagonal(d)
Compare the functionality of the `Covariance` object against a
reference implementations.
>>> res = cov.whiten(x)
>>> ref = np.diag(d**-0.5) @ x
>>> np.allclose(res, ref)
True
>>> res = cov.log_pdet
>>> ref = np.linalg.slogdet(A)[-1]
>>> np.allclose(res, ref)
True
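Non-positive elements of the diagonal are also accepted; they reduce the
rank of the represented matrix:
>>> stats.Covariance.from_diagonal([1, 0, 3]).rank
2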
"""
return CovViaDiagonal(diagonal)
@staticmethod
def from_precision(precision, covariance=None):
r"""
Return a representation of a covariance from its precision matrix.
Parameters
----------
precision : array_like
The precision matrix; that is, the inverse of a square, symmetric,
positive definite covariance matrix.
covariance : array_like, optional
The square, symmetric, positive definite covariance matrix. If not
provided, this may need to be calculated (e.g. to evaluate the
cumulative distribution function of
`scipy.stats.multivariate_normal`) by inverting `precision`.
Notes
-----
Let the covariance matrix be :math:`A`, its precision matrix be
:math:`P = A^{-1}`, and :math:`L` be the lower Cholesky factor such
that :math:`L L^T = P`.
Whitening of a data point :math:`x` is performed by computing
:math:`x^T L`. :math:`\log\det{A}` is calculated as
:math:`-2tr(\log{L})`, where the :math:`\log` operation is performed
element-wise.
This `Covariance` class does not support singular covariance matrices
because the precision matrix does not exist for a singular covariance
matrix.
Examples
--------
Prepare a symmetric positive definite precision matrix ``P`` and a
data point ``x``. (If the precision matrix is not already available,
consider the other factory methods of the `Covariance` class.)
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n = 5
>>> P = rng.random(size=(n, n))
>>> P = P @ P.T # a precision matrix must be positive definite
>>> x = rng.random(size=n)
Create the `Covariance` object.
>>> cov = stats.Covariance.from_precision(P)
Compare the functionality of the `Covariance` object against
reference implementations.
>>> res = cov.whiten(x)
>>> ref = x @ np.linalg.cholesky(P)
>>> np.allclose(res, ref)
True
>>> res = cov.log_pdet
>>> ref = -np.linalg.slogdet(P)[-1]
>>> np.allclose(res, ref)
True
"""
return CovViaPrecision(precision, covariance)
@staticmethod
def from_cholesky(cholesky):
r"""
Representation of a covariance provided via the (lower) Cholesky factor
Parameters
----------
cholesky : array_like
The lower triangular Cholesky factor of the covariance matrix.
Notes
-----
Let the covariance matrix be :math:`A` and let :math:`L` be the lower
Cholesky factor such that :math:`L L^T = A`.
Whitening of a data point :math:`x` is performed by computing
:math:`L^{-1} x`. :math:`\log\det{A}` is calculated as
:math:`2tr(\log{L})`, where the :math:`\log` operation is performed
element-wise.
This `Covariance` class does not support singular covariance matrices
because the Cholesky decomposition does not exist for a singular
covariance matrix.
Examples
--------
Prepare a symmetric positive definite covariance matrix ``A`` and a
data point ``x``.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n = 5
>>> A = rng.random(size=(n, n))
>>> A = A @ A.T # make the covariance symmetric positive definite
>>> x = rng.random(size=n)
Perform the Cholesky decomposition of ``A`` and create the
`Covariance` object.
>>> L = np.linalg.cholesky(A)
>>> cov = stats.Covariance.from_cholesky(L)
Compare the functionality of the `Covariance` object against
reference implementation.
>>> from scipy.linalg import solve_triangular
>>> res = cov.whiten(x)
>>> ref = solve_triangular(L, x, lower=True)
>>> np.allclose(res, ref)
True
>>> res = cov.log_pdet
>>> ref = np.linalg.slogdet(A)[-1]
>>> np.allclose(res, ref)
True
"""
return CovViaCholesky(cholesky)
@staticmethod
def from_eigendecomposition(eigendecomposition):
r"""
Representation of a covariance provided via eigendecomposition
Parameters
----------
eigendecomposition : sequence
A sequence (nominally a tuple) containing the eigenvalue and
eigenvector arrays as computed by `scipy.linalg.eigh` or
`numpy.linalg.eigh`.
Notes
-----
Let the covariance matrix be :math:`A`, let :math:`V` be the matrix of
eigenvectors, and let :math:`W` be the diagonal matrix of eigenvalues
such that :math:`V W V^T = A`.
When all of the eigenvalues are strictly positive, whitening of a
data point :math:`x` is performed by computing
:math:`x^T (V W^{-1/2})`, where the inverse square root can be taken
element-wise.
:math:`\log\det{A}` is calculated as :math:`tr(\log{W})`,
where the :math:`\log` operation is performed element-wise.
This `Covariance` class supports singular covariance matrices. When
computing ``_log_pdet``, non-positive eigenvalues are ignored.
Whitening is not well defined when the point to be whitened
does not lie in the span of the columns of the covariance matrix. The
convention taken here is to treat the inverse square root of
non-positive eigenvalues as zeros.
Examples
--------
Prepare a symmetric positive definite covariance matrix ``A`` and a
data point ``x``.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n = 5
>>> A = rng.random(size=(n, n))
>>> A = A @ A.T # make the covariance symmetric positive definite
>>> x = rng.random(size=n)
Perform the eigendecomposition of ``A`` and create the `Covariance`
object.
>>> w, v = np.linalg.eigh(A)
>>> cov = stats.Covariance.from_eigendecomposition((w, v))
Compare the functionality of the `Covariance` object against
reference implementations.
>>> res = cov.whiten(x)
>>> ref = x @ (v @ np.diag(w**-0.5))
>>> np.allclose(res, ref)
True
>>> res = cov.log_pdet
>>> ref = np.linalg.slogdet(A)[-1]
>>> np.allclose(res, ref)
True
"""
return CovViaEigendecomposition(eigendecomposition)
def whiten(self, x):
"""
Perform a whitening transformation on data.
"Whitening" ("white" as in "white noise", in which each frequency has
equal magnitude) transforms a set of random variables into a new set of
random variables with unit-diagonal covariance. When a whitening
transform is applied to a sample of points distributed according to
a multivariate normal distribution with zero mean, the covariance of
the transformed sample is approximately the identity matrix.
Parameters
----------
x : array_like
An array of points. The last dimension must correspond with the
dimensionality of the space, i.e., the number of columns in the
covariance matrix.
Returns
-------
x_ : array_like
The transformed array of points.
References
----------
.. [1] "Whitening Transformation". Wikipedia.
https://en.wikipedia.org/wiki/Whitening_transformation
.. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
coloring linear transformation". Transactions of VSB 18.2
(2018): 31-35. :doi:`10.31490/tces-2018-0013`
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n = 3
>>> A = rng.random(size=(n, n))
>>> cov_array = A @ A.T # make matrix symmetric positive definite
>>> precision = np.linalg.inv(cov_array)
>>> cov_object = stats.Covariance.from_precision(precision)
>>> x = rng.multivariate_normal(np.zeros(n), cov_array, size=(10000))
>>> x_ = cov_object.whiten(x)
>>> np.cov(x_, rowvar=False) # near-identity covariance
array([[0.97862122, 0.00893147, 0.02430451],
[0.00893147, 0.96719062, 0.02201312],
[0.02430451, 0.02201312, 0.99206881]])
"""
return self._whiten(np.asarray(x))
def colorize(self, x):
"""
Perform a colorizing transformation on data.
"Colorizing" ("color" as in "colored noise", in which different
frequencies may have different magnitudes) transforms a set of
uncorrelated random variables into a new set of random variables with
the desired covariance. When a coloring transform is applied to a
sample of points distributed according to a multivariate normal
distribution with identity covariance and zero mean, the covariance of
the transformed sample is approximately the covariance matrix used
in the coloring transform.
Parameters
----------
x : array_like
An array of points. The last dimension must correspond with the
dimensionality of the space, i.e., the number of columns in the
covariance matrix.
Returns
-------
x_ : array_like
The transformed array of points.
References
----------
.. [1] "Whitening Transformation". Wikipedia.
https://en.wikipedia.org/wiki/Whitening_transformation
.. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
coloring linear transformation". Transactions of VSB 18.2
(2018): 31-35. :doi:`10.31490/tces-2018-0013`
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng(1638083107694713882823079058616272161)
>>> n = 3
>>> A = rng.random(size=(n, n))
>>> cov_array = A @ A.T # make matrix symmetric positive definite
>>> cholesky = np.linalg.cholesky(cov_array)
>>> cov_object = stats.Covariance.from_cholesky(cholesky)
>>> x = rng.multivariate_normal(np.zeros(n), np.eye(n), size=(10000))
>>> x_ = cov_object.colorize(x)
>>> cov_data = np.cov(x_, rowvar=False)
>>> np.allclose(cov_data, cov_array, rtol=3e-2)
True
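Because the covariance here has full rank, `whiten` inverts the
transformation (a round-trip check):
>>> np.allclose(cov_object.whiten(x_), x)
True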
"""
return self._colorize(np.asarray(x))
@property
def log_pdet(self):
"""
Log of the pseudo-determinant of the covariance matrix
"""
return np.array(self._log_pdet, dtype=float)[()]
@property
def rank(self):
"""
Rank of the covariance matrix
"""
return np.array(self._rank, dtype=int)[()]
@property
def covariance(self):
"""
Explicit representation of the covariance matrix
"""
return self._covariance
@property
def shape(self):
"""
Shape of the covariance array
"""
return self._shape
def _validate_matrix(self, A, name):
A = np.atleast_2d(A)
m, n = A.shape[-2:]
if m != n or A.ndim != 2 or not (np.issubdtype(A.dtype, np.integer) or
np.issubdtype(A.dtype, np.floating)):
message = (f"The input `{name}` must be a square, "
"two-dimensional array of real numbers.")
raise ValueError(message)
return A
def _validate_vector(self, A, name):
A = np.atleast_1d(A)
if A.ndim != 1 or not (np.issubdtype(A.dtype, np.integer) or
np.issubdtype(A.dtype, np.floating)):
message = (f"The input `{name}` must be a one-dimensional array "
"of real numbers.")
raise ValueError(message)
return A
class CovViaPrecision(Covariance):
def __init__(self, precision, covariance=None):
precision = self._validate_matrix(precision, 'precision')
if covariance is not None:
covariance = self._validate_matrix(covariance, 'covariance')
message = "`precision.shape` must equal `covariance.shape`."
if precision.shape != covariance.shape:
raise ValueError(message)
self._chol_P = np.linalg.cholesky(precision)
self._log_pdet = -2*np.log(np.diag(self._chol_P)).sum(axis=-1)
self._rank = precision.shape[-1] # must be full rank if invertible
self._precision = precision
self._cov_matrix = covariance
self._shape = precision.shape
self._allow_singular = False
def _whiten(self, x):
return x @ self._chol_P
@cached_property
def _covariance(self):
n = self._shape[-1]
return (linalg.cho_solve((self._chol_P, True), np.eye(n))
if self._cov_matrix is None else self._cov_matrix)
def _colorize(self, x):
return linalg.solve_triangular(self._chol_P.T, x.T, lower=False).T
def _dot_diag(x, d):
# If d were a full diagonal matrix, x @ d would always do what we want.
# Special treatment is needed for n-dimensional `d` in which each row
# includes only the diagonal elements of a covariance matrix.
return x * d if x.ndim < 2 else x * np.expand_dims(d, -2)
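# A quick illustration of the broadcasting above (a sketch, not part of the
# original module): with ``x.shape == (m, n)`` and a batch of diagonals with
# ``d.shape == (..., n)``, ``np.expand_dims(d, -2)`` has shape ``(..., 1, n)``,
# so ``x * np.expand_dims(d, -2)`` scales each column of ``x`` by the matching
# diagonal entry; for 1-D ``x`` the plain elementwise product already suffices.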
class CovViaDiagonal(Covariance):
def __init__(self, diagonal):
diagonal = self._validate_vector(diagonal, 'diagonal')
i_zero = diagonal <= 0
positive_diagonal = np.array(diagonal, dtype=np.float64)
positive_diagonal[i_zero] = 1 # ones don't affect determinant
self._log_pdet = np.sum(np.log(positive_diagonal), axis=-1)
        pseudo_reciprocals = 1 / np.sqrt(positive_diagonal)
        pseudo_reciprocals[i_zero] = 0
        self._sqrt_diagonal = np.sqrt(diagonal)
        self._LP = pseudo_reciprocals
self._rank = positive_diagonal.shape[-1] - i_zero.sum(axis=-1)
self._covariance = np.apply_along_axis(np.diag, -1, diagonal)
self._i_zero = i_zero
self._shape = self._covariance.shape
self._allow_singular = True
def _whiten(self, x):
return _dot_diag(x, self._LP)
def _colorize(self, x):
return _dot_diag(x, self._sqrt_diagonal)
def _support_mask(self, x):
"""
Check whether x lies in the support of the distribution.
"""
return ~np.any(_dot_diag(x, self._i_zero), axis=-1)
class CovViaCholesky(Covariance):
def __init__(self, cholesky):
L = self._validate_matrix(cholesky, 'cholesky')
self._factor = L
self._log_pdet = 2*np.log(np.diag(self._factor)).sum(axis=-1)
self._rank = L.shape[-1] # must be full rank for cholesky
self._shape = L.shape
self._allow_singular = False
@cached_property
def _covariance(self):
return self._factor @ self._factor.T
def _whiten(self, x):
res = linalg.solve_triangular(self._factor, x.T, lower=True).T
return res
def _colorize(self, x):
return x @ self._factor.T
class CovViaEigendecomposition(Covariance):
def __init__(self, eigendecomposition):
eigenvalues, eigenvectors = eigendecomposition
eigenvalues = self._validate_vector(eigenvalues, 'eigenvalues')
eigenvectors = self._validate_matrix(eigenvectors, 'eigenvectors')
message = ("The shapes of `eigenvalues` and `eigenvectors` "
"must be compatible.")
try:
eigenvalues = np.expand_dims(eigenvalues, -2)
eigenvectors, eigenvalues = np.broadcast_arrays(eigenvectors,
eigenvalues)
eigenvalues = eigenvalues[..., 0, :]
except ValueError:
raise ValueError(message)
i_zero = eigenvalues <= 0
positive_eigenvalues = np.array(eigenvalues, dtype=np.float64)
positive_eigenvalues[i_zero] = 1 # ones don't affect determinant
self._log_pdet = np.sum(np.log(positive_eigenvalues), axis=-1)
        pseudo_reciprocals = 1 / np.sqrt(positive_eigenvalues)
        pseudo_reciprocals[i_zero] = 0
        self._LP = eigenvectors * pseudo_reciprocals
self._LA = eigenvectors * np.sqrt(positive_eigenvalues)
self._rank = positive_eigenvalues.shape[-1] - i_zero.sum(axis=-1)
self._w = eigenvalues
self._v = eigenvectors
self._shape = eigenvectors.shape
self._null_basis = eigenvectors * i_zero
# This is only used for `_support_mask`, not to decide whether
# the covariance is singular or not.
self._eps = _multivariate._eigvalsh_to_eps(eigenvalues) * 10**3
self._allow_singular = True
def _whiten(self, x):
return x @ self._LP
def _colorize(self, x):
return x @ self._LA.T
@cached_property
def _covariance(self):
return (self._v * self._w) @ self._v.T
def _support_mask(self, x):
"""
Check whether x lies in the support of the distribution.
"""
residual = np.linalg.norm(x @ self._null_basis, axis=-1)
in_support = residual < self._eps
return in_support
class CovViaPSD(Covariance):
"""
Representation of a covariance provided via an instance of _PSD
"""
def __init__(self, psd):
self._LP = psd.U
self._log_pdet = psd.log_pdet
self._rank = psd.rank
self._covariance = psd._M
self._shape = psd._M.shape
self._psd = psd
self._allow_singular = False # by default
def _whiten(self, x):
return x @ self._LP
def _support_mask(self, x):
return self._psd._support_mask(x)
| 22,535
| 34.545741
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_odds_ratio.py
|
import numpy as np
from scipy.special import ndtri
from scipy.optimize import brentq
from ._discrete_distns import nchypergeom_fisher
from ._common import ConfidenceInterval
def _sample_odds_ratio(table):
"""
Given a table [[a, b], [c, d]], compute a*d/(b*c).
Return nan if the numerator and denominator are 0.
Return inf if just the denominator is 0.
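    For example (illustrative input; callers pass a 2x2 numpy array):
    >>> import numpy as np
    >>> float(_sample_odds_ratio(np.array([[2, 1], [1, 2]])))  # 2*2/(1*1)
    4.0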
"""
# table must be a 2x2 numpy array.
if table[1, 0] > 0 and table[0, 1] > 0:
oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1])
elif table[0, 0] == 0 or table[1, 1] == 0:
oddsratio = np.nan
else:
oddsratio = np.inf
return oddsratio
def _solve(func):
"""
Solve func(nc) = 0. func must be an increasing function.
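    A minimal sketch (hypothetical input, not from the original docstring):
    >>> round(_solve(lambda nc: nc**2 - 4), 6)  # increasing for nc > 0
    2.0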
"""
# We could just as well call the variable `x` instead of `nc`, but we
# always call this function with functions for which nc (the noncentrality
# parameter) is the variable for which we are solving.
nc = 1.0
value = func(nc)
if value == 0:
return nc
# Multiplicative factor by which to increase or decrease nc when
# searching for a bracketing interval.
factor = 2.0
# Find a bracketing interval.
if value > 0:
nc /= factor
while func(nc) > 0:
nc /= factor
lo = nc
hi = factor*nc
else:
nc *= factor
while func(nc) < 0:
nc *= factor
lo = nc/factor
hi = nc
# lo and hi bracket the solution for nc.
nc = brentq(func, lo, hi, xtol=1e-13)
return nc
def _nc_hypergeom_mean_inverse(x, M, n, N):
"""
    For the given noncentral hypergeometric parameters x, M, n, and N
(table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2
contingency table), find the noncentrality parameter of Fisher's
noncentral hypergeometric distribution whose mean is x.
"""
nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)
return nc
def _hypergeom_params_from_table(table):
# The notation M, n and N is consistent with stats.hypergeom and
# stats.nchypergeom_fisher.
x = table[0, 0]
M = table.sum()
n = table[0].sum()
N = table[:, 0].sum()
return x, M, n, N
def _ci_upper(table, alpha):
"""
Compute the upper end of the confidence interval.
"""
if _sample_odds_ratio(table) == np.inf:
return np.inf
x, M, n, N = _hypergeom_params_from_table(table)
# nchypergeom_fisher.cdf is a decreasing function of nc, so we negate
# it in the lambda expression.
nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha)
return nc
def _ci_lower(table, alpha):
"""
Compute the lower end of the confidence interval.
"""
if _sample_odds_ratio(table) == 0:
return 0
x, M, n, N = _hypergeom_params_from_table(table)
nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha)
return nc
def _conditional_oddsratio(table):
"""
Conditional MLE of the odds ratio for the 2x2 contingency table.
"""
x, M, n, N = _hypergeom_params_from_table(table)
# Get the bounds of the support. The support of the noncentral
# hypergeometric distribution with parameters M, n, and N is the same
# for all values of the noncentrality parameter, so we can use 1 here.
lo, hi = nchypergeom_fisher.support(M, n, N, 1)
# Check if x is at one of the extremes of the support. If so, we know
# the odds ratio is either 0 or inf.
if x == lo:
# x is at the low end of the support.
return 0
if x == hi:
# x is at the high end of the support.
return np.inf
nc = _nc_hypergeom_mean_inverse(x, M, n, N)
return nc
def _conditional_oddsratio_ci(table, confidence_level=0.95,
alternative='two-sided'):
"""
Conditional exact confidence interval for the odds ratio.
"""
if alternative == 'two-sided':
alpha = 0.5*(1 - confidence_level)
lower = _ci_lower(table, alpha)
upper = _ci_upper(table, alpha)
elif alternative == 'less':
lower = 0.0
upper = _ci_upper(table, 1 - confidence_level)
else:
# alternative == 'greater'
lower = _ci_lower(table, 1 - confidence_level)
upper = np.inf
return lower, upper
def _sample_odds_ratio_ci(table, confidence_level=0.95,
alternative='two-sided'):
oddsratio = _sample_odds_ratio(table)
log_or = np.log(oddsratio)
se = np.sqrt((1/table).sum())
if alternative == 'less':
z = ndtri(confidence_level)
loglow = -np.inf
loghigh = log_or + z*se
elif alternative == 'greater':
z = ndtri(confidence_level)
loglow = log_or - z*se
loghigh = np.inf
else:
# alternative is 'two-sided'
z = ndtri(0.5*confidence_level + 0.5)
loglow = log_or - z*se
loghigh = log_or + z*se
return np.exp(loglow), np.exp(loghigh)
class OddsRatioResult:
"""
Result of `scipy.stats.contingency.odds_ratio`. See the
docstring for `odds_ratio` for more details.
Attributes
----------
statistic : float
The computed odds ratio.
        * If `kind` is ``'sample'``, this is the sample (or unconditional)
estimate, given by
``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
* If `kind` is ``'conditional'``, this is the conditional
maximum likelihood estimate for the odds ratio. It is
the noncentrality parameter of Fisher's noncentral
hypergeometric distribution with the same hypergeometric
parameters as `table` and whose mean is ``table[0, 0]``.
Methods
-------
confidence_interval :
Confidence interval for the odds ratio.
"""
def __init__(self, _table, _kind, statistic):
# for now, no need to make _table and _kind public, since this sort of
# information is returned in very few `scipy.stats` results
self._table = _table
self._kind = _kind
self.statistic = statistic
def __repr__(self):
return f"OddsRatioResult(statistic={self.statistic})"
def confidence_interval(self, confidence_level=0.95,
alternative='two-sided'):
"""
Confidence interval for the odds ratio.
Parameters
----------
confidence_level: float
Desired confidence level for the confidence interval.
The value must be given as a fraction between 0 and 1.
Default is 0.95 (meaning 95%).
alternative : {'two-sided', 'less', 'greater'}, optional
The alternative hypothesis of the hypothesis test to which the
confidence interval corresponds. That is, suppose the null
hypothesis is that the true odds ratio equals ``OR`` and the
confidence interval is ``(low, high)``. Then the following options
for `alternative` are available (default is 'two-sided'):
* 'two-sided': the true odds ratio is not equal to ``OR``. There
is evidence against the null hypothesis at the chosen
`confidence_level` if ``high < OR`` or ``low > OR``.
* 'less': the true odds ratio is less than ``OR``. The ``low`` end
of the confidence interval is 0, and there is evidence against
the null hypothesis at the chosen `confidence_level` if
``high < OR``.
* 'greater': the true odds ratio is greater than ``OR``. The
``high`` end of the confidence interval is ``np.inf``, and there
is evidence against the null hypothesis at the chosen
`confidence_level` if ``low > OR``.
Returns
-------
ci : ``ConfidenceInterval`` instance
The confidence interval, represented as an object with
attributes ``low`` and ``high``.
Notes
-----
When `kind` is ``'conditional'``, the limits of the confidence
interval are the conditional "exact confidence limits" as described
by Fisher [1]_. The conditional odds ratio and confidence interval are
also discussed in Section 4.1.2 of the text by Sahai and Khurshid [2]_.
When `kind` is ``'sample'``, the confidence interval is computed
under the assumption that the logarithm of the odds ratio is normally
distributed with standard error given by::
se = sqrt(1/a + 1/b + 1/c + 1/d)
where ``a``, ``b``, ``c`` and ``d`` are the elements of the
contingency table. (See, for example, [2]_, section 3.1.3.2,
or [3]_, section 2.3.3).
References
----------
.. [1] R. A. Fisher (1935), The logic of inductive inference,
Journal of the Royal Statistical Society, Vol. 98, No. 1,
pp. 39-82.
.. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
Methods, Techniques, and Applications, CRC Press LLC, Boca
Raton, Florida.
        .. [3] Alan Agresti, An Introduction to Categorical Data Analysis
(second edition), Wiley, Hoboken, NJ, USA (2007).
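        Examples
        --------
        A brief sketch with an illustrative table; the point estimate lies
        inside the two-sided interval:
        >>> from scipy.stats.contingency import odds_ratio
        >>> res = odds_ratio([[10, 20], [30, 40]], kind='sample')
        >>> ci = res.confidence_interval(confidence_level=0.95)
        >>> bool(ci.low < res.statistic < ci.high)
        True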
"""
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError("`alternative` must be 'two-sided', 'less' or "
"'greater'.")
if confidence_level < 0 or confidence_level > 1:
raise ValueError('confidence_level must be between 0 and 1')
if self._kind == 'conditional':
ci = self._conditional_odds_ratio_ci(confidence_level, alternative)
else:
ci = self._sample_odds_ratio_ci(confidence_level, alternative)
return ci
def _conditional_odds_ratio_ci(self, confidence_level=0.95,
alternative='two-sided'):
"""
Confidence interval for the conditional odds ratio.
"""
table = self._table
if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1,
# the odds ratio is NaN and the confidence interval is (0, inf).
ci = (0, np.inf)
else:
ci = _conditional_oddsratio_ci(table,
confidence_level=confidence_level,
alternative=alternative)
return ConfidenceInterval(low=ci[0], high=ci[1])
def _sample_odds_ratio_ci(self, confidence_level=0.95,
alternative='two-sided'):
"""
Confidence interval for the sample odds ratio.
"""
if confidence_level < 0 or confidence_level > 1:
raise ValueError('confidence_level must be between 0 and 1')
table = self._table
if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1,
# the odds ratio is NaN and the confidence interval is (0, inf).
ci = (0, np.inf)
else:
ci = _sample_odds_ratio_ci(table,
confidence_level=confidence_level,
alternative=alternative)
return ConfidenceInterval(low=ci[0], high=ci[1])
def odds_ratio(table, *, kind='conditional'):
r"""
Compute the odds ratio for a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements must be non-negative integers.
kind : str, optional
Which kind of odds ratio to compute, either the sample
odds ratio (``kind='sample'``) or the conditional odds ratio
(``kind='conditional'``). Default is ``'conditional'``.
Returns
-------
result : `~scipy.stats._result_classes.OddsRatioResult` instance
The returned object has two computed attributes:
statistic : float
            * If `kind` is ``'sample'``, this is the sample (or unconditional)
estimate, given by
``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
* If `kind` is ``'conditional'``, this is the conditional
maximum likelihood estimate for the odds ratio. It is
the noncentrality parameter of Fisher's noncentral
hypergeometric distribution with the same hypergeometric
parameters as `table` and whose mean is ``table[0, 0]``.
The object has the method `confidence_interval` that computes
the confidence interval of the odds ratio.
See Also
--------
scipy.stats.fisher_exact
relative_risk
Notes
-----
The conditional odds ratio was discussed by Fisher (see "Example 1"
of [1]_). Texts that cover the odds ratio include [2]_ and [3]_.
.. versionadded:: 1.10.0
References
----------
.. [1] R. A. Fisher (1935), The logic of inductive inference,
Journal of the Royal Statistical Society, Vol. 98, No. 1,
pp. 39-82.
.. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research.
Volume I - The analysis of case-control studies. IARC Sci Publ.
(32):5-338. PMID: 7216345. (See section 4.2.)
.. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
Methods, Techniques, and Applications, CRC Press LLC, Boca
Raton, Florida.
.. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of
Cardiovascular Events in Women and Men: A Sex-Specific
Meta-analysis of Randomized Controlled Trials."
JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006.
Examples
--------
In epidemiology, individuals are classified as "exposed" or
"unexposed" to some factor or treatment. If the occurrence of some
illness is under study, those who have the illness are often
    classified as "cases", and those without it are "noncases". The
    counts of the occurrences of these classes give a contingency
table::
exposed unexposed
cases a b
noncases c d
The sample odds ratio may be written ``(a/c) / (b/d)``. ``a/c`` can
be interpreted as the odds of a case occurring in the exposed group,
and ``b/d`` as the odds of a case occurring in the unexposed group.
The sample odds ratio is the ratio of these odds. If the odds ratio
is greater than 1, it suggests that there is a positive association
between being exposed and being a case.
Interchanging the rows or columns of the contingency table inverts
    the odds ratio, so it is important to understand the meaning of labels
given to the rows and columns of the table when interpreting the
odds ratio.
In [4]_, the use of aspirin to prevent cardiovascular events in women
and men was investigated. The study notably concluded:
...aspirin therapy reduced the risk of a composite of
cardiovascular events due to its effect on reducing the risk of
ischemic stroke in women [...]
The article lists studies of various cardiovascular events. Let's
    focus on the ischemic stroke in women.
The following table summarizes the results of the experiment in which
participants took aspirin or a placebo on a regular basis for several
years. Cases of ischemic stroke were recorded::
Aspirin Control/Placebo
Ischemic stroke 176 230
No stroke 21035 21018
The question we ask is "Is there evidence that the aspirin reduces the
risk of ischemic stroke?"
Compute the odds ratio:
>>> from scipy.stats.contingency import odds_ratio
>>> res = odds_ratio([[176, 230], [21035, 21018]])
>>> res.statistic
0.7646037659999126
For this sample, the odds of getting an ischemic stroke for those who have
been taking aspirin are 0.76 times that of those
who have received the placebo.
To make statistical inferences about the population under study,
we can compute the 95% confidence interval for the odds ratio:
>>> res.confidence_interval(confidence_level=0.95)
ConfidenceInterval(low=0.6241234078749812, high=0.9354102892100372)
The 95% confidence interval for the conditional odds ratio is
approximately (0.62, 0.94).
The fact that the entire 95% confidence interval falls below 1 supports
the authors' conclusion that the aspirin was associated with a
statistically significant reduction in ischemic stroke.
"""
if kind not in ['conditional', 'sample']:
raise ValueError("`kind` must be 'conditional' or 'sample'.")
c = np.asarray(table)
if c.shape != (2, 2):
raise ValueError(f"Invalid shape {c.shape}. The input `table` must be "
"of shape (2, 2).")
if not np.issubdtype(c.dtype, np.integer):
raise ValueError("`table` must be an array of integers, but got "
f"type {c.dtype}")
c = c.astype(np.int64)
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is NaN and
# the odds ratio is NaN.
result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan)
return result
if kind == 'sample':
oddsratio = _sample_odds_ratio(c)
else: # kind is 'conditional'
oddsratio = _conditional_oddsratio(c)
result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio)
return result
| 17,856
| 35.971014
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_censored_data.py
|
import numpy as np
def _validate_1d(a, name, allow_inf=False):
if np.ndim(a) != 1:
raise ValueError(f'`{name}` must be a one-dimensional sequence.')
if np.isnan(a).any():
raise ValueError(f'`{name}` must not contain nan.')
if not allow_inf and np.isinf(a).any():
raise ValueError(f'`{name}` must contain only finite values.')
def _validate_interval(interval):
interval = np.asarray(interval)
if interval.shape == (0,):
# The input was a sequence with length 0.
interval = interval.reshape((0, 2))
if interval.ndim != 2 or interval.shape[-1] != 2:
raise ValueError('`interval` must be a two-dimensional array with '
'shape (m, 2), where m is the number of '
'interval-censored values, but got shape '
f'{interval.shape}')
if np.isnan(interval).any():
raise ValueError('`interval` must not contain nan.')
if np.isinf(interval).all(axis=1).any():
raise ValueError('In each row in `interval`, both values must not'
' be infinite.')
if (interval[:, 0] > interval[:, 1]).any():
raise ValueError('In each row of `interval`, the left value must not'
' exceed the right value.')
uncensored_mask = interval[:, 0] == interval[:, 1]
left_mask = np.isinf(interval[:, 0])
right_mask = np.isinf(interval[:, 1])
interval_mask = np.isfinite(interval).all(axis=1) & ~uncensored_mask
uncensored2 = interval[uncensored_mask, 0]
left2 = interval[left_mask, 1]
right2 = interval[right_mask, 0]
interval2 = interval[interval_mask]
return uncensored2, left2, right2, interval2
def _validate_x_censored(x, censored):
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('`x` must be one-dimensional.')
censored = np.asarray(censored)
if censored.ndim != 1:
raise ValueError('`censored` must be one-dimensional.')
if (~np.isfinite(x)).any():
raise ValueError('`x` must not contain nan or inf.')
if censored.size != x.size:
raise ValueError('`x` and `censored` must have the same length.')
return x, censored.astype(bool)
class CensoredData:
"""
Instances of this class represent censored data.
Instances may be passed to the ``fit`` method of continuous
univariate SciPy distributions for maximum likelihood estimation.
The *only* method of the univariate continuous distributions that
understands `CensoredData` is the ``fit`` method. An instance of
`CensoredData` can not be passed to methods such as ``pdf`` and
``cdf``.
An observation is said to be *censored* when the precise value is unknown,
but it has a known upper and/or lower bound. The conventional terminology
is:
* left-censored: an observation is below a certain value but it is
unknown by how much.
* right-censored: an observation is above a certain value but it is
unknown by how much.
* interval-censored: an observation lies somewhere on an interval between
two values.
Left-, right-, and interval-censored data can be represented by
`CensoredData`.
For convenience, the class methods ``left_censored`` and
``right_censored`` are provided to create a `CensoredData`
instance from a single one-dimensional array of measurements
and a corresponding boolean array to indicate which measurements
are censored. The class method ``interval_censored`` accepts two
one-dimensional arrays that hold the lower and upper bounds of the
intervals.
Parameters
----------
uncensored : array_like, 1D
Uncensored observations.
left : array_like, 1D
Left-censored observations.
right : array_like, 1D
Right-censored observations.
interval : array_like, 2D, with shape (m, 2)
Interval-censored observations. Each row ``interval[k, :]``
represents the interval for the kth interval-censored observation.
Notes
-----
In the input array `interval`, the lower bound of the interval may
be ``-inf``, and the upper bound may be ``inf``, but at least one must be
finite. When the lower bound is ``-inf``, the row represents a left-
censored observation, and when the upper bound is ``inf``, the row
represents a right-censored observation. If the length of an interval
    is 0 (i.e. ``interval[k, 0] == interval[k, 1]``), the observation is
treated as uncensored. So one can represent all the types of censored
and uncensored data in ``interval``, but it is generally more convenient
to use `uncensored`, `left` and `right` for uncensored, left-censored and
right-censored observations, respectively.
Examples
--------
In the most general case, a censored data set may contain values that
are left-censored, right-censored, interval-censored, and uncensored.
For example, here we create a data set with five observations. Two
are uncensored (values 1 and 1.5), one is a left-censored observation
of 0, one is a right-censored observation of 10 and one is
interval-censored in the interval [2, 3].
>>> import numpy as np
>>> from scipy.stats import CensoredData
>>> data = CensoredData(uncensored=[1, 1.5], left=[0], right=[10],
... interval=[[2, 3]])
>>> print(data)
CensoredData(5 values: 2 not censored, 1 left-censored,
1 right-censored, 1 interval-censored)
Equivalently,
>>> data = CensoredData(interval=[[1, 1],
... [1.5, 1.5],
... [-np.inf, 0],
... [10, np.inf],
... [2, 3]])
>>> print(data)
CensoredData(5 values: 2 not censored, 1 left-censored,
1 right-censored, 1 interval-censored)
A common case is to have a mix of uncensored observations and censored
observations that are all right-censored (or all left-censored). For
example, consider an experiment in which six devices are started at
various times and left running until they fail. Assume that time is
    measured in hours, and the experiment is stopped after 30 hours, even
    if not all of the devices have failed by that time. We might end up with
data such as this::
Device Start-time Fail-time Time-to-failure
1 0 13 13
2 2 24 22
3 5 22 17
4 8 23 15
5 10 *** >20
6 12 *** >18
Two of the devices had not failed when the experiment was stopped;
the observations of the time-to-failure for these two devices are
right-censored. We can represent this data with
>>> data = CensoredData(uncensored=[13, 22, 17, 15], right=[20, 18])
>>> print(data)
CensoredData(6 values: 4 not censored, 2 right-censored)
Alternatively, we can use the method `CensoredData.right_censored` to
create a representation of this data. The time-to-failure observations
    are put in the list ``ttf``. The ``censored`` list indicates which values
in ``ttf`` are censored.
>>> ttf = [13, 22, 17, 15, 20, 18]
>>> censored = [False, False, False, False, True, True]
Pass these lists to `CensoredData.right_censored` to create an
instance of `CensoredData`.
>>> data = CensoredData.right_censored(ttf, censored)
>>> print(data)
CensoredData(6 values: 4 not censored, 2 right-censored)
If the input data is interval censored and already stored in two
arrays, one holding the low end of the intervals and another
holding the high ends, the class method ``interval_censored`` can
be used to create the `CensoredData` instance.
This example creates an instance with four interval-censored values.
The intervals are [10, 11], [0.5, 1], [2, 3], and [12.5, 13.5].
>>> a = [10, 0.5, 2, 12.5] # Low ends of the intervals
>>> b = [11, 1.0, 3, 13.5] # High ends of the intervals
>>> data = CensoredData.interval_censored(low=a, high=b)
>>> print(data)
CensoredData(4 values: 0 not censored, 4 interval-censored)
Finally, we create and censor some data from the `weibull_min`
distribution, and then fit `weibull_min` to that data. We'll assume
that the location parameter is known to be 0.
>>> from scipy.stats import weibull_min
>>> rng = np.random.default_rng()
Create the random data set.
>>> x = weibull_min.rvs(2.5, loc=0, scale=30, size=250, random_state=rng)
    >>> x[x > 40] = 40  # Right-censor values greater than 40.
Create the `CensoredData` instance with the `right_censored` method.
The censored values are those where the value is 40.
>>> data = CensoredData.right_censored(x, x == 40)
>>> print(data)
CensoredData(250 values: 215 not censored, 35 right-censored)
35 values have been right-censored.
    Fit `weibull_min` to the censored data. We expect the shape and scale
to be approximately 2.5 and 30, respectively.
>>> weibull_min.fit(data, floc=0)
(2.3575922823897315, 0, 30.40650074451254)
"""
def __init__(self, uncensored=None, *, left=None, right=None,
interval=None):
if uncensored is None:
uncensored = []
if left is None:
left = []
if right is None:
right = []
if interval is None:
interval = np.empty((0, 2))
_validate_1d(uncensored, 'uncensored')
_validate_1d(left, 'left')
_validate_1d(right, 'right')
uncensored2, left2, right2, interval2 = _validate_interval(interval)
self._uncensored = np.concatenate((uncensored, uncensored2))
self._left = np.concatenate((left, left2))
self._right = np.concatenate((right, right2))
# Note that by construction, the private attribute _interval
# will be a 2D array that contains only finite values representing
# intervals with nonzero but finite length.
self._interval = interval2
def __repr__(self):
uncensored_str = " ".join(np.array_repr(self._uncensored).split())
left_str = " ".join(np.array_repr(self._left).split())
right_str = " ".join(np.array_repr(self._right).split())
interval_str = " ".join(np.array_repr(self._interval).split())
return (f"CensoredData(uncensored={uncensored_str}, left={left_str}, "
f"right={right_str}, interval={interval_str})")
def __str__(self):
num_nc = len(self._uncensored)
num_lc = len(self._left)
num_rc = len(self._right)
num_ic = len(self._interval)
n = num_nc + num_lc + num_rc + num_ic
parts = [f'{num_nc} not censored']
if num_lc > 0:
parts.append(f'{num_lc} left-censored')
if num_rc > 0:
parts.append(f'{num_rc} right-censored')
if num_ic > 0:
parts.append(f'{num_ic} interval-censored')
return f'CensoredData({n} values: ' + ', '.join(parts) + ')'
# This is not a complete implementation of the arithmetic operators.
# All we need is subtracting a scalar and dividing by a scalar.
def __sub__(self, other):
return CensoredData(uncensored=self._uncensored - other,
left=self._left - other,
right=self._right - other,
interval=self._interval - other)
def __truediv__(self, other):
return CensoredData(uncensored=self._uncensored / other,
left=self._left / other,
right=self._right / other,
interval=self._interval / other)
def __len__(self):
"""
The number of values (censored and not censored).
"""
return (len(self._uncensored) + len(self._left) + len(self._right)
+ len(self._interval))
def num_censored(self):
"""
Number of censored values.
"""
return len(self._left) + len(self._right) + len(self._interval)
@classmethod
def right_censored(cls, x, censored):
"""
Create a `CensoredData` instance of right-censored data.
Parameters
----------
x : array_like
`x` is the array of observed data or measurements.
`x` must be a one-dimensional sequence of finite numbers.
censored : array_like of bool
`censored` must be a one-dimensional sequence of boolean
values. If ``censored[k]`` is True, the corresponding value
in `x` is right-censored. That is, the value ``x[k]``
is the lower bound of the true (but unknown) value.
Returns
-------
data : `CensoredData`
An instance of `CensoredData` that represents the
collection of uncensored and right-censored values.
Examples
--------
>>> from scipy.stats import CensoredData
Two uncensored values (4 and 10) and two right-censored values
(24 and 25).
>>> data = CensoredData.right_censored([4, 10, 24, 25],
... [False, False, True, True])
>>> data
CensoredData(uncensored=array([ 4., 10.]),
left=array([], dtype=float64), right=array([24., 25.]),
interval=array([], shape=(0, 2), dtype=float64))
>>> print(data)
CensoredData(4 values: 2 not censored, 2 right-censored)
"""
x, censored = _validate_x_censored(x, censored)
return cls(uncensored=x[~censored], right=x[censored])
@classmethod
def left_censored(cls, x, censored):
"""
Create a `CensoredData` instance of left-censored data.
Parameters
----------
x : array_like
`x` is the array of observed data or measurements.
`x` must be a one-dimensional sequence of finite numbers.
censored : array_like of bool
`censored` must be a one-dimensional sequence of boolean
values. If ``censored[k]`` is True, the corresponding value
in `x` is left-censored. That is, the value ``x[k]``
is the upper bound of the true (but unknown) value.
Returns
-------
data : `CensoredData`
An instance of `CensoredData` that represents the
collection of uncensored and left-censored values.
Examples
--------
>>> from scipy.stats import CensoredData
Two uncensored values (0.12 and 0.033) and two left-censored values
(both 1e-3).
>>> data = CensoredData.left_censored([0.12, 0.033, 1e-3, 1e-3],
... [False, False, True, True])
>>> data
CensoredData(uncensored=array([0.12 , 0.033]),
left=array([0.001, 0.001]), right=array([], dtype=float64),
interval=array([], shape=(0, 2), dtype=float64))
>>> print(data)
CensoredData(4 values: 2 not censored, 2 left-censored)
"""
x, censored = _validate_x_censored(x, censored)
return cls(uncensored=x[~censored], left=x[censored])
@classmethod
def interval_censored(cls, low, high):
"""
Create a `CensoredData` instance of interval-censored data.
This method is useful when all the data is interval-censored, and
the low and high ends of the intervals are already stored in
separate one-dimensional arrays.
Parameters
----------
low : array_like
The one-dimensional array containing the low ends of the
intervals.
high : array_like
The one-dimensional array containing the high ends of the
intervals.
Returns
-------
data : `CensoredData`
An instance of `CensoredData` that represents the
collection of censored values.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import CensoredData
``a`` and ``b`` are the low and high ends of a collection of
interval-censored values.
>>> a = [0.5, 2.0, 3.0, 5.5]
>>> b = [1.0, 2.5, 3.5, 7.0]
>>> data = CensoredData.interval_censored(low=a, high=b)
>>> print(data)
CensoredData(4 values: 0 not censored, 4 interval-censored)
"""
_validate_1d(low, 'low', allow_inf=True)
_validate_1d(high, 'high', allow_inf=True)
if len(low) != len(high):
raise ValueError('`low` and `high` must have the same length.')
interval = np.column_stack((low, high))
uncensored, left, right, interval = _validate_interval(interval)
return cls(uncensored=uncensored, left=left, right=right,
interval=interval)
def _uncensor(self):
"""
This function is used when a non-censored version of the data
is needed to create a rough estimate of the parameters of a
distribution via the method of moments or some similar method.
The data is "uncensored" by taking the given endpoints as the
data for the left- or right-censored data, and the mean for the
interval-censored data.
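        A small sketch (illustrative values):
        >>> data = CensoredData(uncensored=[1.0], left=[2.0], right=[3.0],
        ...                     interval=[[4.0, 6.0]])
        >>> data._uncensor()  # interval midpoint (4 + 6)/2 = 5 is used
        array([1., 2., 3., 5.])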
"""
data = np.concatenate((self._uncensored, self._left, self._right,
self._interval.mean(axis=1)))
return data
def _supported(self, a, b):
"""
Return a subset of self containing the values that are in
(or overlap with) the interval (a, b).
"""
uncensored = self._uncensored
uncensored = uncensored[(a < uncensored) & (uncensored < b)]
left = self._left
left = left[a < left]
right = self._right
right = right[right < b]
interval = self._interval
interval = interval[(a < interval[:, 1]) & (interval[:, 0] < b)]
return CensoredData(uncensored, left=left, right=right,
interval=interval)
| 18,306
| 38.797826
| 78
|
py
|
scipy
|
scipy-main/scipy/stats/setup.py
|
import os
from os.path import join
from numpy.distutils.misc_util import get_info
def pre_build_hook(build_ext, ext):
from scipy._build_utils.compiler_helper import get_cxx_std_flag
std_flag = get_cxx_std_flag(build_ext._cxx_compiler)
if std_flag is not None:
ext.extra_compile_args.append(std_flag)
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from scipy._build_utils.compiler_helper import set_cxx_flags_hook
import numpy as np
config = Configuration('stats', parent_package, top_path)
config.add_data_dir('tests')
# add _stats module
config.add_extension('_stats',
sources=['_stats.c'])
# add _mvn module
config.add_extension('_mvn',
sources=['mvn.pyf', 'mvndst.f'])
# add ansari-bradley and shapiro-wilk module _AB_SW.pyx
config.add_extension('_ansari_swilk_statistics',
sources=['_ansari_swilk_statistics.c'])
# add _sobol module
config.add_extension('_sobol',
sources=['_sobol.c'])
config.add_data_files('_sobol_direction_numbers.npz')
# add _qmc_cy module
ext = config.add_extension('_qmc_cy',
sources=['_qmc_cy.cxx'])
ext._pre_build_hook = set_cxx_flags_hook
if int(os.environ.get('SCIPY_USE_PYTHRAN', 1)):
import pythran
ext = pythran.dist.PythranExtension(
'scipy.stats._stats_pythran',
sources=["scipy/stats/_stats_pythran.py"],
config=['compiler.blas=none'])
config.ext_modules.append(ext)
# add BiasedUrn module
config.add_data_files('_biasedurn.pxd')
biasedurn_libs = ['npyrandom', 'npymath']
biasedurn_libdirs = [join(np.get_include(), '..', '..', 'random', 'lib')]
biasedurn_libdirs += get_info('npymath')['library_dirs']
ext = config.add_extension(
'_biasedurn',
sources=[
'_biasedurn.cxx',
'biasedurn/impls.cpp',
'biasedurn/fnchyppr.cpp',
'biasedurn/wnchyppr.cpp',
'biasedurn/stoc1.cpp',
'biasedurn/stoc3.cpp'],
include_dirs=[np.get_include()],
library_dirs=biasedurn_libdirs,
libraries=biasedurn_libs,
define_macros=[('R_BUILD', None)],
language='c++',
depends=['biasedurn/stocR.h'],
)
ext._pre_build_hook = pre_build_hook
# add unuran submodule
config.add_subpackage('_unuran')
# add boost stats distributions
config.add_subpackage('_boost')
# add levy stable submodule
config.add_subpackage('_levy_stable')
# add rcont submodule
config.add_subpackage('_rcont')
# Type stubs
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 2,936
| 29.278351
| 77
|
py
|
scipy
|
scipy-main/scipy/stats/_mstats_extras.py
|
"""
Additional statistics functions with support for masked arrays.
"""
# Original author (2007): Pierre GF Gerard-Marchant
__all__ = ['compare_medians_ms',
'hdquantiles', 'hdmedian', 'hdquantiles_sd',
'idealfourths',
'median_cihs','mjci','mquantiles_cimj',
'rsh',
'trimmed_mean_ci',]
import numpy as np
from numpy import float_, int_, ndarray
import numpy.ma as ma
from numpy.ma import MaskedArray
from . import _mstats_basic as mstats
from scipy.stats.distributions import norm, beta, t, binom
def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,):
"""
Computes quantile estimates with the Harrell-Davis method.
The quantile estimates are calculated as a weighted linear combination
of order statistics.
Parameters
----------
data : array_like
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
axis : int or None, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
var : bool, optional
Whether to return the variance of the estimate.
Returns
-------
hdquantiles : MaskedArray
A (p,) array of quantiles (if `var` is False), or a (2,p) array of
quantiles and variances (if `var` is True), where ``p`` is the
number of quantiles.
See Also
--------
hdquantiles_sd
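    Examples
    --------
    A rough sketch (illustrative data); the estimates track the usual
    quartiles of evenly spaced data:
    >>> import numpy as np
    >>> from scipy.stats.mstats import hdquantiles
    >>> hd = hdquantiles(np.arange(101.), prob=[0.25, 0.5, 0.75])
    >>> bool(np.allclose(hd, [25., 50., 75.], atol=1))
    True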
"""
def _hd_1D(data,prob,var):
"Computes the HD quantiles for a 1D array. Returns nan for invalid data."
xsorted = np.squeeze(np.sort(data.compressed().view(ndarray)))
# Don't use length here, in case we have a numpy scalar
n = xsorted.size
hd = np.empty((2,len(prob)), float_)
if n < 2:
hd.flat = np.nan
if var:
return hd
return hd[0]
v = np.arange(n+1) / float(n)
betacdf = beta.cdf
for (i,p) in enumerate(prob):
_w = betacdf(v, (n+1)*p, (n+1)*(1-p))
w = _w[1:] - _w[:-1]
hd_mean = np.dot(w, xsorted)
hd[0,i] = hd_mean
#
hd[1,i] = np.dot(w, (xsorted-hd_mean)**2)
#
hd[0, prob == 0] = xsorted[0]
hd[0, prob == 1] = xsorted[-1]
if var:
hd[1, prob == 0] = hd[1, prob == 1] = np.nan
return hd
return hd[0]
# Initialization & checks
data = ma.array(data, copy=False, dtype=float_)
p = np.array(prob, copy=False, ndmin=1)
# Computes quantiles along axis (or globally)
if (axis is None) or (data.ndim == 1):
result = _hd_1D(data, p, var)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
result = ma.apply_along_axis(_hd_1D, axis, data, p, var)
return ma.fix_invalid(result, copy=False)
def hdmedian(data, axis=-1, var=False):
"""
Returns the Harrell-Davis estimate of the median along the given axis.
Parameters
----------
data : ndarray
Data array.
axis : int, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
var : bool, optional
Whether to return the variance of the estimate.
Returns
-------
hdmedian : MaskedArray
The median values. If ``var=True``, the variance is returned inside
        the masked array. E.g. for a 1-D array the shape changes from (1,) to
(2,).
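    Examples
    --------
    For symmetric data the estimate matches the sample median (illustrative):
    >>> import numpy as np
    >>> from scipy.stats.mstats import hdmedian
    >>> x = np.arange(9.)
    >>> bool(np.isclose(float(hdmedian(x)), np.median(x)))
    True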
"""
result = hdquantiles(data,[0.5], axis=axis, var=var)
return result.squeeze()
def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None):
"""
The standard error of the Harrell-Davis quantile estimates by jackknife.
Parameters
----------
data : array_like
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
axis : int, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
Returns
-------
hdquantiles_sd : MaskedArray
Standard error of the Harrell-Davis quantile estimates.
See Also
--------
hdquantiles
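    Examples
    --------
    A quick sketch (illustrative data); the jackknife standard errors are
    positive:
    >>> import numpy as np
    >>> from scipy.stats.mstats import hdquantiles_sd
    >>> sd = hdquantiles_sd(np.arange(40.), prob=[0.25, 0.5, 0.75])
    >>> bool((sd > 0).all())
    True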
"""
def _hdsd_1D(data, prob):
"Computes the std error for 1D arrays."
xsorted = np.sort(data.compressed())
n = len(xsorted)
hdsd = np.empty(len(prob), float_)
if n < 2:
hdsd.flat = np.nan
vv = np.arange(n) / float(n-1)
betacdf = beta.cdf
for (i,p) in enumerate(prob):
_w = betacdf(vv, n*p, n*(1-p))
w = _w[1:] - _w[:-1]
# cumulative sum of weights and data points if
# ith point is left out for jackknife
mx_ = np.zeros_like(xsorted)
mx_[1:] = np.cumsum(w * xsorted[:-1])
# similar but from the right
mx_[:-1] += np.cumsum(w[::-1] * xsorted[:0:-1])[::-1]
hdsd[i] = np.sqrt(mx_.var() * (n - 1))
return hdsd
# Initialization & checks
data = ma.array(data, copy=False, dtype=float_)
p = np.array(prob, copy=False, ndmin=1)
# Computes quantiles along axis (or globally)
if (axis is None):
result = _hdsd_1D(data, p)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
result = ma.apply_along_axis(_hdsd_1D, axis, data, p)
return ma.fix_invalid(result, copy=False).ravel()
def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True),
alpha=0.05, axis=None):
"""
Selected confidence interval of the trimmed mean along the given axis.
Parameters
----------
data : array_like
Input data.
limits : {None, tuple}, optional
None or a two item tuple.
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1. If ``n``
is the number of unmasked data before trimming, then
(``n * limits[0]``)th smallest data and (``n * limits[1]``)th
largest data are masked. The total number of unmasked data after
trimming is ``n * (1. - sum(limits))``.
The value of one limit can be set to None to indicate an open interval.
Defaults to (0.2, 0.2).
inclusive : (2,) tuple of boolean, optional
If relative==False, tuple indicating whether values exactly equal to
the absolute limits are allowed.
If relative==True, tuple indicating whether the number of data being
masked on each side should be rounded (True) or truncated (False).
Defaults to (True, True).
alpha : float, optional
Confidence level of the intervals.
Defaults to 0.05.
axis : int, optional
Axis along which to cut. If None, uses a flattened version of `data`.
Defaults to None.
Returns
-------
trimmed_mean_ci : (2,) ndarray
The lower and upper confidence intervals of the trimmed data.
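    Examples
    --------
    A small sketch (illustrative data); the interval brackets the trimmed
    mean (here 9.5):
    >>> import numpy as np
    >>> from scipy.stats.mstats import trimmed_mean_ci
    >>> low, high = trimmed_mean_ci(np.arange(20.), limits=(0.1, 0.1))
    >>> bool(low < 9.5 < high)
    True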
"""
data = ma.array(data, copy=False)
trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis)
tmean = trimmed.mean(axis)
tstde = mstats.trimmed_stde(data,limits=limits,inclusive=inclusive,axis=axis)
df = trimmed.count(axis) - 1
tppf = t.ppf(1-alpha/2.,df)
return np.array((tmean - tppf*tstde, tmean+tppf*tstde))
def mjci(data, prob=[0.25,0.5,0.75], axis=None):
"""
Returns the Maritz-Jarrett estimators of the standard error of selected
experimental quantiles of the data.
Parameters
----------
data : ndarray
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
axis : int or None, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
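    Examples
    --------
    A short sketch (illustrative data); the estimated standard errors are
    positive:
    >>> import numpy as np
    >>> from scipy.stats.mstats import mjci
    >>> se = mjci(np.arange(50.), prob=[0.25, 0.5, 0.75])
    >>> bool((se > 0).all())
    True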
"""
def _mjci_1D(data, p):
data = np.sort(data.compressed())
n = data.size
prob = (np.array(p) * n + 0.5).astype(int_)
betacdf = beta.cdf
mj = np.empty(len(prob), float_)
x = np.arange(1,n+1, dtype=float_) / n
y = x - 1./n
for (i,m) in enumerate(prob):
W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m)
C1 = np.dot(W,data)
C2 = np.dot(W,data**2)
mj[i] = np.sqrt(C2 - C1**2)
return mj
data = ma.array(data, copy=False)
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
p = np.array(prob, copy=False, ndmin=1)
# Computes quantiles along axis (or globally)
if (axis is None):
return _mjci_1D(data, p)
else:
return ma.apply_along_axis(_mjci_1D, axis, data, p)
def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None):
"""
Computes the alpha confidence interval for the selected quantiles of the
data, with Maritz-Jarrett estimators.
Parameters
----------
data : ndarray
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
alpha : float, optional
Confidence level of the intervals.
axis : int or None, optional
Axis along which to compute the quantiles.
If None, use a flattened array.
Returns
-------
ci_lower : ndarray
The lower boundaries of the confidence interval. Of the same length as
`prob`.
ci_upper : ndarray
The upper boundaries of the confidence interval. Of the same length as
`prob`.
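    Examples
    --------
    A quick sanity check (illustrative data); each quantile lies strictly
    inside its interval:
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> x = np.arange(50.)
    >>> lo, up = mstats.mquantiles_cimj(x, prob=[0.25, 0.5, 0.75])
    >>> q = mstats.mquantiles(x, [0.25, 0.5, 0.75], alphap=0, betap=0)
    >>> bool(((lo < q) & (q < up)).all())
    True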
"""
alpha = min(alpha, 1 - alpha)
z = norm.ppf(1 - alpha/2.)
xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)
smj = mjci(data, prob, axis=axis)
return (xq - z * smj, xq + z * smj)
def median_cihs(data, alpha=0.05, axis=None):
"""
Computes the alpha-level confidence interval for the median of the data.
    Uses the Hettmansperger-Sheather method.
Parameters
----------
data : array_like
Input data. Masked values are discarded. The input should be 1D only,
or `axis` should be set to None.
alpha : float, optional
Confidence level of the intervals.
axis : int or None, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
Returns
-------
median_cihs
Alpha level confidence interval.
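    Examples
    --------
    A short sketch (illustrative data); the interval brackets the sample
    median (here 7.0):
    >>> import numpy as np
    >>> from scipy.stats.mstats import median_cihs
    >>> lo, up = median_cihs(np.arange(15.))
    >>> bool(lo <= 7.0 <= up)
    True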
"""
def _cihs_1D(data, alpha):
data = np.sort(data.compressed())
n = len(data)
alpha = min(alpha, 1-alpha)
k = int(binom._ppf(alpha/2., n, 0.5))
gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
if gk < 1-alpha:
k -= 1
gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5)
I = (gk - 1 + alpha)/(gk - gkk)
lambd = (n-k) * I / float(k + (n-2*k)*I)
lims = (lambd*data[k] + (1-lambd)*data[k-1],
lambd*data[n-k-1] + (1-lambd)*data[n-k])
return lims
data = ma.array(data, copy=False)
# Computes quantiles along axis (or globally)
if (axis is None):
result = _cihs_1D(data, alpha)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
result = ma.apply_along_axis(_cihs_1D, axis, data, alpha)
return result
def compare_medians_ms(group_1, group_2, axis=None):
"""
Compares the medians from two independent groups along the given axis.
The comparison is performed using the McKean-Schrader estimate of the
standard error of the medians.
Parameters
----------
group_1 : array_like
First dataset. Has to be of size >=7.
group_2 : array_like
Second dataset. Has to be of size >=7.
axis : int, optional
Axis along which the medians are estimated. If None, the arrays are
flattened. If `axis` is not None, then `group_1` and `group_2`
should have the same shape.
Returns
-------
compare_medians_ms : {float, ndarray}
If `axis` is None, then returns a float, otherwise returns a 1-D
ndarray of floats with a length equal to the length of `group_1`
along `axis`.
Examples
--------
>>> from scipy import stats
>>> a = [1, 2, 3, 4, 5, 6, 7]
>>> b = [8, 9, 10, 11, 12, 13, 14]
>>> stats.mstats.compare_medians_ms(a, b, axis=None)
1.0693225866553746e-05
The function is vectorized to compute along a given axis.
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> x = rng.random(size=(3, 7))
>>> y = rng.random(size=(3, 8))
>>> stats.mstats.compare_medians_ms(x, y, axis=1)
array([0.36908985, 0.36092538, 0.2765313 ])
References
----------
.. [1] McKean, Joseph W., and Ronald M. Schrader. "A comparison of methods
for studentizing the sample median." Communications in
Statistics-Simulation and Computation 13.6 (1984): 751-773.
"""
(med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
(std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
mstats.stde_median(group_2, axis=axis))
W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
return 1 - norm.cdf(W)
def idealfourths(data, axis=None):
"""
Returns an estimate of the lower and upper quartiles.
Uses the ideal fourths algorithm.
Parameters
----------
data : array_like
Input array.
axis : int, optional
Axis along which the quartiles are estimated. If None, the arrays are
flattened.
Returns
-------
idealfourths : {list of floats, masked array}
Returns the two internal values that divide `data` into four parts
using the ideal fourths algorithm either along the flattened array
(if `axis` is None) or along `axis` of `data`.
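    Examples
    --------
    A small sketch (illustrative data):
    >>> import numpy as np
    >>> from scipy.stats.mstats import idealfourths
    >>> q = idealfourths(np.arange(1., 9.))
    >>> bool(np.allclose(q, [2.4166667, 6.5833333]))
    True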
"""
def _idf(data):
x = data.compressed()
n = len(x)
if n < 3:
return [np.nan,np.nan]
(j,h) = divmod(n/4. + 5/12.,1)
j = int(j)
qlo = (1-h)*x[j-1] + h*x[j]
k = n - j
qup = (1-h)*x[k] + h*x[k-1]
return [qlo, qup]
data = ma.sort(data, axis=axis).view(MaskedArray)
if (axis is None):
return _idf(data)
else:
return ma.apply_along_axis(_idf, axis, data)
def rsh(data, points=None):
"""
Evaluates Rosenblatt's shifted histogram estimators for each data point.
Rosenblatt's estimator is a centered finite-difference approximation to the
derivative of the empirical cumulative distribution function.
Parameters
----------
data : sequence
Input data, should be 1-D. Masked values are ignored.
points : sequence or None, optional
Sequence of points where to evaluate Rosenblatt shifted histogram.
If None, use the data.
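    Examples
    --------
    A small sketch (illustrative data); the estimate is positive near the
    centre of the sample:
    >>> import numpy as np
    >>> from scipy.stats.mstats import rsh
    >>> d = rsh(np.arange(20.), points=[10.])
    >>> bool(d[0] > 0)
    True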
"""
data = ma.array(data, copy=False)
if points is None:
points = data
else:
points = np.array(points, copy=False, ndmin=1)
if data.ndim != 1:
        raise AttributeError("The input array should be 1D only!")
n = data.count()
r = idealfourths(data, axis=None)
h = 1.2 * (r[-1]-r[0]) / n**(1./5)
nhi = (data[:,None] <= points[None,:] + h).sum(0)
nlo = (data[:,None] < points[None,:] - h).sum(0)
return (nhi-nlo) / (2.*n*h)
| 15,610
| 30.159681
| 81
|
py
|
scipy
|
scipy-main/scipy/stats/_page_trend_test.py
|
from itertools import permutations
import numpy as np
import math
from ._continuous_distns import norm
import scipy.stats
from dataclasses import dataclass
@dataclass
class PageTrendTestResult:
statistic: float
pvalue: float
method: str
def page_trend_test(data, ranked=False, predicted_ranks=None, method='auto'):
r"""
Perform Page's Test, a measure of trend in observations between treatments.
Page's Test (also known as Page's :math:`L` test) is useful when:
* there are :math:`n \geq 3` treatments,
* :math:`m \geq 2` subjects are observed for each treatment, and
* the observations are hypothesized to have a particular order.
Specifically, the test considers the null hypothesis that
.. math::
m_1 = m_2 = m_3 \cdots = m_n,
where :math:`m_j` is the mean of the observed quantity under treatment
:math:`j`, against the alternative hypothesis that
.. math::
m_1 \leq m_2 \leq m_3 \leq \cdots \leq m_n,
where at least one inequality is strict.
As noted by [4]_, Page's :math:`L` test has greater statistical power than
the Friedman test against the alternative that there is a difference in
trend, as Friedman's test only considers a difference in the means of the
observations without considering their order. Whereas Spearman :math:`\rho`
considers the correlation between the ranked observations of two variables
(e.g. the airspeed velocity of a swallow vs. the weight of the coconut it
carries), Page's :math:`L` is concerned with a trend in an observation
(e.g. the airspeed velocity of a swallow) across several distinct
treatments (e.g. carrying each of five coconuts of different weight) even
as the observation is repeated with multiple subjects (e.g. one European
swallow and one African swallow).
Parameters
----------
data : array-like
A :math:`m \times n` array; the element in row :math:`i` and
column :math:`j` is the observation corresponding with subject
:math:`i` and treatment :math:`j`. By default, the columns are
assumed to be arranged in order of increasing predicted mean.
ranked : boolean, optional
By default, `data` is assumed to be observations rather than ranks;
it will be ranked with `scipy.stats.rankdata` along ``axis=1``. If
`data` is provided in the form of ranks, pass argument ``True``.
predicted_ranks : array-like, optional
The predicted ranks of the column means. If not specified,
the columns are assumed to be arranged in order of increasing
predicted mean, so the default `predicted_ranks` are
:math:`[1, 2, \dots, n-1, n]`.
method : {'auto', 'asymptotic', 'exact'}, optional
Selects the method used to calculate the *p*-value. The following
options are available.
* 'auto': selects between 'exact' and 'asymptotic' to
achieve reasonably accurate results in reasonable time (default)
* 'asymptotic': compares the standardized test statistic against
the normal distribution
* 'exact': computes the exact *p*-value by comparing the observed
:math:`L` statistic against those realized by all possible
permutations of ranks (under the null hypothesis that each
permutation is equally likely)
Returns
-------
res : PageTrendTestResult
An object containing attributes:
statistic : float
Page's :math:`L` test statistic.
pvalue : float
The associated *p*-value
method : {'asymptotic', 'exact'}
The method used to compute the *p*-value
See Also
--------
rankdata, friedmanchisquare, spearmanr
Notes
-----
As noted in [1]_, "the :math:`n` 'treatments' could just as well represent
:math:`n` objects or events or performances or persons or trials ranked."
Similarly, the :math:`m` 'subjects' could equally stand for :math:`m`
"groupings by ability or some other control variable, or judges doing
the ranking, or random replications of some other sort."
The procedure for calculating the :math:`L` statistic, adapted from
[1]_, is:
1. "Predetermine with careful logic the appropriate hypotheses
       concerning the predicted ordering of the experimental results.
If no reasonable basis for ordering any treatments is known, the
:math:`L` test is not appropriate."
2. "As in other experiments, determine at what level of confidence
you will reject the null hypothesis that there is no agreement of
experimental results with the monotonic hypothesis."
3. "Cast the experimental material into a two-way table of :math:`n`
columns (treatments, objects ranked, conditions) and :math:`m`
rows (subjects, replication groups, levels of control variables)."
4. "When experimental observations are recorded, rank them across each
row", e.g. ``ranks = scipy.stats.rankdata(data, axis=1)``.
5. "Add the ranks in each column", e.g.
``colsums = np.sum(ranks, axis=0)``.
6. "Multiply each sum of ranks by the predicted rank for that same
column", e.g. ``products = predicted_ranks * colsums``.
7. "Sum all such products", e.g. ``L = products.sum()``.
[1]_ continues by suggesting use of the standardized statistic
.. math::
\chi_L^2 = \frac{\left[12L-3mn(n+1)^2\right]^2}{mn^2(n^2-1)(n+1)}
"which is distributed approximately as chi-square with 1 degree of
freedom. The ordinary use of :math:`\chi^2` tables would be
equivalent to a two-sided test of agreement. If a one-sided test
is desired, *as will almost always be the case*, the probability
discovered in the chi-square table should be *halved*."
However, this standardized statistic does not distinguish between the
observed values being well correlated with the predicted ranks and being
    *anti*-correlated with the predicted ranks. Instead, we follow [2]_
and calculate the standardized statistic
.. math::
\Lambda = \frac{L - E_0}{\sqrt{V_0}},
where :math:`E_0 = \frac{1}{4} mn(n+1)^2` and
:math:`V_0 = \frac{1}{144} mn^2(n+1)(n^2-1)`, "which is asymptotically
normal under the null hypothesis".
The *p*-value for ``method='exact'`` is generated by comparing the observed
value of :math:`L` against the :math:`L` values generated for all
:math:`(n!)^m` possible permutations of ranks. The calculation is performed
    using the recursive method of [5]_.
The *p*-values are not adjusted for the possibility of ties. When
ties are present, the reported ``'exact'`` *p*-values may be somewhat
larger (i.e. more conservative) than the true *p*-value [2]_. The
    ``'asymptotic'`` *p*-values, however, tend to be smaller (i.e. less
conservative) than the ``'exact'`` *p*-values.
References
----------
.. [1] Ellis Batten Page, "Ordered hypotheses for multiple treatments:
a significant test for linear ranks", *Journal of the American
Statistical Association* 58(301), p. 216--230, 1963.
.. [2] Markus Neuhauser, *Nonparametric Statistical Test: A computational
approach*, CRC Press, p. 150--152, 2012.
.. [3] Statext LLC, "Page's L Trend Test - Easy Statistics", *Statext -
Statistics Study*, https://www.statext.com/practice/PageTrendTest03.php,
Accessed July 12, 2020.
.. [4] "Page's Trend Test", *Wikipedia*, WikimediaFoundation,
https://en.wikipedia.org/wiki/Page%27s_trend_test,
Accessed July 12, 2020.
.. [5] Robert E. Odeh, "The exact distribution of Page's L-statistic in
the two-way layout", *Communications in Statistics - Simulation and
Computation*, 6(1), p. 49--61, 1977.
Examples
--------
We use the example from [3]_: 10 students are asked to rate three
teaching methods - tutorial, lecture, and seminar - on a scale of 1-5,
with 1 being the lowest and 5 being the highest. We have decided that
a confidence level of 99% is required to reject the null hypothesis in
favor of our alternative: that the seminar will have the highest ratings
and the tutorial will have the lowest. Initially, the data have been
tabulated with each row representing an individual student's ratings of
the three methods in the following order: tutorial, lecture, seminar.
>>> table = [[3, 4, 3],
... [2, 2, 4],
... [3, 3, 5],
... [1, 3, 2],
... [2, 3, 2],
... [2, 4, 5],
... [1, 2, 4],
... [3, 4, 4],
... [2, 4, 5],
... [1, 3, 4]]
Because the tutorial is hypothesized to have the lowest ratings, the
column corresponding with tutorial rankings should be first; the seminar
is hypothesized to have the highest ratings, so its column should be last.
Since the columns are already arranged in this order of increasing
predicted mean, we can pass the table directly into `page_trend_test`.
>>> from scipy.stats import page_trend_test
>>> res = page_trend_test(table)
>>> res
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
method='exact')
This *p*-value indicates that there is a 0.1819% chance that
the :math:`L` statistic would reach such an extreme value under the null
hypothesis. Because 0.1819% is less than 1%, we have evidence to reject
the null hypothesis in favor of our alternative at a 99% confidence level.
The value of the :math:`L` statistic is 133.5. To check this manually,
we rank the data such that high scores correspond with high ranks, settling
ties with an average rank:
>>> from scipy.stats import rankdata
>>> ranks = rankdata(table, axis=1)
>>> ranks
array([[1.5, 3. , 1.5],
[1.5, 1.5, 3. ],
[1.5, 1.5, 3. ],
[1. , 3. , 2. ],
[1.5, 3. , 1.5],
[1. , 2. , 3. ],
[1. , 2. , 3. ],
[1. , 2.5, 2.5],
[1. , 2. , 3. ],
[1. , 2. , 3. ]])
We add the ranks within each column, multiply the sums by the
predicted ranks, and sum the products.
>>> import numpy as np
>>> m, n = ranks.shape
>>> predicted_ranks = np.arange(1, n+1)
>>> L = (predicted_ranks * np.sum(ranks, axis=0)).sum()
>>> res.statistic == L
True
As presented in [3]_, the asymptotic approximation of the *p*-value is the
survival function of the normal distribution evaluated at the standardized
test statistic:
>>> from scipy.stats import norm
>>> E0 = (m*n*(n+1)**2)/4
>>> V0 = (m*n**2*(n+1)*(n**2-1))/144
>>> Lambda = (L-E0)/np.sqrt(V0)
>>> p = norm.sf(Lambda)
>>> p
0.0012693433690751756
This does not precisely match the *p*-value reported by `page_trend_test`
above. The asymptotic distribution is not very accurate, nor conservative,
for :math:`m \leq 12` and :math:`n \leq 8`, so `page_trend_test` chose to
use ``method='exact'`` based on the dimensions of the table and the
recommendations in Page's original paper [1]_. To override
`page_trend_test`'s choice, provide the `method` argument.
>>> res = page_trend_test(table, method="asymptotic")
>>> res
PageTrendTestResult(statistic=133.5, pvalue=0.0012693433690751756,
method='asymptotic')
If the data are already ranked, we can pass in the ``ranks`` instead of
the ``table`` to save computation time.
>>> res = page_trend_test(ranks, # ranks of data
... ranked=True, # data is already ranked
... )
>>> res
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
method='exact')
Suppose the raw data had been tabulated in an order different from the
order of predicted means, say lecture, seminar, tutorial.
>>> table = np.asarray(table)[:, [1, 2, 0]]
Since the arrangement of this table is not consistent with the assumed
ordering, we can either rearrange the table or provide the
`predicted_ranks`. Remembering that the lecture is predicted
to have the middle rank, the seminar the highest, and tutorial the lowest,
we pass:
>>> res = page_trend_test(table, # data as originally tabulated
... predicted_ranks=[2, 3, 1], # our predicted order
... )
>>> res
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
method='exact')
"""
# Possible values of the method parameter and the corresponding function
# used to evaluate the p value
methods = {"asymptotic": _l_p_asymptotic,
"exact": _l_p_exact,
"auto": None}
if method not in methods:
raise ValueError(f"`method` must be in {set(methods)}")
ranks = np.array(data, copy=False)
if ranks.ndim != 2: # TODO: relax this to accept 3d arrays?
raise ValueError("`data` must be a 2d array.")
m, n = ranks.shape
if m < 2 or n < 3:
raise ValueError("Page's L is only appropriate for data with two "
"or more rows and three or more columns.")
if np.any(np.isnan(data)):
raise ValueError("`data` contains NaNs, which cannot be ranked "
"meaningfully")
# ensure NumPy array and rank the data if it's not already ranked
if ranked:
# Only a basic check on whether data is ranked. Checking that the data
# is properly ranked could take as much time as ranking it.
if not (ranks.min() >= 1 and ranks.max() <= ranks.shape[1]):
raise ValueError("`data` is not properly ranked. Rank the data or "
"pass `ranked=False`.")
else:
ranks = scipy.stats.rankdata(data, axis=-1)
# generate predicted ranks if not provided, ensure valid NumPy array
if predicted_ranks is None:
predicted_ranks = np.arange(1, n+1)
else:
predicted_ranks = np.array(predicted_ranks, copy=False)
if (predicted_ranks.ndim < 1 or
(set(predicted_ranks) != set(range(1, n+1)) or
len(predicted_ranks) != n)):
raise ValueError(f"`predicted_ranks` must include each integer "
f"from 1 to {n} (the number of columns in "
f"`data`) exactly once.")
if type(ranked) is not bool:
raise TypeError("`ranked` must be boolean.")
# Calculate the L statistic
L = _l_vectorized(ranks, predicted_ranks)
# Calculate the p-value
if method == "auto":
method = _choose_method(ranks)
p_fun = methods[method] # get the function corresponding with the method
p = p_fun(L, m, n)
page_result = PageTrendTestResult(statistic=L, pvalue=p, method=method)
return page_result
def _choose_method(ranks):
'''Choose method for computing p-value automatically'''
m, n = ranks.shape
if n > 8 or (m > 12 and n > 3) or m > 20: # as in [1], [4]
method = "asymptotic"
else:
method = "exact"
return method
def _l_vectorized(ranks, predicted_ranks):
'''Calculate Page's L statistic for each page of a 3d array'''
colsums = ranks.sum(axis=-2, keepdims=True)
products = predicted_ranks * colsums
Ls = products.sum(axis=-1)
Ls = Ls[0] if Ls.size == 1 else Ls.ravel()
return Ls
def _l_p_asymptotic(L, m, n):
'''Calculate the p-value of Page's L from the asymptotic distribution'''
# Using [1] as a reference, the asymptotic p-value would be calculated as:
# chi_L = (12*L - 3*m*n*(n+1)**2)**2/(m*n**2*(n**2-1)*(n+1))
# p = chi2.sf(chi_L, df=1, loc=0, scale=1)/2
# but this is insensitive to the direction of the hypothesized ranking
# See [2] page 151
E0 = (m*n*(n+1)**2)/4
V0 = (m*n**2*(n+1)*(n**2-1))/144
Lambda = (L-E0)/np.sqrt(V0)
# This is a one-sided "greater" test - calculate the probability that the
# L statistic under H0 would be greater than the observed L statistic
p = norm.sf(Lambda)
return p
def _l_p_exact(L, m, n):
'''Calculate the p-value of Page's L exactly'''
# [1] uses m, n; [5] uses n, k.
# Switch convention here because exact calculation code references [5].
L, n, k = int(L), int(m), int(n)
_pagel_state.set_k(k)
return _pagel_state.sf(L, n)
class _PageL:
'''Maintains state between `page_trend_test` executions'''
def __init__(self):
'''Lightweight initialization'''
self.all_pmfs = {}
def set_k(self, k):
'''Calculate lower and upper limits of L for single row'''
self.k = k
# See [5] top of page 52
self.a, self.b = (k*(k+1)*(k+2))//6, (k*(k+1)*(2*k+1))//6
def sf(self, l, n):
'''Survival function of Page's L statistic'''
ps = [self.pmf(l, n) for l in range(l, n*self.b + 1)]
return np.sum(ps)
def p_l_k_1(self):
'''Relative frequency of each L value over all possible single rows'''
# See [5] Equation (6)
ranks = range(1, self.k+1)
# generate all possible rows of length k
rank_perms = np.array(list(permutations(ranks)))
# compute Page's L for all possible rows
Ls = (ranks*rank_perms).sum(axis=1)
# count occurrences of each L value
counts = np.histogram(Ls, np.arange(self.a-0.5, self.b+1.5))[0]
# factorial(k) is number of possible permutations
return counts/math.factorial(self.k)
def pmf(self, l, n):
'''Recursive function to evaluate p(l, k, n); see [5] Equation 1'''
if n not in self.all_pmfs:
self.all_pmfs[n] = {}
if self.k not in self.all_pmfs[n]:
self.all_pmfs[n][self.k] = {}
# Cache results to avoid repeating calculation. Initially this was
# written with lru_cache, but this seems faster? Also, we could add
# an option to save this for future lookup.
if l in self.all_pmfs[n][self.k]:
return self.all_pmfs[n][self.k][l]
if n == 1:
ps = self.p_l_k_1() # [5] Equation 6
ls = range(self.a, self.b+1)
# not fast, but we'll only be here once
self.all_pmfs[n][self.k] = {l: p for l, p in zip(ls, ps)}
return self.all_pmfs[n][self.k][l]
p = 0
low = max(l-(n-1)*self.b, self.a) # [5] Equation 2
high = min(l-(n-1)*self.a, self.b)
# [5] Equation 1
for t in range(low, high+1):
p1 = self.pmf(l-t, n-1)
p2 = self.pmf(t, 1)
p += p1*p2
self.all_pmfs[n][self.k][l] = p
return p
# Maintain state for faster repeat calls to page_trend_test w/ method='exact'
_pagel_state = _PageL()
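# A minimal sketch of how this cached state is used internally; k=3 columns
# and n=2 rows are hypothetical values chosen for illustration:
#     _pagel_state.set_k(3)
#     p = _pagel_state.sf(25, 2)  # P(L >= 25) over all (3!)**2 rank tables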
| 18,999 | 38.583333 | 79 | py
scipy | scipy-main/scipy/stats/kde.py |
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.
import warnings
from . import _kde
__all__ = [ # noqa: F822
'gaussian_kde', 'linalg', 'logsumexp', 'check_random_state',
'atleast_2d', 'reshape', 'newaxis', 'exp', 'ravel', 'power',
'atleast_1d', 'squeeze', 'sum', 'transpose', 'cov',
'gaussian_kernel_estimate'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.stats.kde is deprecated and has no attribute "
f"{name}. Try looking in scipy.stats instead.")
warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
"the `scipy.stats.kde` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_kde, name)
| 923 | 27.875 | 76 | py
scipy | scipy-main/scipy/stats/_stats_py.py |
# Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python.
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
import warnings
import math
from math import gcd
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma
from numpy.lib import NumpyVersion
from numpy.testing import suppress_warnings
from scipy import sparse
from scipy.spatial.distance import cdist
from scipy.spatial import distance_matrix
from scipy.ndimage import _measurements
from scipy.optimize import milp, LinearConstraint
from scipy._lib._util import (check_random_state, MapWrapper, _get_nan,
rng_integers, _rename_parameter, _contains_nan)
import scipy.special as special
from scipy import linalg
from . import distributions
from . import _mstats_basic as mstats_basic
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from dataclasses import dataclass
from ._hypotests import _all_partitions
from ._stats_pythran import _compute_outer_prob_inside_method
from ._resampling import (MonteCarloMethod, PermutationMethod, BootstrapMethod,
monte_carlo_test, permutation_test, bootstrap,
_batch_generator)
from ._axis_nan_policy import (_axis_nan_policy_factory,
_broadcast_concatenate)
from ._binomtest import _binary_search_for_binom_tst as _binary_search
from scipy._lib._bunch import _make_tuple_bunch
from scipy import stats
from scipy.optimize import root_scalar
from scipy._lib.deprecation import _NoValue
from scipy._lib._util import normalize_axis_index
# In __all__ but deprecated for removal in SciPy 1.13.0
from scipy._lib._util import float_factorial # noqa
from scipy.stats._mstats_basic import (PointbiserialrResult, Ttest_1sampResult, # noqa
Ttest_relResult) # noqa
# Functions/classes in other files should be added in `__init__.py`, not here
__all__ = ['find_repeats', 'gmean', 'hmean', 'pmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera',
'scoreatpercentile', 'percentileofscore',
'cumfreq', 'relfreq', 'obrientransform',
'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd',
'median_abs_deviation',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
'f_oneway', 'pearsonr', 'fisher_exact',
'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau', 'multiscale_graphcorr',
'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
'kstest', 'ks_1samp', 'ks_2samp',
'chisquare', 'power_divergence',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'rankdata',
'combine_pvalues', 'wasserstein_distance', 'energy_distance',
'brunnermunzel', 'alexandergovern',
'expectile', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
SignificanceResult = _make_tuple_bunch('SignificanceResult',
['statistic', 'pvalue'], [])
# note that `weights` are paired with `x`
@_axis_nan_policy_factory(
lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
def gmean(a, axis=0, dtype=None, weights=None):
r"""Compute the weighted geometric mean along the specified axis.
The weighted geometric mean of the array :math:`a_i` associated to weights
:math:`w_i` is:
.. math::
\exp \left( \frac{ \sum_{i=1}^n w_i \ln a_i }{ \sum_{i=1}^n w_i }
\right) \, ,
and, with equal weights, it gives:
.. math::
\sqrt[n]{ \prod_{i=1}^n a_i } \, .
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type to which the input arrays are cast before the calculation is
performed.
weights : array_like, optional
The `weights` array must be broadcastable to the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
Returns
-------
gmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
References
----------
.. [1] "Weighted Geometric Mean", *Wikipedia*,
https://en.wikipedia.org/wiki/Weighted_geometric_mean.
.. [2] Grossman, J., Grossman, M., Katz, R., "Averages: A New Approach",
Archimedes Foundation, 1983
Examples
--------
>>> from scipy.stats import gmean
>>> gmean([1, 4])
2.0
>>> gmean([1, 2, 3, 4, 5, 6, 7])
3.3800151591412964
>>> gmean([1, 4, 7], weights=[3, 1, 3])
2.80668351922014
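Equivalently, the geometric mean is the exponential of the arithmetic mean
of the logarithms, which is how it is computed here; a quick check:
>>> import numpy as np
>>> np.exp(np.log([1, 2, 3, 4, 5, 6, 7]).mean())
3.3800151591412964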
"""
a = np.asarray(a, dtype=dtype)
if weights is not None:
weights = np.asarray(weights, dtype=dtype)
with np.errstate(divide='ignore'):
log_a = np.log(a)
return np.exp(np.average(log_a, axis=axis, weights=weights))
@_axis_nan_policy_factory(
lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
def hmean(a, axis=0, dtype=None, *, weights=None):
r"""Calculate the weighted harmonic mean along the specified axis.
The weighted harmonic mean of the array :math:`a_i` associated to weights
:math:`w_i` is:
.. math::
\frac{ \sum_{i=1}^n w_i }{ \sum_{i=1}^n \frac{w_i}{a_i} } \, ,
and, with equal weights, it gives:
.. math::
\frac{ n }{ \sum_{i=1}^n \frac{1}{a_i} } \, .
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
weights : array_like, optional
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given `axis`) or of the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
.. versionadded:: 1.9
Returns
-------
hmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
References
----------
.. [1] "Weighted Harmonic Mean", *Wikipedia*,
https://en.wikipedia.org/wiki/Harmonic_mean#Weighted_harmonic_mean
.. [2] Ferger, F., "The nature and use of the harmonic mean", Journal of
the American Statistical Association, vol. 26, pp. 36-40, 1931
Examples
--------
>>> from scipy.stats import hmean
>>> hmean([1, 4])
1.6000000000000001
>>> hmean([1, 2, 3, 4, 5, 6, 7])
2.6997245179063363
>>> hmean([1, 4, 7], weights=[3, 1, 3])
1.9029126213592233
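With equal weights, this is simply the reciprocal of the arithmetic mean
of the reciprocals; a quick check with plain NumPy:
>>> import numpy as np
>>> 1 / np.mean(1 / np.array([1, 2, 3, 4, 5, 6, 7]))
2.6997245179063363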
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
a = np.ma.asarray(a, dtype=dtype)
else:
a = np.asarray(a, dtype=dtype)
if np.all(a >= 0):
# Harmonic mean only defined if greater than or equal to zero.
if weights is not None:
weights = np.asanyarray(weights, dtype=dtype)
with np.errstate(divide='ignore'):
return 1.0 / np.average(1.0 / a, axis=axis, weights=weights)
else:
raise ValueError("Harmonic mean only defined if all elements greater "
"than or equal to zero")
@_axis_nan_policy_factory(
lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
def pmean(a, p, *, axis=0, dtype=None, weights=None):
r"""Calculate the weighted power mean along the specified axis.
The weighted power mean of the array :math:`a_i` associated to weights
:math:`w_i` is:
.. math::
\left( \frac{ \sum_{i=1}^n w_i a_i^p }{ \sum_{i=1}^n w_i }
\right)^{ 1 / p } \, ,
and, with equal weights, it gives:
.. math::
\left( \frac{ 1 }{ n } \sum_{i=1}^n a_i^p \right)^{ 1 / p } \, .
When ``p=0``, it returns the geometric mean.
This mean is also called generalized mean or Hölder mean, and must not be
confused with the Kolmogorov generalized mean, also called
quasi-arithmetic mean or generalized f-mean [3]_.
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
p : int or float
Exponent.
axis : int or None, optional
Axis along which the power mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
weights : array_like, optional
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given `axis`) or of the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
Returns
-------
pmean : ndarray, see `dtype` parameter above.
Output array containing the power mean values.
See Also
--------
numpy.average : Weighted average
gmean : Geometric mean
hmean : Harmonic mean
Notes
-----
The power mean is computed over a single dimension of the input
array, ``axis=0`` by default, or all values in the array if ``axis=None``.
float64 intermediate and return values are used for integer inputs.
.. versionadded:: 1.9
References
----------
.. [1] "Generalized Mean", *Wikipedia*,
https://en.wikipedia.org/wiki/Generalized_mean
.. [2] Norris, N., "Convexity properties of generalized mean value
functions", The Annals of Mathematical Statistics, vol. 8,
pp. 118-120, 1937
.. [3] Bullen, P.S., Handbook of Means and Their Inequalities, 2003
Examples
--------
>>> from scipy.stats import pmean, hmean, gmean
>>> pmean([1, 4], 1.3)
2.639372938300652
>>> pmean([1, 2, 3, 4, 5, 6, 7], 1.3)
4.157111214492084
>>> pmean([1, 4, 7], -2, weights=[3, 1, 3])
1.4969684896631954
For p=-1, power mean is equal to harmonic mean:
>>> pmean([1, 4, 7], -1, weights=[3, 1, 3])
1.9029126213592233
>>> hmean([1, 4, 7], weights=[3, 1, 3])
1.9029126213592233
For p=0, power mean is defined as the geometric mean:
>>> pmean([1, 4, 7], 0, weights=[3, 1, 3])
2.80668351922014
>>> gmean([1, 4, 7], weights=[3, 1, 3])
2.80668351922014
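For p=1, the power mean coincides with the weighted arithmetic mean;
a quick check:
>>> import numpy as np
>>> pmean([1, 4, 7], 1, weights=[3, 1, 3])
4.0
>>> np.average([1, 4, 7], weights=[3, 1, 3])
4.0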
"""
if not isinstance(p, (int, float)):
raise ValueError("Power mean only defined for exponent of type int or "
"float.")
if p == 0:
return gmean(a, axis=axis, dtype=dtype, weights=weights)
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
a = np.ma.asarray(a, dtype=dtype)
else:
a = np.asarray(a, dtype=dtype)
if np.all(a >= 0):
# Power mean only defined if greater than or equal to zero
if weights is not None:
weights = np.asanyarray(weights, dtype=dtype)
with np.errstate(divide='ignore'):
return np.float_power(
np.average(np.float_power(a, p), axis=axis, weights=weights),
1/p)
else:
raise ValueError("Power mean only defined if all elements greater "
"than or equal to zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def _mode_result(mode, count):
# When a slice is empty, `_axis_nan_policy` automatically produces
# NaN for `mode` and `count`. This is a reasonable convention for `mode`,
# but `count` should not be NaN; it should be zero.
i = np.isnan(count)
if i.shape == ():
count = count.dtype(0) if i else count
else:
count[i] = 0
return ModeResult(mode, count)
@_axis_nan_policy_factory(_mode_result, override={'vectorization': True,
'nan_propagation': False})
def mode(a, axis=0, nan_policy='propagate', keepdims=False):
r"""Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only one is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
Numeric, n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': treats nan as it would treat any other value
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
keepdims : bool, optional
If set to ``False``, the `axis` over which the statistic is taken
is consumed (eliminated from the output array). If set to ``True``,
the `axis` is retained with size one, and the result will broadcast
correctly against the input array.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Notes
-----
The mode is calculated using `numpy.unique`.
In NumPy versions 1.21 and after, all NaNs - even those with different
binary representations - are treated as equivalent and counted as separate
instances of the same value.
By convention, the mode of an empty array is NaN, and the associated count
is zero.
Examples
--------
>>> import numpy as np
>>> a = np.array([[3, 0, 3, 7],
... [3, 2, 6, 2],
... [1, 7, 2, 8],
... [3, 0, 6, 1],
... [3, 2, 5, 5]])
>>> from scipy import stats
>>> stats.mode(a, keepdims=True)
ModeResult(mode=array([[3, 0, 6, 1]]), count=array([[4, 2, 2, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None, keepdims=True)
ModeResult(mode=[[3]], count=[[5]])
>>> stats.mode(a, axis=None, keepdims=False)
ModeResult(mode=3, count=5)
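If multiple values tie for the highest count, only the smallest is
returned, a consequence of this implementation's use of the sorted
output of `numpy.unique`:
>>> stats.mode([1, 1, 2, 2])
ModeResult(mode=1, count=2)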
""" # noqa: E501
# `axis`, `nan_policy`, and `keepdims` are handled by `_axis_nan_policy`
if not np.issubdtype(a.dtype, np.number):
message = ("Argument `a` is not recognized as numeric. "
"Support for input that cannot be coerced to a numeric "
"array was deprecated in SciPy 1.9.0 and removed in SciPy "
"1.11.0. Please consider `np.unique`.")
raise TypeError(message)
if a.size == 0:
NaN = _get_nan(a)
return ModeResult(*np.array([NaN, 0], dtype=NaN.dtype))
vals, cnts = np.unique(a, return_counts=True)
modes, counts = vals[cnts.argmax()], cnts.max()
return ModeResult(modes[()], counts[()])
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
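# An illustration of `_mask_to_limits` with arbitrary values: for
# a=[0, 1, 2, 3, 4], limits=(1, 3), and inclusive=(True, True), the
# entries 0 and 4 are masked out while 1, 2, and 3 survive.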
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute the trimmed mean. Default is None.
Returns
-------
tmean : ndarray
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, axis)
am = _mask_to_limits(a, limits, inclusive)
mean = np.ma.filled(am.mean(axis=axis), fill_value=np.nan)
return mean if mean.ndim > 0 else mean.item()
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed variance.
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float)
if limits is None:
return a.var(ddof=ddof, axis=axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanvar(amnan, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed minimum.
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed maximum.
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
Array of values.
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmax : float, int or ndarray
Trimmed maximum.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed sample standard deviation.
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Trimmed sample standard deviation.
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Trimmed standard error of the mean.
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def _moment_outputs(kwds):
moment = np.atleast_1d(kwds.get('moment', 1))
if moment.size == 0:
raise ValueError("'moment' must be a scalar or a non-empty 1D "
"list/array.")
return len(moment)
def _moment_result_object(*args):
if len(args) == 1:
return args[0]
return np.asarray(args)
# `moment` fits into the `_axis_nan_policy` pattern, but it is a bit unusual
# because the number of outputs is variable. Specifically,
# `result_to_tuple=lambda x: (x,)` may be surprising for a function that
# can produce more than one output, but it is intended here.
# When `moment` is called to produce the output:
# - `result_to_tuple` packs the returned array into a single-element tuple,
# - `_moment_result_object` extracts and returns that single element.
# However, when the input array is empty, `moment` is never called. Instead,
# - `_check_empty_inputs` is used to produce an empty array with the
# appropriate dimensions.
# - A list comprehension creates the appropriate number of copies of this
# array, depending on `n_outputs`.
# - This list - which may have multiple elements - is passed into
# `_moment_result_object`.
# - If there is a single output, `_moment_result_object` extracts and returns
# the single output from the list.
# - If there are multiple outputs, and therefore multiple elements in the list,
# `_moment_result_object` converts the list of arrays to a single array and
# returns it.
# Currently this leads to a slight inconsistency: when the input array is
# empty, there is no distinction between the `moment` function being called
with parameter `moment=1` and `moment=[1]`; the latter *should* produce
# the same as the former but with a singleton zeroth dimension.
@_axis_nan_policy_factory( # noqa: E302
_moment_result_object, n_samples=1, result_to_tuple=lambda x: (x,),
n_outputs=_moment_outputs
)
def moment(a, moment=1, axis=0, nan_policy='propagate', *, center=None):
r"""Calculate the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of
points. It is often used to calculate coefficients of skewness and kurtosis
due to its close relationship with them.
Parameters
----------
a : array_like
Input array.
moment : int or array_like of ints, optional
Order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
center : float or None, optional
The point about which moments are taken. This can be the sample mean,
the origin, or any other point. If `None` (default), compute the
center as the sample mean.
Returns
-------
n-th moment about the `center` : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See Also
--------
kurtosis, skew, describe
Notes
-----
The k-th moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - c)^k
Where `n` is the number of samples, and `c` is the center around which the
moment is calculated. This function uses exponentiation by squares [1]_ for
efficiency.
Note that, if `a` is an empty array (``a.size == 0``), array `moment` with
one element (`moment.size == 1`) is treated the same as scalar `moment`
(``np.isscalar(moment)``). This might produce arrays of unexpected shape.
References
----------
.. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
Examples
--------
>>> from scipy.stats import moment
>>> moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> moment([1, 2, 3, 4, 5], moment=2)
2.0
"""
a, axis = _chk_asarray(a, axis)
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
# Calculate the mean once at most, and only if it will be used
calculate_mean = center is None and np.any(np.asarray(moment) > 1)
mean = a.mean(axis, keepdims=True) if calculate_mean else None
mmnt = []
for i in moment:
if center is None and i > 1:
mmnt.append(_moment(a, i, axis, mean=mean))
else:
mmnt.append(_moment(a, i, axis, mean=center))
return np.array(mmnt)
else:
return _moment(a, moment, axis, mean=center)
# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
def _moment(a, moment, axis, *, mean=None):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
# moment of empty array is the same regardless of order
if a.size == 0:
return np.mean(a, axis=axis)
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
if moment == 0 or (moment == 1 and mean is None):
# By definition the zeroth moment is always 1, and the first *central*
# moment is 0.
shape = list(a.shape)
del shape[axis]
if len(shape) == 0:
return dtype(1.0 if moment == 0 else 0.0)
else:
return (np.ones(shape, dtype=dtype) if moment == 0
else np.zeros(shape, dtype=dtype))
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
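# For example, moment=11 yields n_list == [11, 5.0, 2.0]; the squaring
# loop below then builds x**2 -> (x**2)**2 * x = x**5 -> (x**5)**2 * x
# = x**11, using ~log2(moment) squarings rather than moment-1
# sequential multiplications.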
# Starting point for exponentiation by squares
mean = (a.mean(axis, keepdims=True) if mean is None
else np.asarray(mean, dtype=dtype)[()])
a_zero_mean = a - mean
eps = np.finfo(a_zero_mean.dtype).resolution * 10
with np.errstate(divide='ignore', invalid='ignore'):
rel_diff = np.max(np.abs(a_zero_mean), axis=axis,
keepdims=True) / np.abs(mean)
with np.errstate(invalid='ignore'):
precision_loss = np.any(rel_diff < eps)
n = a.shape[axis] if axis is not None else a.size
if precision_loss and n > 1:
message = ("Precision loss occurred in moment calculation due to "
"catastrophic cancellation. This occurs when the data "
"are nearly identical. Results may be unreliable.")
warnings.warn(message, RuntimeWarning, stacklevel=4)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def _var(x, axis=0, ddof=0, mean=None):
# Calculate variance of sample, warning if precision is lost
var = _moment(x, 2, axis, mean=mean)
if ddof != 0:
n = x.shape[axis] if axis is not None else x.size
var *= np.divide(n, n-ddof) # to avoid error on division by zero
return var
@_axis_nan_policy_factory(
lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
r"""Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
Parameters
----------
a : ndarray
Input array.
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning NaN where all values
are equal.
Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math::
g_1=\frac{m_3}{m_2^{3/2}}
where
.. math::
m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
is the biased sample :math:`i\texttt{th}` central moment, and
:math:`\bar{x}` is
the sample mean. If ``bias`` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
.. math::
G_1=\frac{k_3}{k_2^{3/2}}=
\frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
Examples
--------
>>> from scipy.stats import skew
>>> skew([1, 2, 3, 4, 5])
0.0
>>> skew([2, 8, 0, 4, 1, 9, 9, 0])
0.2650554122698573
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m3 = _moment(a, 3, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, np.nan, m3 / m2**1.5)
if not bias:
can_correct = ~zero & (n > 2)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
np.place(vals, can_correct, nval)
return vals[()]
@_axis_nan_policy_factory(
lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
)
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators.
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
Data for which the kurtosis is calculated.
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis, returning NaN where all values
are equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
In Fisher's definition, the kurtosis of the normal distribution is zero.
In the following example, the kurtosis is close to zero, because it was
calculated from the dataset, not from the continuous distribution.
>>> import numpy as np
>>> from scipy.stats import norm, kurtosis
>>> data = norm.rvs(size=1000, random_state=3)
>>> kurtosis(data)
-0.06928694200380558
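With ``fisher=False``, Pearson's definition is used instead; it exceeds
Fisher's value by exactly 3. A small sample chosen for easy arithmetic:
>>> kurtosis([1, 2, 3, 4, 5])
-1.3
>>> kurtosis([1, 2, 3, 4, 5], fisher=False)
1.7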
The distribution with a higher kurtosis has a heavier tail.
The zero valued kurtosis of the normal distribution in Fisher's definition
can serve as a reference point.
>>> import matplotlib.pyplot as plt
>>> import scipy.stats as stats
>>> from scipy.stats import kurtosis
>>> x = np.linspace(-5, 5, 100)
>>> ax = plt.subplot()
>>> distnames = ['laplace', 'norm', 'uniform']
>>> for distname in distnames:
... if distname == 'uniform':
... dist = getattr(stats, distname)(loc=-2, scale=4)
... else:
... dist = getattr(stats, distname)
... data = dist.rvs(size=1000)
... kur = kurtosis(data, fisher=True)
... y = dist.pdf(x)
... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
... ax.legend()
The Laplace distribution has a heavier tail than the normal distribution.
The uniform distribution (which has negative kurtosis) has the thinnest
tail.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m4 = _moment(a, 4, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, np.nan, m4 / m2**2.0)
if not bias:
can_correct = ~zero & (n > 3)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
return vals[()] - 3 if fisher else vals[()]
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""Compute several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected
for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
nobs : int or ndarray of ints
Number of observations (length of data along `axis`).
When 'omit' is chosen as nan_policy, the length along each axis
slice is counted separately.
minmax: tuple of ndarrays or floats
Minimum and maximum value of `a` along the given axis.
mean : ndarray or float
Arithmetic mean of `a` along the given axis.
variance : ndarray or float
Unbiased variance of `a` along the given axis; denominator is number
of observations minus one.
skewness : ndarray or float
Skewness of `a` along the given axis, based on moment calculations
with denominator equal to the number of observations, i.e. no degrees
of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher) of `a` along the given axis. The kurtosis is
normalized so that it is zero for the normal distribution. No
degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5,
variance=9.166666666666666, skewness=0.0,
kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([2., 3.]), variance=array([2., 2.]),
skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = _var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def _normtest_finish(z, alternative):
"""Common code between all the normality-test functions."""
if alternative == 'less':
prob = distributions.norm.cdf(z)
elif alternative == 'greater':
prob = distributions.norm.sf(z)
elif alternative == 'two-sided':
prob = 2 * distributions.norm.sf(np.abs(z))
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if z.ndim == 0:
z = z[()]
return z, prob
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
r"""Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the skewness of the distribution underlying the sample
is different from that of the normal distribution (i.e. 0)
* 'less': the skewness of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the skewness of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
.. [2] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test
for normality (complete samples). Biometrika, 52(3/4), 591-611.
.. [3] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn." Statistical Applications in Genetics and Molecular Biology
9.1 (2010).
Examples
--------
Suppose we wish to infer from measurements whether the weights of adult
human males in a medical study are not normally distributed [2]_.
The weights (lbs) are recorded in the array ``x`` below.
>>> import numpy as np
>>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236])
The skewness test from [1]_ begins by computing a statistic based on the
sample skewness.
>>> from scipy import stats
>>> res = stats.skewtest(x)
>>> res.statistic
2.7788579769903414
Because normal distributions have zero skewness, the magnitude of this
statistic tends to be low for samples drawn from a normal distribution.
The test is performed by comparing the observed value of the
statistic against the null distribution: the distribution of statistic
values derived under the null hypothesis that the weights were drawn from
a normal distribution.
For this test, the null distribution of the statistic for very large
samples is the standard normal distribution.
>>> import matplotlib.pyplot as plt
>>> dist = stats.norm()
>>> st_val = np.linspace(-5, 5, 100)
>>> pdf = dist.pdf(st_val)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def st_plot(ax): # we'll re-use this
... ax.plot(st_val, pdf)
... ax.set_title("Skew Test Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
>>> st_plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution as extreme or more extreme than the observed
value of the statistic. In a two-sided test, elements of the null
distribution greater than the observed statistic and elements of the null
distribution less than the negative of the observed statistic are both
considered "more extreme".
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> st_plot(ax)
>>> pvalue = dist.cdf(-res.statistic) + dist.sf(res.statistic)
>>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (3, 0.005), (3.25, 0.02), arrowprops=props)
>>> i = st_val >= res.statistic
>>> ax.fill_between(st_val[i], y1=0, y2=pdf[i], color='C0')
>>> i = st_val <= -res.statistic
>>> ax.fill_between(st_val[i], y1=0, y2=pdf[i], color='C0')
>>> ax.set_xlim(-5, 5)
>>> ax.set_ylim(0, 0.1)
>>> plt.show()
>>> res.pvalue
0.005455036974740185
If the p-value is "small" - that is, if there is a low probability of
sampling data from a normally distributed population that produces such an
extreme value of the statistic - this may be taken as evidence against
the null hypothesis in favor of the alternative: the weights were not
drawn from a normal distribution. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [3]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
Note that the standard normal distribution provides an asymptotic
approximation of the null distribution; it is only accurate for samples
with many observations. For small samples like ours,
`scipy.stats.monte_carlo_test` may provide a more accurate, albeit
stochastic, approximation of the exact p-value.
>>> def statistic(x, axis):
... # get just the skewtest statistic; ignore the p-value
... return stats.skewtest(x, axis=axis).statistic
>>> res = stats.monte_carlo_test(x, stats.norm.rvs, statistic)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> st_plot(ax)
>>> ax.hist(res.null_distribution, np.linspace(-5, 5, 50),
... density=True)
>>> ax.legend(['asymptotic approximation\n(many observations)',
... 'Monte Carlo approximation\n(11 observations)'])
>>> plt.show()
>>> res.pvalue
0.0062 # may vary
In this case, the asymptotic approximation and Monte Carlo approximation
agree fairly closely, even for our small sample.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis, alternative)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = a.shape[axis]
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(*_normtest_finish(Z, alternative))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
r"""Test whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution.
Parameters
----------
a : array
Array of the sample data.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the kurtosis of the distribution underlying the sample
is different from that of the normal distribution
* 'less': the kurtosis of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the kurtosis of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
Valid only for n>=20. This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
.. [2] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test
for normality (complete samples). Biometrika, 52(3/4), 591-611.
.. [3] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn." Statistical Applications in Genetics and Molecular Biology
9.1 (2010).
.. [4] Panagiotakos, D. B. (2008). The value of p-value in biomedical
research. The open cardiovascular medicine journal, 2, 97.
Examples
--------
Suppose we wish to infer from measurements whether the weights of adult
human males in a medical study are not normally distributed [2]_.
The weights (lbs) are recorded in the array ``x`` below.
>>> import numpy as np
>>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236])
The kurtosis test from [1]_ begins by computing a statistic based on the
sample (excess/Fisher) kurtosis.
>>> from scipy import stats
>>> res = stats.kurtosistest(x)
>>> res.statistic
2.3048235214240873
(The test warns that our sample has too few observations to perform the
test. We'll return to this at the end of the example.)
Because normal distributions have zero excess kurtosis (by definition),
the magnitude of this statistic tends to be low for samples drawn from a
normal distribution.
The test is performed by comparing the observed value of the
statistic against the null distribution: the distribution of statistic
values derived under the null hypothesis that the weights were drawn from
a normal distribution.
For this test, the null distribution of the statistic for very large
samples is the standard normal distribution.
>>> import matplotlib.pyplot as plt
>>> dist = stats.norm()
>>> kt_val = np.linspace(-5, 5, 100)
>>> pdf = dist.pdf(kt_val)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def kt_plot(ax): # we'll re-use this
... ax.plot(kt_val, pdf)
... ax.set_title("Kurtosis Test Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
>>> kt_plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution as extreme or more extreme than the observed
value of the statistic. In a two-sided test in which the statistic is
positive, elements of the null distribution greater than the observed
statistic and elements of the null distribution less than the negative of
the observed statistic are both considered "more extreme".
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> kt_plot(ax)
>>> pvalue = dist.cdf(-res.statistic) + dist.sf(res.statistic)
>>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (3, 0.005), (3.25, 0.02), arrowprops=props)
>>> i = kt_val >= res.statistic
>>> ax.fill_between(kt_val[i], y1=0, y2=pdf[i], color='C0')
>>> i = kt_val <= -res.statistic
>>> ax.fill_between(kt_val[i], y1=0, y2=pdf[i], color='C0')
>>> ax.set_xlim(-5, 5)
>>> ax.set_ylim(0, 0.1)
>>> plt.show()
>>> res.pvalue
0.0211764592113868
If the p-value is "small" - that is, if there is a low probability of
sampling data from a normally distributed population that produces such an
extreme value of the statistic - this may be taken as evidence against
the null hypothesis in favor of the alternative: the weights were not
drawn from a normal distribution. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [3]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
Note that the standard normal distribution provides an asymptotic
approximation of the null distribution; it is only accurate for samples
with many observations. This is the reason we received a warning at the
beginning of the example; our sample is quite small. In this case,
`scipy.stats.monte_carlo_test` may provide a more accurate, albeit
stochastic, approximation of the exact p-value.
>>> def statistic(x, axis):
...     # get just the kurtosistest statistic; ignore the p-value
... return stats.kurtosistest(x, axis=axis).statistic
>>> res = stats.monte_carlo_test(x, stats.norm.rvs, statistic)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> kt_plot(ax)
>>> ax.hist(res.null_distribution, np.linspace(-5, 5, 50),
... density=True)
>>> ax.legend(['asymptotic approximation\n(many observations)',
... 'Monte Carlo approximation\n(11 observations)'])
>>> plt.show()
>>> res.pvalue
0.0272 # may vary
Furthermore, despite their stochastic nature, p-values computed in this way
can be used to exactly control the rate of false rejections of the null
hypothesis [4]_.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis, alternative)
n = a.shape[axis]
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
np.power((1-2.0/A)/np.abs(denom), 1/3.0))
if np.any(denom == 0):
msg = "Test statistic not defined in some cases due to division by " \
"zero. Return nan in that case..."
warnings.warn(msg, RuntimeWarning)
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(*_normtest_finish(Z, alternative))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
r"""Test whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the sample to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi-squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
.. [3] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test
for normality (complete samples). Biometrika, 52(3/4), 591-611.
.. [4] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn." Statistical Applications in Genetics and Molecular Biology
9.1 (2010).
.. [5] Panagiotakos, D. B. (2008). The value of p-value in biomedical
research. The open cardiovascular medicine journal, 2, 97.
Examples
--------
Suppose we wish to infer from measurements whether the weights of adult
human males in a medical study are not normally distributed [3]_.
The weights (lbs) are recorded in the array ``x`` below.
>>> import numpy as np
>>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236])
The normality test of [1]_ and [2]_ begins by computing a statistic based
on the sample skewness and kurtosis.
>>> from scipy import stats
>>> res = stats.normaltest(x)
>>> res.statistic
13.034263121192582
(The test warns that our sample has too few observations to perform the
test. We'll return to this at the end of the example.)
Because the normal distribution has zero skewness and zero
("excess" or "Fisher") kurtosis, the value of this statistic tends to be
low for samples drawn from a normal distribution.
The test is performed by comparing the observed value of the statistic
against the null distribution: the distribution of statistic values derived
under the null hypothesis that the weights were drawn from a normal
distribution.
For this normality test, the null distribution for very large samples is
the chi-squared distribution with two degrees of freedom.
>>> import matplotlib.pyplot as plt
>>> dist = stats.chi2(df=2)
>>> stat_vals = np.linspace(0, 16, 100)
>>> pdf = dist.pdf(stat_vals)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def plot(ax): # we'll re-use this
... ax.plot(stat_vals, pdf)
... ax.set_title("Normality Test Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
>>> plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution greater than or equal to the observed value of the
statistic.
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> pvalue = dist.sf(res.statistic)
>>> annotation = (f'p-value={pvalue:.6f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (13.5, 5e-4), (14, 5e-3), arrowprops=props)
>>> i = stat_vals >= res.statistic # index more extreme statistic values
>>> ax.fill_between(stat_vals[i], y1=0, y2=pdf[i])
>>> ax.set_xlim(8, 16)
>>> ax.set_ylim(0, 0.01)
>>> plt.show()
>>> res.pvalue
0.0014779023013100172
If the p-value is "small" - that is, if there is a low probability of
sampling data from a normally distributed population that produces such an
extreme value of the statistic - this may be taken as evidence against
the null hypothesis in favor of the alternative: the weights were not
drawn from a normal distribution. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [4]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
Note that the chi-squared distribution provides an asymptotic
approximation of the null distribution; it is only accurate for samples
with many observations. This is the reason we received a warning at the
beginning of the example; our sample is quite small. In this case,
`scipy.stats.monte_carlo_test` may provide a more accurate, albeit
stochastic, approximation of the exact p-value.
>>> def statistic(x, axis):
... # Get only the `normaltest` statistic; ignore approximate p-value
... return stats.normaltest(x, axis=axis).statistic
>>> res = stats.monte_carlo_test(x, stats.norm.rvs, statistic,
... alternative='greater')
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> ax.hist(res.null_distribution, np.linspace(0, 25, 50),
... density=True)
>>> ax.legend(['asymptotic approximation (many observations)',
... 'Monte Carlo approximation (11 observations)'])
>>> ax.set_xlim(0, 14)
>>> plt.show()
>>> res.pvalue
0.0082 # may vary
Furthermore, despite their stochastic nature, p-values computed in this way
can be used to exactly control the rate of false rejections of the null
hypothesis [5]_.
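As a quick consistency check (grounded in how the statistic is defined), the
observed statistic equals the sum of squares of the `skewtest` and
`kurtosistest` z-scores:
>>> s = stats.skewtest(x).statistic
>>> k = stats.kurtosistest(x).statistic
>>> s*s + k*k
13.034263121192582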
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
@_axis_nan_policy_factory(SignificanceResult, default_axis=None)
def jarque_bera(x, *, axis=None):
r"""Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000), as the test statistic asymptotically has a chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
axis : int or None, default: 0
If an int, the axis of the input along which to compute the statistic.
The statistic of each axis-slice (e.g. row) of the input will appear in
a corresponding element of the output.
If ``None``, the input will be raveled before computing the statistic.
Returns
-------
result : SignificanceResult
An object with the following attributes:
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
.. [2] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test
for normality (complete samples). Biometrika, 52(3/4), 591-611.
.. [3] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn." Statistical Applications in Genetics and Molecular Biology
9.1 (2010).
.. [4] Panagiotakos, D. B. (2008). The value of p-value in biomedical
research. The open cardiovascular medicine journal, 2, 97.
Examples
--------
Suppose we wish to infer from measurements whether the weights of adult
human males in a medical study are not normally distributed [2]_.
The weights (lbs) are recorded in the array ``x`` below.
>>> import numpy as np
>>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236])
The Jarque-Bera test begins by computing a statistic based on the sample
skewness and kurtosis.
>>> from scipy import stats
>>> res = stats.jarque_bera(x)
>>> res.statistic
6.982848237344646
Because the normal distribution has zero skewness and zero
("excess" or "Fisher") kurtosis, the value of this statistic tends to be
low for samples drawn from a normal distribution.
The test is performed by comparing the observed value of the statistic
against the null distribution: the distribution of statistic values derived
under the null hypothesis that the weights were drawn from a normal
distribution.
For the Jarque-Bera test, the null distribution for very large samples is
the chi-squared distribution with two degrees of freedom.
>>> import matplotlib.pyplot as plt
>>> dist = stats.chi2(df=2)
>>> jb_val = np.linspace(0, 11, 100)
>>> pdf = dist.pdf(jb_val)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def jb_plot(ax): # we'll re-use this
... ax.plot(jb_val, pdf)
... ax.set_title("Jarque-Bera Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
>>> jb_plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution greater than or equal to the observed value of the
statistic.
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> jb_plot(ax)
>>> pvalue = dist.sf(res.statistic)
>>> annotation = (f'p-value={pvalue:.6f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (7.5, 0.01), (8, 0.05), arrowprops=props)
>>> i = jb_val >= res.statistic # indices of more extreme statistic values
>>> ax.fill_between(jb_val[i], y1=0, y2=pdf[i])
>>> ax.set_xlim(0, 11)
>>> ax.set_ylim(0, 0.3)
>>> plt.show()
>>> res.pvalue
0.03045746622458189
If the p-value is "small" - that is, if there is a low probability of
sampling data from a normally distributed population that produces such an
extreme value of the statistic - this may be taken as evidence against
the null hypothesis in favor of the alternative: the weights were not
drawn from a normal distribution. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [3]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
Note that the chi-squared distribution provides an asymptotic approximation
of the null distribution; it is only accurate for samples with many
observations. For small samples like ours, `scipy.stats.monte_carlo_test`
may provide a more accurate, albeit stochastic, approximation of the
exact p-value.
>>> def statistic(x, axis):
...     # underlying calculation of the Jarque-Bera statistic
... s = stats.skew(x, axis=axis)
... k = stats.kurtosis(x, axis=axis)
... return x.shape[axis]/6 * (s**2 + k**2/4)
>>> res = stats.monte_carlo_test(x, stats.norm.rvs, statistic,
... alternative='greater')
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> jb_plot(ax)
>>> ax.hist(res.null_distribution, np.linspace(0, 10, 50),
... density=True)
>>> ax.legend(['asymptotic approximation (many observations)',
... 'Monte Carlo approximation (11 observations)'])
>>> plt.show()
>>> res.pvalue
0.0097 # may vary
Furthermore, despite their stochastic nature, p-values computed in this way
can be used to exactly control the rate of false rejections of the null
hypothesis [4]_.
"""
x = np.asarray(x)
if axis is None:
x = x.ravel()
axis = 0
n = x.shape[axis]
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean(axis=axis, keepdims=True)
diffx = x - mu
s = skew(diffx, axis=axis, _no_deco=True)
k = kurtosis(diffx, axis=axis, _no_deco=True)
statistic = n / 6 * (s**2 + k**2 / 4)
pvalue = distributions.chi2.sf(statistic, df=2)
return SignificanceResult(statistic, pvalue)
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
Specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
The following options are available (default is 'fraction'):
* 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``
* 'lower': ``i``
* 'higher': ``j``
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides, and it is significantly faster.
Therefore, users with NumPy >= 1.9 are recommended to use `numpy.percentile`
instead.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
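When the desired percentile falls between two data points,
`interpolation_method` controls how the score is resolved; for the same data:
>>> stats.scoreatpercentile(a, 50, interpolation_method='lower')
49.0
>>> stats.scoreatpercentile(a, 50, interpolation_method='higher')
50.0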
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted_ = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted_, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank', nan_policy='propagate'):
"""Compute the percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array to which `score` is compared.
score : array_like
Scores to compute percentiles for.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
Specifies the interpretation of the resulting score.
The following options are available (default is 'rank'):
* 'rank': Average percentage ranking of score. In case of multiple
matches, average the percentage rankings of all matching scores.
* 'weak': This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80% means that 80%
of values are less than or equal to the provided score.
* 'strict': Similar to "weak", except that only values that are
strictly less than the given score are counted.
* 'mean': The average of the "weak" and "strict" scores, often used
in testing. See https://en.wikipedia.org/wiki/Percentile_rank
nan_policy : {'propagate', 'raise', 'omit'}, optional
Specifies how to treat `nan` values in `a`.
The following options are available (default is 'propagate'):
* 'propagate': returns nan (for each value in `score`).
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
scipy.stats.scoreatpercentile, scipy.stats.rankdata
Examples
--------
Three-quarters of the given values lie below a given score:
>>> import numpy as np
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
Score arrays (of any dimensionality) are supported:
>>> stats.percentileofscore([1, 2, 3, 3, 4], [2, 3])
array([40., 70.])
The inputs can be infinite:
>>> stats.percentileofscore([-np.inf, 0, 1, np.inf], [1, 2, np.inf])
array([75., 75., 100.])
If `a` is empty, then the resulting percentiles are all `nan`:
>>> stats.percentileofscore([], [1, 2])
array([nan, nan])
"""
a = np.asarray(a)
n = len(a)
score = np.asarray(score)
# Nan treatment
cna, npa = _contains_nan(a, nan_policy, use_summation=False)
cns, nps = _contains_nan(score, nan_policy, use_summation=False)
if (cna or cns) and nan_policy == 'raise':
raise ValueError("The input contains nan values")
if cns:
# If a score is nan, then the output should be nan
# (also if nan_policy is "omit", because it only applies to `a`)
score = ma.masked_where(np.isnan(score), score)
if cna:
if nan_policy == "omit":
# Don't count nans
a = ma.masked_where(np.isnan(a), a)
n = a.count()
if nan_policy == "propagate":
# All outputs should be nans
n = 0
# Cannot compare to empty list ==> nan
if n == 0:
perct = np.full_like(score, np.nan, dtype=np.float64)
else:
# Prepare broadcasting
score = score[..., None]
def count(x):
return np.count_nonzero(x, -1)
# Despite using masked_array to omit nan values from processing,
# the CI tests on "Azure pipelines" (but not on the other CI servers)
# emit warnings when there are nan values, contrary to the purpose
# of masked_arrays. As a fix, we simply suppress the warnings.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in less")
sup.filter(RuntimeWarning,
"invalid value encountered in greater")
# Main computations/logic
if kind == 'rank':
left = count(a < score)
right = count(a <= score)
plus1 = left < right
perct = (left + right + plus1) * (50.0 / n)
elif kind == 'strict':
perct = count(a < score) * (100.0 / n)
elif kind == 'weak':
perct = count(a <= score) * (100.0 / n)
elif kind == 'mean':
left = count(a < score)
right = count(a <= score)
perct = (left + right) * (50.0 / n)
else:
raise ValueError(
"kind can only be 'rank', 'strict', 'weak' or 'mean'")
# Re-insert nan values
perct = ma.filled(perct, np.nan)
if perct.ndim == 0:
return perct[()]
return perct
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None,
printextras=False):
"""Create a histogram.
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0.
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0.
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total number of
observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0.
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / a.shape[0]
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*samples):
"""Compute the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*samples`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
sample1, sample2, ... : array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
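As a sanity check (the function verifies this internally), the mean of the
transformed data equals the sample variance of the original data:
>>> import numpy as np
>>> np.allclose(tx.mean(), np.var(x, ddof=1))
True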
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
sLast = None
for sample in samples:
a = np.asarray(sample)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
sLast = a.shape
if sLast:
for arr in arrays[:-1]:
if sLast != arr.shape:
return np.array(arrays, dtype=object)
return np.array(arrays)
@_axis_nan_policy_factory(
lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, too_small=1
)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""Compute standard error of the mean.
Calculate the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` differs from the default (0) used by other
`ddof`-containing routines, such as `np.std` and `np.nanstd`.
Examples
--------
Find standard error along the first axis:
>>> import numpy as np
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
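For illustration, the result is the (bias-corrected) standard deviation
divided by the square root of the sample size; for the first column above:
>>> np.std(a[:, 0], ddof=1) / np.sqrt(5)
2.8284271247461903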
"""
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def _isconst(x):
"""
Check if all values in x are the same. nans are ignored.
x must be a 1d array.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([True])
else:
return (y[0] == y).all(keepdims=True)
def _quiet_nanmean(x):
"""
Compute nanmean for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.mean(y, keepdims=True)
def _quiet_nanstd(x, ddof=0):
"""
Compute nanstd for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.std(y, keepdims=True, ddof=ddof)
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the z score.
Compute the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the z-scores computed for the non-nan values.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
See Also
--------
numpy.mean : Arithmetic average
numpy.std : Arithmetic standard deviation
scipy.stats.gzscore : Geometric standard score
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
References
----------
.. [1] "Standard score", *Wikipedia*,
https://en.wikipedia.org/wiki/Standard_score.
.. [2] Huck, S. W., Cross, T. L., Clark, S. B, "Overcoming misconceptions
about Z-scores", Teaching Statistics, vol. 8, pp. 38-40, 1986
Examples
--------
>>> import numpy as np
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
An example with `nan_policy='omit'`:
>>> x = np.array([[25.11, 30.10, np.nan, 32.02, 43.15],
... [14.95, 16.06, 121.25, 94.35, 29.81]])
>>> stats.zscore(x, axis=1, nan_policy='omit')
array([[-1.13490897, -0.37830299, nan, -0.08718406, 1.60039602],
[-0.91611681, -0.89090508, 1.4983032 , 0.88731639, -0.5785977 ]])
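By definition, the z-scores above are simply ``(a - a.mean()) / a.std()``; a
quick check on the first example:
>>> np.allclose(stats.zscore(a), (a - a.mean()) / a.std())
True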
"""
return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
def gzscore(a, *, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the geometric standard score.
Compute the geometric z score of each strictly positive value in the
sample, relative to the geometric mean and standard deviation.
Mathematically the geometric z score can be evaluated as::
gzscore = log(a/gmu) / log(gsigma)
where ``gmu`` (resp. ``gsigma``) is the geometric mean (resp. standard
deviation).
Parameters
----------
a : array_like
Sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the geometric z scores computed for the non-nan values.
Returns
-------
gzscore : array_like
The geometric z scores, standardized by geometric mean and geometric
standard deviation of input array `a`.
See Also
--------
gmean : Geometric mean
gstd : Geometric standard deviation
zscore : Standard score
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses ``asanyarray`` instead of
``asarray`` for parameters).
.. versionadded:: 1.8
References
----------
.. [1] "Geometric standard score", *Wikipedia*,
https://en.wikipedia.org/wiki/Geometric_standard_deviation#Geometric_standard_score.
Examples
--------
Draw samples from a log-normal distribution:
>>> import numpy as np
>>> from scipy.stats import zscore, gzscore
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> mu, sigma = 3., 1. # mean and standard deviation
>>> x = rng.lognormal(mu, sigma, size=500)
Display the histogram of the samples:
>>> fig, ax = plt.subplots()
>>> ax.hist(x, 50)
>>> plt.show()
Display the histogram of the samples standardized by the classical zscore.
Distribution is rescaled but its shape is unchanged.
>>> fig, ax = plt.subplots()
>>> ax.hist(zscore(x), 50)
>>> plt.show()
Demonstrate that the distribution of geometric zscores is rescaled and
quasinormal:
>>> fig, ax = plt.subplots()
>>> ax.hist(gzscore(x), 50)
>>> plt.show()
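As the formula above suggests, the geometric z score is the ordinary z score
of the logarithm of the data (a quick check, valid for strictly positive
samples):
>>> np.allclose(gzscore(x), zscore(np.log(x)))
True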
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
return zscore(log(a), axis=axis, ddof=ddof, nan_policy=nan_policy)
def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
"""
Calculate the relative z-scores.
Return an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle the occurrence of nans in `compare`.
'propagate' returns nan, 'raise' raises an exception, 'omit'
performs the calculations ignoring nan values. Default is
'propagate'. Note that when the value is 'omit', nans in `scores`
also propagate to the output, but they do not affect the z-scores
computed for the non-nan values.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
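For illustration, the same result follows from standardizing `a` by the mean
and (population) standard deviation of `b`:
>>> import numpy as np
>>> (np.asarray(a) - np.mean(b)) / np.std(b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])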
"""
a = np.asanyarray(compare)
if a.size == 0:
return np.empty(a.shape)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
if axis is None:
mn = _quiet_nanmean(a.ravel())
std = _quiet_nanstd(a.ravel(), ddof=ddof)
isconst = _isconst(a.ravel())
else:
mn = np.apply_along_axis(_quiet_nanmean, axis, a)
std = np.apply_along_axis(_quiet_nanstd, axis, a, ddof=ddof)
isconst = np.apply_along_axis(_isconst, axis, a)
else:
mn = a.mean(axis=axis, keepdims=True)
std = a.std(axis=axis, ddof=ddof, keepdims=True)
if axis is None:
isconst = (a.item(0) == a).all()
else:
isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True)
# Set std deviations that are 0 to 1 to avoid division by 0.
std[isconst] = 1.0
z = (scores - mn) / std
# Set the outputs associated with a constant input to nan.
z[np.broadcast_to(isconst, z.shape)] = np.nan
return z
def gstd(a, axis=0, ddof=1):
"""
Calculate the geometric standard deviation of an array.
The geometric standard deviation describes the spread of a set of numbers
where the geometric mean is preferred. It is a multiplicative factor, and
so a dimensionless quantity.
It is defined as the exponent of the standard deviation of ``log(a)``.
Mathematically the population geometric standard deviation can be
evaluated as::
gstd = exp(std(log(a)))
.. versionadded:: 1.3.0
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int, tuple or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degree of freedom correction in the calculation of the
geometric standard deviation. Default is 1.
Returns
-------
gstd : ndarray or float
An array of the geometric standard deviation. If `axis` is None or `a`
is a 1d array, a float is returned.
See Also
--------
gmean : Geometric mean
numpy.std : Standard deviation
gzscore : Geometric standard score
Notes
-----
As the calculation requires the use of logarithms, the geometric standard
deviation only supports strictly positive values. Any non-positive or
infinite values will raise a `ValueError`.
The geometric standard deviation is sometimes confused with the exponent of
the standard deviation, ``exp(std(a))``. Instead the geometric standard
deviation is ``exp(std(log(a)))``.
The default value for `ddof` differs from the default value (0) used
by other `ddof`-containing functions, such as ``np.std`` and ``np.nanstd``.
References
----------
.. [1] "Geometric standard deviation", *Wikipedia*,
https://en.wikipedia.org/wiki/Geometric_standard_deviation.
.. [2] Kirkwood, T. B., "Geometric means and measures of dispersion",
Biometrics, vol. 35, pp. 908-909, 1979
Examples
--------
Find the geometric standard deviation of a log-normally distributed sample.
Note that the standard deviation of the distribution is one, on a
log scale this evaluates to approximately ``exp(1)``.
>>> import numpy as np
>>> from scipy.stats import gstd
>>> rng = np.random.default_rng()
>>> sample = rng.lognormal(mean=0, sigma=1, size=1000)
>>> gstd(sample)
2.810010162475324
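Equivalently, this is a direct evaluation of the definition with the default
``ddof=1``:
>>> np.exp(np.std(np.log(sample), ddof=1))
2.810010162475324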
Compute the geometric standard deviation of a multidimensional array and
of a given axis.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> gstd(a, axis=None)
2.2944076136018947
>>> gstd(a, axis=2)
array([[1.82424757, 1.22436866, 1.13183117],
[1.09348306, 1.07244798, 1.05914985]])
>>> gstd(a, axis=(1,2))
array([2.12939215, 1.22120169])
The geometric standard deviation further handles masked arrays.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> ma = np.ma.masked_where(a > 16, a)
>>> ma
masked_array(
data=[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[--, --, --, --],
[--, --, --, --]]],
mask=[[[False, False, False, False],
[False, False, False, False],
[False, False, False, False]],
[[False, False, False, False],
[ True, True, True, True],
[ True, True, True, True]]],
fill_value=999999)
>>> gstd(ma, axis=2)
masked_array(
data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
[1.0934830582350938, --, --]],
mask=[[False, False, False],
[False, True, True]],
fill_value=999999)
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
try:
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
return np.exp(np.std(log(a), axis=axis, ddof=ddof))
except RuntimeWarning as w:
if np.isinf(a).any():
raise ValueError(
'Infinite value encountered. The geometric standard deviation '
'is defined for strictly positive values only.'
) from w
a_nan = np.isnan(a)
a_nan_any = a_nan.any()
# exclude NaN's from negativity check, but
# avoid expensive masking for arrays with no NaN
if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
(not a_nan_any and np.less_equal(a, 0).any())):
raise ValueError(
'Non positive value encountered. The geometric standard '
'deviation is defined for strictly positive values only.'
) from w
elif 'Degrees of freedom <= 0 for slice' == str(w):
raise ValueError(w) from w
else:
# Remaining warnings don't need to be exceptions.
return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
except TypeError as e:
raise ValueError(
'Invalid array input. The inputs could not be '
'safely coerced to any supported types') from e
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
@_axis_nan_policy_factory(
lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1,
default_axis=None, override={'nan_propagation': False}
)
def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
interpolation='linear', keepdims=False):
r"""
Compute the interquartile range of the data along the specified axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute percentile
ranges other than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
``(25, 75)``. The order of the elements is not important.
scale : scalar or str or array_like of reals, optional
The numerical value of scale will be divided out of the final
result. The following string value is also recognized:
* 'normal' : Scale by
:math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.
The default is 1.0.
Array-like `scale` of real dtype is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
interpolation : str, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points ``i`` and ``j``.
The following options are available (default is 'linear'):
* 'linear': ``i + (j - i)*fraction``, where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
* 'lower': ``i``.
* 'higher': ``j``.
* 'nearest': ``i`` or ``j`` whichever is nearest.
* 'midpoint': ``(i + j)/2``.
For NumPy >= 1.22.0, the additional options provided by the ``method``
keyword of `numpy.percentile` are also valid.
keepdims : bool, optional
If this is set to True, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
Examples
--------
>>> import numpy as np
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return _get_nan(x)
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError(f"{scale} not a valid scale for `iqr`")
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = np.nanpercentile
else:
percentile_func = np.percentile
if len(rng) != 2:
raise TypeError("quantile range must be a two-element sequence")
if np.isnan(rng).any():
raise ValueError("range must not contain NaNs")
rng = sorted(rng)
if NumpyVersion(np.__version__) >= '1.22.0':
pct = percentile_func(x, rng, axis=axis, method=interpolation,
keepdims=keepdims)
else:
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def _mad_1d(x, center, nan_policy):
# Median absolute deviation for 1-d array x.
# This is a helper function for `median_abs_deviation`; it assumes its
# arguments have been validated already. In particular, x must be a
# 1-d numpy array, center must be callable, and if nan_policy is not
# 'propagate', it is assumed to be 'omit', because 'raise' is handled
# in `median_abs_deviation`.
# No warning is generated if x is empty or all nan.
isnan = np.isnan(x)
if isnan.any():
if nan_policy == 'propagate':
return np.nan
x = x[~isnan]
if x.size == 0:
# MAD of an empty array is nan.
return np.nan
# Edge cases have been handled, so do the basic MAD calculation.
med = center(x)
mad = np.median(np.abs(x - med))
return mad
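# Illustrative example (not part of the library code): with
# x = np.array([1., 2., 3., 4., 5.]) and center=np.median, the absolute
# deviations from the median 3.0 are [2, 1, 0, 1, 2], so _mad_1d returns 1.0.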
def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.5.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user-defined function used will need to have the
function signature ``func(arr, axis)``.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The default is 1.0. The string "normal" is also accepted,
and results in `scale` being the standard normal quantile function
evaluated at 0.75, ``special.ndtri(0.75)``, which is approximately 0.67449.
Array-like scale is also allowed, as long as it broadcasts correctly
to the output such that ``out / scale`` is a valid operation. The
output dimensions depend on the input array, `x`, and the `axis`
argument.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean; it will not calculate the *mean*
absolute deviation.
The input array may contain `inf`, but if `center` returns `inf`, the
corresponding MAD for that data will be `nan`.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_abs_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> import numpy as np
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_abs_deviation(x)
0.82832610097857
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_abs_deviation(x)
0.8323442311590675
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_abs_deviation(x)
array([3.5, 2.5, 1.5])
>>> stats.median_abs_deviation(x, axis=None)
2.0
Scale normal example:
>>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456)
>>> stats.median_abs_deviation(x)
1.3487398527041636
>>> stats.median_abs_deviation(x, scale='normal')
1.9996446978061115
"""
if not callable(center):
raise TypeError("The argument 'center' must be callable. The given "
f"value {repr(center)} is not callable.")
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
if scale.lower() == 'normal':
scale = 0.6744897501960817 # special.ndtri(0.75)
else:
raise ValueError(f"{scale} is not a valid scale value.")
x = asarray(x)
# Consistent with `np.var` and `np.std`.
if not x.size:
if axis is None:
return np.nan
nan_shape = tuple(item for i, item in enumerate(x.shape) if i != axis)
if nan_shape == ():
# Return nan, not array(nan)
return np.nan
return np.full(nan_shape, np.nan)
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan:
if axis is None:
mad = _mad_1d(x.ravel(), center, nan_policy)
else:
mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy)
else:
if axis is None:
med = center(x, axis=None)
mad = np.median(np.abs(x - med))
else:
# Wrap the call to center() in expand_dims() so it acts like
# keepdims=True was used.
med = np.expand_dims(center(x, axis=axis), axis)
mad = np.median(np.abs(x - med), axis=axis)
return mad / scale
#####################################
#        TRIMMING FUNCTIONS         #
#####################################
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""Perform iterative sigma-clipping of array elements.
Starting from the full sample, all elements outside the critical range are
removed, i.e. all elements of the current sample ``c`` that satisfy either of
the following conditions::
c < mean(c) - std(c)*low
c > mean(c) + std(c)*high
The iteration continues with the updated sample until no
elements are outside the (updated) range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
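# `delta` counts the elements removed in the previous pass; the loop
# stops once a full pass removes nothing (delta == 0).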
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std * low
critupper = c_mean + c_std * high
c = c[(c >= critlower) & (c <= critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""Slice off a proportion of items from both ends of an array.
Slice off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slice off less if proportion results in a non-integer slice index (i.e.
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim off each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
Create an array of 10 values and trim 10% of those values from each end:
>>> import numpy as np
>>> from scipy import stats
>>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> stats.trimboth(a, 0.1)
array([1, 3, 2, 4, 5, 6, 7, 8])
Note that the elements of the input array are trimmed by value, but the
output array is not necessarily sorted.
The proportion to trim is rounded down to the nearest integer. For
instance, trimming 25% of the values from each end of an array of 10
values will return an array of 6 values:
>>> b = np.arange(10)
>>> stats.trimboth(b, 1/4).shape
(6,)
Multidimensional arrays can be trimmed along any axis or across the entire
array:
>>> c = [2, 4, 6, 8, 0, 1, 3, 5, 7, 9]
>>> d = np.array([a, b, c])
>>> stats.trimboth(d, 0.4, axis=0).shape
(1, 10)
>>> stats.trimboth(d, 0.4, axis=1).shape
(3, 2)
>>> stats.trimboth(d, 0.4, axis=None).shape
(6,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
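# np.partition guarantees that the `lowercut` smallest values land before
# index `lowercut` and that the value at index `uppercut - 1` is in its
# sorted position with larger values after it, so the retained slice
# [lowercut:uppercut] holds exactly the middle values, in unspecified order.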
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
def trim1(a, proportiontocut, tail='right', axis=0):
"""Slice off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slice off less if proportion results in a non-integer slice index
(i.e. conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off from the 'left' or 'right' of the distribution.
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
Examples
--------
Create an array of 10 values and trim 20% of its lowest values:
>>> import numpy as np
>>> from scipy import stats
>>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> stats.trim1(a, 0.2, 'left')
array([2, 4, 3, 5, 6, 7, 8, 9])
Note that the elements of the input array are trimmed by value, but the
output array is not necessarily sorted.
The proportion to trim is rounded down to the nearest integer. For
instance, trimming 25% of the values from an array of 10 values will
return an array of 8 values:
>>> b = np.arange(10)
>>> stats.trim1(b, 1/4).shape
(8,)
Multidimensional arrays can be trimmed along any axis or across the entire
array:
>>> c = [2, 4, 6, 8, 0, 1, 3, 5, 7, 9]
>>> d = np.array([a, b, c])
>>> stats.trim1(d, 0.8, axis=0).shape
(1, 10)
>>> stats.trim1(d, 0.8, axis=1).shape
(3, 2)
>>> stats.trim1(d, 0.8, axis=None).shape
(6,)
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
    lowercut = 0
    uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs
else:
    raise ValueError("`tail` must be 'left' or 'right'.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
"""Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of both tails of the distribution.
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : Compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[tuple(sl)], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def _create_f_oneway_nan_result(shape, axis):
"""
This is a helper function for f_oneway for creating the return values
in certain degenerate conditions. It creates return values that are
all nan with the appropriate shape for the given `shape` and `axis`.
"""
axis = normalize_axis_index(axis, len(shape))
shp = shape[:axis] + shape[axis+1:]
if shp == ():
f = np.nan
prob = np.nan
else:
f = np.full(shp, fill_value=np.nan)
prob = f.copy()
return F_onewayResult(f, prob)
def _first(arr, axis):
"""Return arr[..., 0:1, ...] where 0:1 is in the `axis` position."""
return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)
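# Illustrative: for `arr` of shape (3, 4) and axis=1, `_first` returns
# arr[:, 0:1] with shape (3, 1); the reduced axis is kept with length one
# so the result broadcasts against `arr`.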
def f_oneway(*samples, axis=0):
"""Perform one-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two arguments. If the arrays are multidimensional, then all the
dimensions of the array must be the same except for `axis`.
axis : int, optional
Axis of the input arrays along which the test is applied.
Default is 0.
Returns
-------
statistic : float
The computed F statistic of the test.
pvalue : float
The associated p-value from the F distribution.
Warns
-----
`~scipy.stats.ConstantInputWarning`
Raised if all values within each of the input arrays are identical.
In this case the F statistic is either infinite or isn't defined,
so ``np.inf`` or ``np.nan`` is returned.
`~scipy.stats.DegenerateDataWarning`
Raised if the length of any input array is 0, or if all the input
arrays have length 1. ``np.nan`` is returned for the F statistic
and the p-value in these cases.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still
be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) or
the Alexander-Govern test (`scipy.stats.alexandergovern`) although with
some loss of power.
The length of each group must be at least one, and there must be at
least one group with length greater than one. If these conditions
are not satisfied, a warning is generated and (``np.nan``, ``np.nan``)
is returned.
If all values in each group are identical, and there exist at least two
groups with different values, the function generates a warning and
returns (``np.inf``, 0).
If all values in all groups are the same, the function generates a warning
and returns (``np.nan``, ``np.nan``).
The algorithm is from Heiman [2]_, pp.394-7.
References
----------
.. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
Chapter 14, 2014, http://vassarstats.net/textbook/
.. [2] G.W. Heiman, "Understanding research methods and statistics: An
integrated introduction for psychology", Houghton, Mifflin and
Company, 2001.
.. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import numpy as np
>>> from scipy.stats import f_oneway
Here are some data [3]_ on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.121019471642447, pvalue=0.0002812242314534544)
`f_oneway` accepts multidimensional input arrays. When the inputs
are multidimensional and `axis` is not given, the test is performed
along the first axis of the input arrays. For the following data, the
test is performed three times, once for each column.
>>> a = np.array([[9.87, 9.03, 6.81],
... [7.18, 8.35, 7.00],
... [8.39, 7.58, 7.68],
... [7.45, 6.33, 9.35],
... [6.41, 7.10, 9.33],
... [8.00, 8.24, 8.44]])
>>> b = np.array([[6.35, 7.30, 7.16],
... [6.65, 6.68, 7.63],
... [5.72, 7.73, 6.72],
... [7.01, 9.19, 7.41],
... [7.75, 7.87, 8.30],
... [6.90, 7.97, 6.97]])
>>> c = np.array([[3.31, 8.77, 1.01],
... [8.25, 3.24, 3.62],
... [6.32, 8.81, 5.19],
... [7.48, 8.83, 8.91],
... [8.59, 6.01, 6.07],
... [3.07, 9.72, 7.48]])
>>> F, p = f_oneway(a, b, c)
>>> F
array([1.75676344, 0.03701228, 3.76439349])
>>> p
array([0.20630784, 0.96375203, 0.04733157])
"""
if len(samples) < 2:
raise TypeError('at least two inputs are required;'
f' got {len(samples)}.')
samples = [np.asarray(sample, dtype=float) for sample in samples]
# ANOVA on N groups, each in its own array
num_groups = len(samples)
# We haven't explicitly validated axis, but if it is bad, this call of
# np.concatenate will raise np.AxisError. The call will raise ValueError
# if the dimensions of all the arrays, except the axis dimension, are not
# the same.
alldata = np.concatenate(samples, axis=axis)
bign = alldata.shape[axis]
# Check this after forming alldata, so shape errors are detected
# and reported before checking for 0 length inputs.
if any(sample.shape[axis] == 0 for sample in samples):
warnings.warn(stats.DegenerateDataWarning('at least one input '
'has length 0'))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Must have at least one group with length greater than 1.
if all(sample.shape[axis] == 1 for sample in samples):
msg = ('all input arrays have length 1. f_oneway requires that at '
'least one input has length greater than 1.')
warnings.warn(stats.DegenerateDataWarning(msg))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Check if all values within each group are identical, and if the common
# value in at least one group is different from that in another group.
# Based on https://github.com/scipy/scipy/issues/11669
# If axis=0, say, and the groups have shape (n0, ...), (n1, ...), ...,
# then is_const is a boolean array with shape (num_groups, ...).
# It is True if the values within the groups along the axis slice are
# identical. In the typical case where each input array is 1-d, is_const is
# a 1-d array with length num_groups.
is_const = np.concatenate(
[(_first(sample, axis) == sample).all(axis=axis,
keepdims=True)
for sample in samples],
axis=axis
)
# all_const is a boolean array with shape (...) (see previous comment).
# It is True if the values within each group along the axis slice are
# the same (e.g. [[3, 3, 3], [5, 5, 5, 5], [4, 4, 4]]).
all_const = is_const.all(axis=axis)
if all_const.any():
msg = ("Each of the input arrays is constant;"
"the F statistic is not defined or infinite")
warnings.warn(stats.ConstantInputWarning(msg))
# all_same_const is True if all the values in the groups along the axis=0
# slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]).
all_same_const = (_first(alldata, axis) == alldata).all(axis=axis)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean(axis=axis, keepdims=True)
alldata -= offset
normalized_ss = _square_of_sums(alldata, axis=axis) / bign
sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
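# sstot is the total sum of squares, sum((x - grand_mean)**2), computed
# with the shortcut sum(x**2) - (sum(x))**2 / N; after the centering
# above, the correction term is nearly zero but is kept for accuracy.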
ssbn = 0
for sample in samples:
ssbn += _square_of_sums(sample - offset,
axis=axis) / sample.shape[axis]
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= normalized_ss
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / dfbn
msw = sswn / dfwn
with np.errstate(divide='ignore', invalid='ignore'):
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
# Fix any f values that should be inf or nan because the corresponding
# inputs were constant.
if np.isscalar(f):
if all_same_const:
f = np.nan
prob = np.nan
elif all_const:
f = np.inf
prob = 0.0
else:
f[all_const] = np.inf
prob[all_const] = 0.0
f[all_same_const] = np.nan
prob[all_same_const] = np.nan
return F_onewayResult(f, prob)
def alexandergovern(*samples, nan_policy='propagate'):
"""Performs the Alexander Govern test.
The Alexander-Govern approximation tests the equality of k independent
means in the face of heterogeneity of variance. The test is applied to
samples from two or more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two samples.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
res : AlexanderGovernResult
An object with attributes:
statistic : float
The computed A statistic of the test.
pvalue : float
The associated p-value from the chi-squared distribution.
Warns
-----
`~scipy.stats.ConstantInputWarning`
Raised if an input is a constant array. The statistic is not defined
in this case, so ``np.nan`` is returned.
See Also
--------
f_oneway : one-way ANOVA
Notes
-----
The use of this test relies on several assumptions.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. Unlike `f_oneway`, this test does not assume homoscedasticity; the
assumption of equal variances is relaxed.
Input samples must be finite, one dimensional, and with size greater than
one.
References
----------
.. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler
Approximation for ANOVA under Variance Heterogeneity." Journal
of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101.
JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020.
Examples
--------
>>> from scipy.stats import alexandergovern
Here are some data on annual percentage rate of interest charged on
new car loans at nine of the largest banks in four American cities
taken from the National Institute of Standards and Technology's
ANOVA dataset.
We use `alexandergovern` to test the null hypothesis that all cities
have the same mean APR against the alternative that the cities do not
all have the same mean APR. We decide that a significance level of 5%
is required to reject the null hypothesis in favor of the alternative.
>>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5]
>>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9]
>>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5]
>>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25,
... 11.89]
>>> alexandergovern(atlanta, chicago, houston, memphis)
AlexanderGovernResult(statistic=4.65087071883494,
pvalue=0.19922132490385214)
The p-value is 0.1992, indicating a nearly 20% chance of observing
such an extreme value of the test statistic under the null hypothesis.
This exceeds 5%, so we do not reject the null hypothesis in favor of
the alternative.
"""
samples = _alexandergovern_input_validation(samples, nan_policy)
if np.any([(sample == sample[0]).all() for sample in samples]):
msg = "An input array is constant; the statistic is not defined."
warnings.warn(stats.ConstantInputWarning(msg))
return AlexanderGovernResult(np.nan, np.nan)
# The following formula numbers reference the equation described on
# page 92 by Alexander, Govern. Formulas 5, 6, and 7 describe other
# tests that serve as the basis for equation (8) but are not needed
# to perform the test.
# precalculate mean and length of each sample
lengths = np.array([ma.count(sample) if nan_policy == 'omit'
else len(sample) for sample in samples])
means = np.array([np.mean(sample) for sample in samples])
# (1) determine standard error of the mean for each sample
standard_errors = [np.std(sample, ddof=1) / np.sqrt(length)
for sample, length in zip(samples, lengths)]
# (2) define a weight for each sample
inv_sq_se = 1 / np.square(standard_errors)
weights = inv_sq_se / np.sum(inv_sq_se)
# (3) determine variance-weighted estimate of the common mean
var_w = np.sum(weights * means)
# (4) determine one-sample t statistic for each group
t_stats = (means - var_w)/standard_errors
# calculate parameters to be used in transformation
v = lengths - 1
a = v - .5
b = 48 * a**2
c = (a * np.log(1 + (t_stats ** 2)/v))**.5
# (8) perform a normalizing transformation on t statistic
z = (c + ((c**3 + 3*c)/b) -
((4*c**7 + 33*c**5 + 240*c**3 + 855*c) /
(b**2*10 + 8*b*c**4 + 1000*b)))
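# Under the null hypothesis, each z is approximately a standard normal
# deviate, so the sum of the squared z values below is compared against
# a chi-squared distribution with k - 1 degrees of freedom.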
# (9) calculate statistic
A = np.sum(np.square(z))
# "[the p value is determined from] central chi-square random deviates
# with k - 1 degrees of freedom". Alexander, Govern (94)
p = distributions.chi2.sf(A, len(samples) - 1)
return AlexanderGovernResult(A, p)
def _alexandergovern_input_validation(samples, nan_policy):
if len(samples) < 2:
raise TypeError(f"2 or more inputs required, got {len(samples)}")
# input arrays are flattened
samples = [np.asarray(sample, dtype=float) for sample in samples]
for i, sample in enumerate(samples):
if np.size(sample) <= 1:
raise ValueError("Input sample size must be greater than one.")
if sample.ndim != 1:
raise ValueError("Input samples must be one-dimensional")
if np.isinf(sample).any():
raise ValueError("Input samples must be finite.")
contains_nan, nan_policy = _contains_nan(sample,
nan_policy=nan_policy)
if contains_nan and nan_policy == 'omit':
samples[i] = ma.masked_invalid(sample)
return samples
@dataclass
class AlexanderGovernResult:
statistic: float
pvalue: float
def _pearsonr_fisher_ci(r, n, confidence_level, alternative):
"""
Compute the confidence interval for Pearson's R.
Fisher's transformation is used to compute the confidence interval
(https://en.wikipedia.org/wiki/Fisher_transformation).
"""
if r == 1:
zr = np.inf
elif r == -1:
zr = -np.inf
else:
zr = np.arctanh(r)
if n > 3:
se = np.sqrt(1 / (n - 3))
if alternative == "two-sided":
h = special.ndtri(0.5 + confidence_level/2)
zlo = zr - h*se
zhi = zr + h*se
rlo = np.tanh(zlo)
rhi = np.tanh(zhi)
elif alternative == "less":
h = special.ndtri(confidence_level)
zhi = zr + h*se
rhi = np.tanh(zhi)
rlo = -1.0
else:
# alternative == "greater":
h = special.ndtri(confidence_level)
zlo = zr - h*se
rlo = np.tanh(zlo)
rhi = 1.0
else:
rlo, rhi = -1.0, 1.0
return ConfidenceInterval(low=rlo, high=rhi)
def _pearsonr_bootstrap_ci(confidence_level, method, x, y, alternative):
"""
Compute the confidence interval for Pearson's R using the bootstrap.
"""
def statistic(x, y):
statistic, _ = pearsonr(x, y)
return statistic
res = bootstrap((x, y), statistic, confidence_level=confidence_level,
paired=True, alternative=alternative, **method._asdict())
# for one-sided confidence intervals, bootstrap gives +/- inf on one side
res.confidence_interval = np.clip(res.confidence_interval, -1, 1)
return ConfidenceInterval(*res.confidence_interval)
ConfidenceInterval = namedtuple('ConfidenceInterval', ['low', 'high'])
PearsonRResultBase = _make_tuple_bunch('PearsonRResultBase',
['statistic', 'pvalue'], [])
class PearsonRResult(PearsonRResultBase):
"""
Result of `scipy.stats.pearsonr`
Attributes
----------
statistic : float
Pearson product-moment correlation coefficient.
pvalue : float
The p-value associated with the chosen alternative.
Methods
-------
confidence_interval
Computes the confidence interval of the correlation
coefficient `statistic` for the given confidence level.
"""
def __init__(self, statistic, pvalue, alternative, n, x, y):
super().__init__(statistic, pvalue)
self._alternative = alternative
self._n = n
self._x = x
self._y = y
# add alias for consistency with other correlation functions
self.correlation = statistic
def confidence_interval(self, confidence_level=0.95, method=None):
"""
The confidence interval for the correlation coefficient.
Compute the confidence interval for the correlation coefficient
``statistic`` with the given confidence level.
If `method` is not provided,
the confidence interval is computed using the Fisher transformation
F(r) = arctanh(r) [1]_. When the sample pairs are drawn from a
bivariate normal distribution, F(r) approximately follows a normal
distribution with standard error ``1/sqrt(n - 3)``, where ``n`` is the
length of the original samples along the calculation axis. When
``n <= 3``, this approximation does not yield a finite, real standard
error, so we define the confidence interval to be -1 to 1.
If `method` is an instance of `BootstrapMethod`, the confidence
interval is computed using `scipy.stats.bootstrap` with the provided
configuration options and other appropriate settings. In some cases,
confidence limits may be NaN due to a degenerate resample, and this is
typical for very small samples (~6 observations).
Parameters
----------
confidence_level : float
The confidence level for the calculation of the correlation
coefficient confidence interval. Default is 0.95.
method : BootstrapMethod, optional
Defines the method used to compute the confidence interval. See
method description for details.
.. versionadded:: 1.11.0
Returns
-------
ci : namedtuple
The confidence interval is returned in a ``namedtuple`` with
fields `low` and `high`.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
"""
if isinstance(method, BootstrapMethod):
ci = _pearsonr_bootstrap_ci(confidence_level, method,
self._x, self._y, self._alternative)
elif method is None:
ci = _pearsonr_fisher_ci(self.statistic, self._n, confidence_level,
self._alternative)
else:
message = ('`method` must be an instance of `BootstrapMethod` '
'or None.')
raise ValueError(message)
return ci
def pearsonr(x, y, *, alternative='two-sided', method=None):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Positive correlations imply that as x increases, so does y. Negative
correlations imply that as x increases, y decreases.
This function also performs a test of the null hypothesis that the
distributions underlying the samples are uncorrelated and normally
distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.)
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
alternative : {'two-sided', 'greater', 'less'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the correlation is nonzero
* 'less': the correlation is negative (less than zero)
* 'greater': the correlation is positive (greater than zero)
.. versionadded:: 1.9.0
method : ResamplingMethod, optional
Defines the method used to compute the p-value. If `method` is an
instance of `PermutationMethod`/`MonteCarloMethod`, the p-value is
computed using
`scipy.stats.permutation_test`/`scipy.stats.monte_carlo_test` with the
provided configuration options and other appropriate settings.
Otherwise, the p-value is computed as documented in the notes.
.. versionadded:: 1.11.0
Returns
-------
result : `~scipy.stats._result_classes.PearsonRResult`
An object with the following attributes:
statistic : float
Pearson product-moment correlation coefficient.
pvalue : float
The p-value associated with the chosen alternative.
The object has the following method:
confidence_interval(confidence_level, method)
This computes the confidence interval of the correlation
coefficient `statistic` for the given confidence level.
The confidence interval is returned in a ``namedtuple`` with
fields `low` and `high`. If `method` is not provided, the
confidence interval is computed using the Fisher transformation
[1]_. If `method` is an instance of `BootstrapMethod`, the
confidence interval is computed using `scipy.stats.bootstrap` with
the provided configuration options and other appropriate settings.
In some cases, confidence limits may be NaN due to a degenerate
resample, and this is typical for very small samples (~6
observations).
Warns
-----
`~scipy.stats.ConstantInputWarning`
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
`~scipy.stats.NearConstantInputWarning`
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector x and :math:`m_y` is
the mean of the vector y.
Under the assumption that x and y are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient r is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value when
the `method` parameter is left at its default value (None).
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The default p-value returned by `pearsonr` is a two-sided p-value. For a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
For backwards compatibility, the object that is returned also behaves
like a tuple of length two that holds the statistic and the p-value.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x, y = [1, 2, 3, 4, 5, 6, 7], [10, 9, 2.5, 6, 4, 3, 2]
>>> res = stats.pearsonr(x, y)
>>> res
PearsonRResult(statistic=-0.828503883588428, pvalue=0.021280260007523286)
To perform an exact permutation version of the test:
>>> rng = np.random.default_rng(7796654889291491997)
>>> method = stats.PermutationMethod(n_resamples=np.inf, random_state=rng)
>>> stats.pearsonr(x, y, method=method)
PearsonRResult(statistic=-0.828503883588428, pvalue=0.028174603174603175)
To perform the test under the null hypothesis that the data were drawn from
*uniform* distributions:
>>> method = stats.MonteCarloMethod(rvs=(rng.uniform, rng.uniform))
>>> stats.pearsonr(x, y, method=method)
PearsonRResult(statistic=-0.828503883588428, pvalue=0.0188)
To produce an asymptotic 90% confidence interval:
>>> res.confidence_interval(confidence_level=0.9)
ConfidenceInterval(low=-0.9644331982722841, high=-0.3460237473272273)
And for a bootstrap confidence interval:
>>> method = stats.BootstrapMethod(method='BCa', random_state=rng)
>>> res.confidence_interval(confidence_level=0.9, method=method)
ConfidenceInterval(low=-0.9983163756488651, high=-0.22771001702132443) # may vary
There is a linear dependence between x and y if y = a + b*x + e, where
a,b are constants and e is a random error term, assumed to be independent
of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
e follow a normal distribution with mean zero and standard deviation s>0.
>>> rng = np.random.default_rng()
>>> s = 0.5
>>> x = stats.norm.rvs(size=500, random_state=rng)
>>> e = stats.norm.rvs(scale=s, size=500, random_state=rng)
>>> y = x + e
>>> stats.pearsonr(x, y).statistic
0.9001942438244763
This should be close to the exact value given by
>>> 1/np.sqrt(1 + s**2)
0.8944271909999159
For s=0.5, we observe a high level of correlation. In general, a large
variance of the noise reduces the correlation, while the correlation
approaches one as the variance of the error goes to zero.
It is important to keep in mind that no correlation does not imply
independence unless (x, y) is jointly normal. Correlation can even be zero
when there is a very simple dependence structure: if X follows a
standard normal distribution, let y = abs(x). Note that the correlation
between x and y is zero. Indeed, since the expectation of x is zero,
cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
by symmetry. The following lines of code illustrate this observation:
>>> y = np.abs(x)
>>> stats.pearsonr(x, y)
PearsonRResult(statistic=-0.05444919272687482, pvalue=0.22422294836207743)
A non-zero correlation coefficient can be misleading. For example, if X has
a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797...,
implying a high level of correlation:
>>> y = np.where(x < 0, x, 0)
>>> stats.pearsonr(x, y)
PearsonRResult(statistic=0.861985781588, pvalue=4.813432002751103e-149)
This is unintuitive since there is no dependence of x and y if x is larger
than zero, which happens in about half of the cases if we sample x and y.
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
if (np.issubdtype(x.dtype, np.complexfloating)
or np.issubdtype(y.dtype, np.complexfloating)):
raise ValueError('This function does not support complex data')
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
warnings.warn(stats.ConstantInputWarning(msg))
result = PearsonRResult(statistic=np.nan, pvalue=np.nan, n=n,
alternative=alternative, x=x, y=y)
return result
if isinstance(method, PermutationMethod):
def statistic(y):
statistic, _ = pearsonr(x, y, alternative=alternative)
return statistic
res = permutation_test((y,), statistic, permutation_type='pairings',
alternative=alternative, **method._asdict())
return PearsonRResult(statistic=res.statistic, pvalue=res.pvalue, n=n,
alternative=alternative, x=x, y=y)
elif isinstance(method, MonteCarloMethod):
def statistic(x, y):
statistic, _ = pearsonr(x, y, alternative=alternative)
return statistic
if method.rvs is None:
rng = np.random.default_rng()
method.rvs = rng.normal, rng.normal
res = monte_carlo_test((x, y,), statistic=statistic,
alternative=alternative, **method._asdict())
return PearsonRResult(statistic=res.statistic, pvalue=res.pvalue, n=n,
alternative=alternative, x=x, y=y)
elif method is not None:
message = ('`method` must be an instance of `PermutationMethod`, '
           '`MonteCarloMethod`, or None.')
raise ValueError(message)
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
if n == 2:
r = dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0]))
result = PearsonRResult(statistic=r, pvalue=1.0, n=n,
alternative=alternative, x=x, y=y)
return result
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
msg = ("An input array is nearly constant; the computed "
"correlation coefficient may be inaccurate.")
warnings.warn(stats.NearConstantInputWarning(msg))
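# The correlation coefficient is the dot product of the centered,
# unit-normalized vectors, i.e. the cosine of the angle between them.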
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the distribution of `r` under the null
# hypothesis is the beta distribution on (-1, 1) with a = b = n/2 - 1.
ab = n/2 - 1
dist = stats.beta(ab, ab, loc=-1, scale=2)
if alternative == 'two-sided':
prob = 2*dist.sf(abs(r))
elif alternative == 'less':
prob = dist.cdf(r)
elif alternative == 'greater':
prob = dist.sf(r)
else:
raise ValueError('alternative must be one of '
'["two-sided", "less", "greater"]')
return PearsonRResult(statistic=r, pvalue=prob, n=n,
alternative=alternative, x=x, y=y)
def fisher_exact(table, alternative='two-sided'):
"""Perform a Fisher exact test on a 2x2 contingency table.
The null hypothesis is that the true odds ratio of the populations
underlying the observations is one, and the observations were sampled
from these populations under a condition: the marginals of the
resulting table must equal those of the observed table. The statistic
returned is the unconditional maximum likelihood estimate of the odds
ratio, and the p-value is the probability under the null hypothesis of
obtaining a table at least as extreme as the one that was actually
observed. There are other possible choices of statistic and two-sided
p-value definition associated with Fisher's exact test; please see the
Notes for more information.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements must be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the odds ratio of the underlying population is not one
* 'less': the odds ratio of the underlying population is less than one
* 'greater': the odds ratio of the underlying population is greater
than one
See the Notes for more details.
Returns
-------
res : SignificanceResult
An object containing attributes:
statistic : float
This is the prior odds ratio, not a posterior estimate.
pvalue : float
The probability under the null hypothesis of obtaining a
table at least as extreme as the one that was actually observed.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table. This can be used as an alternative to
`fisher_exact` when the numbers in the table are large.
contingency.odds_ratio : Compute the odds ratio (sample or conditional
MLE) for a 2x2 contingency table.
barnard_exact : Barnard's exact test, which is a more powerful alternative
than Fisher's exact test for 2x2 contingency tables.
boschloo_exact : Boschloo's exact test, which is a more powerful
alternative than Fisher's exact test for 2x2 contingency tables.
Notes
-----
*Null hypothesis and p-values*
The null hypothesis is that the true odds ratio of the populations
underlying the observations is one, and the observations were sampled at
random from these populations under a condition: the marginals of the
resulting table must equal those of the observed table. Equivalently,
the null hypothesis is that the input table is from the hypergeometric
distribution with parameters (as used in `hypergeom`)
``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the
input table is ``[[a, b], [c, d]]``. This distribution has support
``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values
in the input table, ``max(0, a - d) <= x <= a + min(b, c)``. ``x``
can be interpreted as the upper-left element of a 2x2 table, so the
tables in the distribution have form::
[   x           n - x      ]
[ N - x   M - (n + N) + x  ]
For example, if::
table = [6  2]
        [1  4]
then the support is ``2 <= x <= 7``, and the tables in the distribution
are::
[2 6] [3 5] [4 4] [5 3] [6 2] [7 1]
[5 0] [4 1] [3 2] [2 3] [1 4] [0 5]
The probability of each table is given by the hypergeometric distribution
``hypergeom.pmf(x, M, n, N)``. For this example, these are (rounded to
three significant digits)::
x 2 3 4 5 6 7
p 0.0163 0.163 0.408 0.326 0.0816 0.00466
These can be computed with::
>>> import numpy as np
>>> from scipy.stats import hypergeom
>>> table = np.array([[6, 2], [1, 4]])
>>> M = table.sum()
>>> n = table[0].sum()
>>> N = table[:, 0].sum()
>>> start, end = hypergeom.support(M, n, N)
>>> hypergeom.pmf(np.arange(start, end+1), M, n, N)
array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508,
0.004662 ])
The two-sided p-value is the probability that, under the null hypothesis,
a random table would have a probability equal to or less than the
probability of the input table. For our example, the probability of
the input table (where ``x = 6``) is 0.0816. The x values where the
probability does not exceed this are 2, 6 and 7, so the two-sided p-value
is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``::
>>> from scipy.stats import fisher_exact
>>> res = fisher_exact(table, alternative='two-sided')
>>> res.pvalue
0.10256410256410257
The one-sided p-value for ``alternative='greater'`` is the probability
that a random table has ``x >= a``, which in our example is ``x >= 6``,
or ``0.0816 + 0.00466 ~= 0.08626``::
>>> res = fisher_exact(table, alternative='greater')
>>> res.pvalue
0.08624708624708627
This is equivalent to computing the survival function of the
distribution at ``x = 5`` (one less than ``x`` from the input table,
because we want to include the probability of ``x = 6`` in the sum)::
>>> hypergeom.sf(5, M, n, N)
0.08624708624708627
For ``alternative='less'``, the one-sided p-value is the probability
that a random table has ``x <= a``, (i.e. ``x <= 6`` in our example),
or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``::
>>> res = fisher_exact(table, alternative='less')
>>> res.pvalue
0.9953379953379957
This is equivalent to computing the cumulative distribution function
of the distribution at ``x = 6``:
>>> hypergeom.cdf(6, M, n, N)
0.9953379953379957
*Odds ratio*
The calculated odds ratio is different from the value computed by the
R function ``fisher.test``. This implementation returns the "sample"
or "unconditional" maximum likelihood estimate, while ``fisher.test``
in R uses the conditional maximum likelihood estimate. To compute the
conditional maximum likelihood estimate of the odds ratio, use
`scipy.stats.contingency.odds_ratio`.
References
----------
.. [1] Fisher, Sir Ronald A, "The Design of Experiments:
Mathematics of a Lady Tasting Tea." ISBN 978-0-486-41151-4, 1935.
.. [2] "Fisher's exact test",
https://en.wikipedia.org/wiki/Fisher's_exact_test
.. [3] Emma V. Low et al. "Identifying the lowest effective dose of
acetazolamide for the prophylaxis of acute mountain sickness:
systematic review and meta-analysis."
BMJ, 345, :doi:`10.1136/bmj.e6779`, 2012.
Examples
--------
In [3]_, the effective dose of acetazolamide for the prophylaxis of acute
mountain sickness was investigated. The study notably concluded:
Acetazolamide 250 mg, 500 mg, and 750 mg daily were all efficacious for
preventing acute mountain sickness. Acetazolamide 250 mg was the lowest
effective dose with available evidence for this indication.
The following table summarizes the results of the experiment in which
some participants took a daily dose of acetazolamide 250 mg while others
took a placebo.
Cases of acute mountain sickness were recorded::
                             Acetazolamide   Control/Placebo
Acute mountain sickness            7                17
No acute mountain sickness        15                 5
Is there evidence that the acetazolamide 250 mg reduces the risk of
acute mountain sickness?
We begin by formulating a null hypothesis :math:`H_0`:
The odds of experiencing acute mountain sickness are the same with
the acetazolamide treatment as they are with placebo.
Let's assess the plausibility of this hypothesis with
Fisher's test.
>>> from scipy.stats import fisher_exact
>>> res = fisher_exact([[7, 17], [15, 5]], alternative='less')
>>> res.statistic
0.13725490196078433
>>> res.pvalue
0.0028841933752349743
Using a significance level of 5%, we would reject the null hypothesis in
favor of the alternative hypothesis: "The odds of experiencing acute
mountain sickness with acetazolamide treatment are less than the odds of
experiencing acute mountain sickness with placebo."
.. note::
Because the null distribution of Fisher's exact test is formed under
the assumption that both row and column sums are fixed, the result of
the test is conservative when applied to an experiment in which the
row sums are not fixed.
In this case, the column sums are fixed; there are 22 subjects in each
group. But the number of cases of acute mountain sickness is not
(and cannot be) fixed before conducting the experiment. It is a
consequence.
Boschloo's test does not depend on the assumption that the row sums
are fixed, and consequently, it provides a more powerful test in this
situation.
>>> from scipy.stats import boschloo_exact
>>> res = boschloo_exact([[7, 17], [15, 5]], alternative='less')
>>> res.statistic
0.0028841933752349743
>>> res.pvalue
0.0015141406667567101
We verify that the p-value is less than with `fisher_exact`.
"""
hypergeom = distributions.hypergeom
# int32 is not enough for the algorithm
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return SignificanceResult(np.nan, 1.0)
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
def pmf(x):
return hypergeom.pmf(x, n1 + n2, n1, n)
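# hypergeom parameters (see Notes): M = n1 + n2 is the table total, the
# hypergeometric `n` is n1 (the first-row total), `N` is the local
# variable n (the first-column total), and x plays the role of the
# upper-left cell c[0, 0].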
if alternative == 'less':
pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
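# `mode` is the mode of the hypergeometric null distribution. The
# two-sided p-value sums pmf(x) over all x whose probability does not
# exceed that of the observed table (within relative tolerance epsilon);
# the matching cutoff on the opposite tail is located with a binary
# search over the monotone side of the pmf.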
pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1e-14
gamma = 1 + epsilon
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= epsilon:
return SignificanceResult(oddsratio, 1.)
elif c[0, 0] < mode:
plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact * gamma:
return SignificanceResult(oddsratio, plower)
guess = _binary_search(lambda x: -pmf(x), -pexact * gamma, mode, n)
pvalue = plower + hypergeom.sf(guess, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact * gamma:
return SignificanceResult(oddsratio, pupper)
guess = _binary_search(pmf, pexact * gamma, 0, mode)
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return SignificanceResult(oddsratio, pvalue)
def spearmanr(a, b=None, axis=0, nan_policy='propagate',
alternative='two-sided'):
r"""Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets.
Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. Although calculation of the
p-value does not make strong assumptions about the distributions underlying
the samples, it is only accurate for very large samples (>500
observations). For smaller sample sizes, consider a permutation test (see
Examples section below).
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the correlation is nonzero
* 'less': the correlation is negative (less than zero)
* 'greater': the correlation is positive (greater than zero)
.. versionadded:: 1.7.0
Returns
-------
res : SignificanceResult
An object containing attributes:
statistic : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). Correlation matrix is square
with length equal to total number of variables (columns or rows) in
``a`` and ``b`` combined.
pvalue : float
The p-value for a hypothesis test whose null hypothesis
is that two samples have no ordinal correlation. See
`alternative` above for alternative hypotheses. `pvalue` has the
same shape as `statistic`.
Warns
-----
`~scipy.stats.ConstantInputWarning`
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
.. [2] Kendall, M. G. and Stuart, A. (1973).
The Advanced Theory of Statistics, Volume 2: Inference and Relationship.
Griffin. 1973.
Section 31.18
.. [3] Kershenobich, D., Fierro, F. J., & Rojkind, M. (1970). The
relationship between the free pool of proline and collagen content in
human liver cirrhosis. The Journal of Clinical Investigation, 49(12),
2246-2249.
.. [4] Hollander, M., Wolfe, D. A., & Chicken, E. (2013). Nonparametric
statistical methods. John Wiley & Sons.
.. [5] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
Statistical Applications in Genetics and Molecular Biology 9.1 (2010).
.. [6] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are
superior to t and F tests in biomedical research. The American
Statistician, 52(2), 127-132.
Examples
--------
Consider the following data from [3]_, which studied the relationship
between free proline (an amino acid) and total collagen (a protein often
found in connective tissue) in unhealthy human livers.
The ``x`` and ``y`` arrays below record measurements of the two compounds.
The observations are paired: each free proline measurement was taken from
the same liver as the total collagen measurement at the same index.
>>> import numpy as np
>>> # total collagen (mg/g dry weight of liver)
>>> x = np.array([7.1, 7.1, 7.2, 8.3, 9.4, 10.5, 11.4])
>>> # free proline (μ mole/g dry weight of liver)
>>> y = np.array([2.8, 2.9, 2.8, 2.6, 3.5, 4.6, 5.0])
These data were analyzed in [4]_ using Spearman's correlation coefficient,
a statistic sensitive to monotonic correlation between the samples.
>>> from scipy import stats
>>> res = stats.spearmanr(x, y)
>>> res.statistic
0.7000000000000001
The value of this statistic tends to be high (close to 1) for samples with
a strongly positive ordinal correlation, low (close to -1) for samples with
a strongly negative ordinal correlation, and small in magnitude (close to
zero) for samples with weak ordinal correlation.
The test is performed by comparing the observed value of the
statistic against the null distribution: the distribution of statistic
values derived under the null hypothesis that total collagen and free
proline measurements are independent.
For this test, the statistic can be transformed such that the null
distribution for large samples is Student's t distribution with
``len(x) - 2`` degrees of freedom.
>>> import matplotlib.pyplot as plt
>>> dof = len(x)-2 # len(x) == len(y)
>>> dist = stats.t(df=dof)
>>> t_vals = np.linspace(-5, 5, 100)
>>> pdf = dist.pdf(t_vals)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def plot(ax): # we'll re-use this
... ax.plot(t_vals, pdf)
... ax.set_title("Spearman's Rho Test Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
>>> plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution as extreme or more extreme than the observed
value of the statistic. In a two-sided test in which the statistic is
positive, elements of the null distribution greater than the transformed
statistic and elements of the null distribution less than the negative of
the observed statistic are both considered "more extreme".
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> rs = res.statistic # original statistic
>>> transformed = rs * np.sqrt(dof / ((rs+1.0)*(1.0-rs)))
>>> pvalue = dist.cdf(-transformed) + dist.sf(transformed)
>>> annotation = (f'p-value={pvalue:.4f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (2.7, 0.025), (3, 0.03), arrowprops=props)
>>> i = t_vals >= transformed
>>> ax.fill_between(t_vals[i], y1=0, y2=pdf[i], color='C0')
>>> i = t_vals <= -transformed
>>> ax.fill_between(t_vals[i], y1=0, y2=pdf[i], color='C0')
>>> ax.set_xlim(-5, 5)
>>> ax.set_ylim(0, 0.1)
>>> plt.show()
>>> res.pvalue
0.07991669030889909 # two-sided p-value
If the p-value is "small" - that is, if there is a low probability of
sampling data from independent distributions that produces such an extreme
value of the statistic - this may be taken as evidence against the null
hypothesis in favor of the alternative: the distribution of total collagen
and free proline are *not* independent. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [5]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
- Small p-values are not evidence for a *large* effect; rather, they can
only provide evidence for a "significant" effect, meaning that they are
unlikely to have occurred under the null hypothesis.
Suppose that before performing the experiment, the authors had reason
to predict a positive correlation between the total collagen and free
proline measurements, and that they had chosen to assess the plausibility
of the null hypothesis against a one-sided alternative: free proline has a
positive ordinal correlation with total collagen. In this case, only those
values in the null distribution that are as great or greater than the
observed statistic are considered to be more extreme.
>>> res = stats.spearmanr(x, y, alternative='greater')
>>> res.statistic
0.7000000000000001 # same statistic
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> pvalue = dist.sf(transformed)
>>> annotation = (f'p-value={pvalue:.6f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (3, 0.018), (3.5, 0.03), arrowprops=props)
>>> i = t_vals >= transformed
>>> ax.fill_between(t_vals[i], y1=0, y2=pdf[i], color='C0')
>>> ax.set_xlim(1, 5)
>>> ax.set_ylim(0, 0.1)
>>> plt.show()
>>> res.pvalue
0.03995834515444954 # one-sided p-value; half of the two-sided p-value
Note that the t-distribution provides an asymptotic approximation of the
null distribution; it is only accurate for samples with many observations.
For small samples, it may be more appropriate to perform a permutation
test: Under the null hypothesis that total collagen and free proline are
    independent, each of the free proline measurements was equally likely to
have been observed with any of the total collagen measurements. Therefore,
we can form an *exact* null distribution by calculating the statistic under
each possible pairing of elements between ``x`` and ``y``.
>>> def statistic(x): # explore all possible pairings by permuting `x`
... rs = stats.spearmanr(x, y).statistic # ignore pvalue
... transformed = rs * np.sqrt(dof / ((rs+1.0)*(1.0-rs)))
... return transformed
>>> ref = stats.permutation_test((x,), statistic, alternative='greater',
... permutation_type='pairings')
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> ax.hist(ref.null_distribution, np.linspace(-5, 5, 26),
... density=True)
    >>> ax.legend(['asymptotic approximation\n(many observations)',
... f'exact \n({len(ref.null_distribution)} permutations)'])
>>> plt.show()
>>> ref.pvalue
0.04563492063492063 # exact one-sided p-value
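    Finally, a brief sketch of the 2-D behavior described under ``axis``:
    when more than two variables are passed, the ``statistic`` attribute is a
    correlation matrix (the values here are arbitrary; only the shape is
    checked).
    >>> rng = np.random.default_rng(1234)
    >>> data = rng.random((20, 3))  # 20 observations of 3 variables
    >>> stats.spearmanr(data).statistic.shape  # 3x3 correlation matrix
    (3, 3)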
"""
if axis is not None and axis > 1:
raise ValueError("spearmanr only handles 1-D or 2-D arrays, "
"supplied axis argument {}, please use only "
"values 0, 1 or None for axis".format(axis))
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 "
"variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
res = SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
warn_msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
if axisout == 0:
if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(stats.ConstantInputWarning(warn_msg))
res = SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
    else:  # case when axisout == 1 because `a` is 2-D only
if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(stats.ConstantInputWarning(warn_msg))
res = SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy,
alternative=alternative)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
res = SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).any(axis=axisout)
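    # Spearman's rho is Pearson's correlation computed on the ranks of the
    # data, so rank each variable along `axisout` and correlate the ranks.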
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
with np.errstate(divide='ignore'):
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
t, prob = _ttest_finish(dof, t, alternative)
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
res = SignificanceResult(rs[1, 0], prob[1, 0])
res.correlation = rs[1, 0]
return res
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
res = SignificanceResult(rs, prob)
res.correlation = rs
return res
def pointbiserialr(x, y):
r"""Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function may be computed using a shortcut formula but produces the
same result as `pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
res: SignificanceResult
An object containing attributes:
statistic : float
The R value.
pvalue : float
The two-sided p-value.
Notes
-----
`pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_1} - \overline{Y_0}}
{s_y}
\sqrt{\frac{N_0 N_1}
{N (N - 1)}}
Where :math:`\overline{Y_{0}}` and :math:`\overline{Y_{1}}` are means
of the metric observations coded 0 and 1 respectively; :math:`N_{0}` and
:math:`N_{1}` are number of observations coded 0 and 1 respectively;
:math:`N` is the total number of observations and :math:`s_{y}` is the
standard deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
np. 3, pp. 603-607, 1954.
.. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
:doi:`10.1002/9781118445112.stat06227`
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
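    As a sketch of the t-statistic relation given in the Notes, the value
    computed from :math:`r_{pb}` matches the pooled-variance independent
    two-sample t-statistic:
    >>> res = stats.pointbiserialr(a, b)
    >>> t = np.sqrt(len(a) - 2) * res.statistic / np.sqrt(1 - res.statistic**2)
    >>> bool(np.isclose(t, stats.ttest_ind(b[a == 1], b[a == 0]).statistic))
    True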
"""
rpb, prob = pearsonr(x, y)
# create result object with alias for backward compatibility
res = SignificanceResult(rpb, prob)
res.correlation = rpb
return res
def kendalltau(x, y, initial_lexsort=_NoValue, nan_policy='propagate',
method='auto', variant='b', alternative='two-sided'):
r"""Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, and values close to -1
indicate strong disagreement. This implements two variants of Kendall's
tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). These
differ only in how they are normalized to lie within the range -1 to 1;
the hypothesis tests (their p-values) are identical. Kendall's original
tau-a is not implemented separately because both tau-b and tau-c reduce
to tau-a in the absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they
will be flattened to 1-D.
initial_lexsort : bool, optional, deprecated
This argument is unused.
.. deprecated:: 1.10.0
`kendalltau` keyword argument `initial_lexsort` is deprecated as it
is unused and will be removed in SciPy 1.14.0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [5]_.
The following options are available (default is 'auto'):
* 'auto': selects the appropriate method based on a trade-off
between speed and accuracy
* 'asymptotic': uses a normal approximation valid for large samples
* 'exact': computes the exact p-value, but can only be used if no ties
are present. As the sample size increases, the 'exact' computation
time may grow and the result may lose some precision.
variant : {'b', 'c'}, optional
Defines which variant of Kendall's tau is returned. Default is 'b'.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the rank correlation is nonzero
* 'less': the rank correlation is negative (less than zero)
* 'greater': the rank correlation is positive (greater than zero)
Returns
-------
res : SignificanceResult
An object containing attributes:
statistic : float
The tau statistic.
pvalue : float
The p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See Also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U. n is the total number of samples, and m is the
number of unique values in either `x` or `y`, whichever is smaller.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
.. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
.. [6] Kershenobich, D., Fierro, F. J., & Rojkind, M. (1970). The
relationship between the free pool of proline and collagen content
in human liver cirrhosis. The Journal of Clinical Investigation,
49(12), 2246-2249.
.. [7] Hollander, M., Wolfe, D. A., & Chicken, E. (2013). Nonparametric
statistical methods. John Wiley & Sons.
.. [8] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly
Drawn." Statistical Applications in Genetics and Molecular Biology
9.1 (2010).
Examples
--------
Consider the following data from [6]_, which studied the relationship
between free proline (an amino acid) and total collagen (a protein often
found in connective tissue) in unhealthy human livers.
The ``x`` and ``y`` arrays below record measurements of the two compounds.
The observations are paired: each free proline measurement was taken from
the same liver as the total collagen measurement at the same index.
>>> import numpy as np
>>> # total collagen (mg/g dry weight of liver)
>>> x = np.array([7.1, 7.1, 7.2, 8.3, 9.4, 10.5, 11.4])
>>> # free proline (μ mole/g dry weight of liver)
>>> y = np.array([2.8, 2.9, 2.8, 2.6, 3.5, 4.6, 5.0])
These data were analyzed in [7]_ using Spearman's correlation coefficient,
    a statistic similar to Kendall's tau in that it is also sensitive to
    ordinal correlation between the samples. Let's perform an analogous study
using Kendall's tau.
>>> from scipy import stats
>>> res = stats.kendalltau(x, y)
>>> res.statistic
0.5499999999999999
The value of this statistic tends to be high (close to 1) for samples with
a strongly positive ordinal correlation, low (close to -1) for samples with
a strongly negative ordinal correlation, and small in magnitude (close to
zero) for samples with weak ordinal correlation.
The test is performed by comparing the observed value of the
statistic against the null distribution: the distribution of statistic
values derived under the null hypothesis that total collagen and free
proline measurements are independent.
For this test, the null distribution for large samples without ties is
approximated as the normal distribution with variance
``(2*(2*n + 5))/(9*n*(n - 1))``, where ``n = len(x)``.
>>> import matplotlib.pyplot as plt
>>> n = len(x) # len(x) == len(y)
>>> var = (2*(2*n + 5))/(9*n*(n - 1))
>>> dist = stats.norm(scale=np.sqrt(var))
>>> z_vals = np.linspace(-1.25, 1.25, 100)
>>> pdf = dist.pdf(z_vals)
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> def plot(ax): # we'll re-use this
... ax.plot(z_vals, pdf)
... ax.set_title("Kendall Tau Test Null Distribution")
... ax.set_xlabel("statistic")
... ax.set_ylabel("probability density")
>>> plot(ax)
>>> plt.show()
The comparison is quantified by the p-value: the proportion of values in
the null distribution as extreme or more extreme than the observed
value of the statistic. In a two-sided test in which the statistic is
positive, elements of the null distribution greater than the transformed
statistic and elements of the null distribution less than the negative of
the observed statistic are both considered "more extreme".
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> pvalue = dist.cdf(-res.statistic) + dist.sf(res.statistic)
>>> annotation = (f'p-value={pvalue:.4f}\n(shaded area)')
>>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8)
>>> _ = ax.annotate(annotation, (0.65, 0.15), (0.8, 0.3), arrowprops=props)
>>> i = z_vals >= res.statistic
>>> ax.fill_between(z_vals[i], y1=0, y2=pdf[i], color='C0')
>>> i = z_vals <= -res.statistic
>>> ax.fill_between(z_vals[i], y1=0, y2=pdf[i], color='C0')
>>> ax.set_xlim(-1.25, 1.25)
>>> ax.set_ylim(0, 0.5)
>>> plt.show()
>>> res.pvalue
0.09108705741631495 # approximate p-value
Note that there is slight disagreement between the shaded area of the curve
and the p-value returned by `kendalltau`. This is because our data has
ties, and we have neglected a tie correction to the null distribution
variance that `kendalltau` performs. For samples without ties, the shaded
areas of our plot and p-value returned by `kendalltau` would match exactly.
If the p-value is "small" - that is, if there is a low probability of
sampling data from independent distributions that produces such an extreme
value of the statistic - this may be taken as evidence against the null
hypothesis in favor of the alternative: the distribution of total collagen
and free proline are *not* independent. Note that:
- The inverse is not true; that is, the test is not used to provide
evidence for the null hypothesis.
- The threshold for values that will be considered "small" is a choice that
should be made before the data is analyzed [8]_ with consideration of the
risks of both false positives (incorrectly rejecting the null hypothesis)
and false negatives (failure to reject a false null hypothesis).
- Small p-values are not evidence for a *large* effect; rather, they can
only provide evidence for a "significant" effect, meaning that they are
unlikely to have occurred under the null hypothesis.
For samples without ties of moderate size, `kendalltau` can compute the
p-value exactly. However, in the presence of ties, `kendalltau` resorts
    to an asymptotic approximation. Nonetheless, we can use a permutation test
to compute the null distribution exactly: Under the null hypothesis that
total collagen and free proline are independent, each of the free proline
    measurements was equally likely to have been observed with any of the
total collagen measurements. Therefore, we can form an *exact* null
distribution by calculating the statistic under each possible pairing of
elements between ``x`` and ``y``.
>>> def statistic(x): # explore all possible pairings by permuting `x`
... return stats.kendalltau(x, y).statistic # ignore pvalue
>>> ref = stats.permutation_test((x,), statistic,
... permutation_type='pairings')
>>> fig, ax = plt.subplots(figsize=(8, 5))
>>> plot(ax)
>>> bins = np.linspace(-1.25, 1.25, 25)
>>> ax.hist(ref.null_distribution, bins=bins, density=True)
    >>> ax.legend(['asymptotic approximation\n(many observations)',
... 'exact null distribution'])
>>> plot(ax)
>>> plt.show()
>>> ref.pvalue
0.12222222222222222 # exact p-value
Note that there is significant disagreement between the exact p-value
calculated here and the approximation returned by `kendalltau` above. For
small samples with ties, consider performing a permutation test for more
accurate results.
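    As a minimal worked check of the definitions in the Notes (no ties, so
    tau-b, tau-c, and tau-a coincide): for ``[1, 2, 3]`` and ``[1, 3, 2]``
    there are P = 2 concordant pairs and Q = 1 discordant pair.
    >>> res = stats.kendalltau([1, 2, 3], [1, 3, 2])
    >>> bool(np.isclose(res.statistic, (2 - 1) / 3))
    True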
"""
if initial_lexsort is not _NoValue:
        msg = ("'kendalltau' keyword argument 'initial_lexsort' is deprecated"
               " as it is unused and will be removed in SciPy 1.14.0.")
warnings.warn(msg, DeprecationWarning, stacklevel=2)
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same "
f"size, found x-size {x.size} and y-size {y.size}")
elif not x.size or not y.size:
# Return NaN if arrays are empty
res = SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
res = SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
if variant == 'b':
return mstats_basic.kendalltau(x, y, method=method, use_ties=True,
alternative=alternative)
else:
message = ("nan_policy='omit' is currently compatible only with "
"variant='b'.")
raise ValueError(message)
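    # The helper below returns, for one ranking, the number of tied pairs
    # and the two higher-order tie sums that appear in the asymptotic
    # variance of `con_minus_dis` (used in the 'asymptotic' branch below).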
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
# Python ints to avoid overflow down the line
return (int((cnt * (cnt - 1) // 2).sum()),
int((cnt * (cnt - 1.) * (cnt - 2)).sum()),
int((cnt * (cnt - 1.) * (2*cnt + 5)).sum()))
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
ntie = int((cnt * (cnt - 1) // 2).sum()) # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
res = SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
if variant == 'b':
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
elif variant == 'c':
minclasses = min(len(set(x)), len(set(y)))
tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses)
else:
raise ValueError(f"Unknown variant of the method chosen: {variant}. "
"variant must be 'b' or 'c'.")
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# The p-value calculation is the same for all variants since the p-value
# depends only on con_minus_dis.
if method == 'exact' and (xtie != 0 or ytie != 0):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (xtie == 0 and ytie == 0) and (size <= 33 or
min(dis, tot-dis) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if xtie == 0 and ytie == 0 and method == 'exact':
pvalue = mstats_basic._kendall_p_exact(size, tot-dis, alternative)
elif method == 'asymptotic':
# con_minus_dis is approx normally distributed with this variance [3]_
m = size * (size - 1.)
var = ((m * (2*size + 5) - x1 - y1) / 18 +
(2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2)))
z = con_minus_dis / np.sqrt(var)
_, pvalue = _normtest_finish(z, alternative)
else:
raise ValueError(f"Unknown method {method} specified. Use 'auto', "
"'exact' or 'asymptotic'.")
# create result object with alias for backward compatibility
res = SignificanceResult(tau, pvalue)
res.correlation = tau
return res
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""Compute a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element (higher importance ranks being
associated with smaller values, e.g., 0 is the highest possible rank),
and a weigher function, which assigns a weight based on the rank to
each element. The weight of an exchange is then the sum or the product
of the weights of the ranks of the exchanged elements. The default
parameters compute :math:`\tau_\mathrm h`: an exchange between
elements with rank :math:`r` and :math:`s` (starting from zero) has
weight :math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
    external criterion of importance. If, as usually happens, you do
not have in mind a specific rank, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters. Note that the convention used
here for ranking (lower values imply higher importance) is opposite
to that used by other SciPy statistical functions.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
The default, None, provides hyperbolic weighing, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
res: SignificanceResult
An object containing attributes:
statistic : float
The weighted :math:`\tau` correlation index.
pvalue : float
Presently ``np.nan``, as the null distribution of the statistic is
unknown (even in the additive hyperbolic case).
See Also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> res = stats.weightedtau(x, y)
>>> res.statistic
-0.56694968153682723
>>> res.pvalue
nan
>>> res = stats.weightedtau(x, y, additive=False)
>>> res.statistic
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> res = stats.weightedtau(x, y)
>>> res.statistic
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> res = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> res.statistic
-0.47140452079103173
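    As a sketch of the default weighing described above, passing the
    hyperbolic weigher explicitly should reproduce the default result:
    >>> res_default = stats.weightedtau(x, y)
    >>> res_hyperbolic = stats.weightedtau(x, y, weigher=lambda r: 1/(r + 1))
    >>> bool(np.isclose(res_default.statistic, res_hyperbolic.statistic))
    True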
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
SignificanceResult(statistic=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
SignificanceResult(statistic=-0.7181341329699028, pvalue=nan)
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be "
"of the same size, "
"found x-size {} and y-size {}".format(x.size, y.size))
if not x.size:
# Return NaN if arrays are empty
res = SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
# If there are NaNs we apply _toint64()
if np.isnan(np.sum(x)):
x = _toint64(x)
if np.isnan(np.sum(y)):
y = _toint64(y)
    # Reduce unsupported or mismatched dtypes to ranks
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
tau = (
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2
res = SignificanceResult(tau, np.nan)
res.correlation = tau
return res
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError(
"All inputs to `weightedtau` must be of the same size, "
"found x-size {} and rank-size {}".format(x.size, rank.size)
)
tau = _weightedrankedtau(x, y, rank, weigher, additive)
res = SignificanceResult(tau, np.nan)
res.correlation = tau
return res
# FROM MGCPY: https://github.com/neurodata/mgcpy
class _ParallelP:
"""Helper function to calculate parallel p-value."""
def __init__(self, x, y, random_states):
self.x = x
self.y = y
self.random_states = random_states
def __call__(self, index):
order = self.random_states[index].permutation(self.y.shape[0])
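        # permute the rows and columns of the distance matrix `y`
        # consistently, so that `permy` is the distance matrix of the
        # permuted sample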
permy = self.y[order][:, order]
# calculate permuted stats, store in null distribution
perm_stat = _mgc_stat(self.x, permy)[0]
return perm_stat
def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
r"""Helper function that calculates the p-value. See below for uses.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)`.
stat : float
The sample test statistic.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is 1000 replications.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
available to the Process. Alternatively supply a map-like callable,
such as `multiprocessing.Pool.map` for evaluating the population in
parallel. This evaluation is carried out as `workers(func, iterable)`.
Requires that `func` be pickleable.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
        If `random_state` is None (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, then that instance is used.
Returns
-------
pvalue : float
The sample test p-value.
null_dist : list
The approximated null distribution.
"""
# generate seeds for each rep (change to new parallel random number
# capabilities in numpy >= 1.17+)
random_state = check_random_state(random_state)
random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32,
size=4, dtype=np.uint32)) for _ in range(reps)]
# parallelizes with specified workers over number of reps and set seeds
parallelp = _ParallelP(x=x, y=y, random_states=random_states)
with MapWrapper(workers) as mapwrapper:
null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
# calculate p-value and significant permutation map through list
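    # add 1 to the numerator and denominator so that the estimated p-value
    # is never exactly zero (see Phipson & Smyth, 2010)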
pvalue = (1 + (null_dist >= stat).sum()) / (1 + reps)
return pvalue, null_dist
def _euclidean_dist(x):
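    # pairwise Euclidean distance matrix among the rows of x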
return cdist(x, x)
MGCResult = _make_tuple_bunch('MGCResult',
['statistic', 'pvalue', 'mgc_dict'], [])
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
workers=1, is_twosamp=False, random_state=None):
r"""Computes the Multiscale Graph Correlation (MGC) test statistic.
Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
    called the "scale". A priori, however, it is not known which scales will be
most informative. So, MGC computes all distance pairs, and then efficiently
computes the distance correlations for all scales. The local correlations
illustrate which scales are relatively informative about the relationship.
The key, therefore, to successfully discover and decipher relationships
between disparate data modalities is to adaptively determine which scales
are the most informative, and the geometric implication for the most
informative scales. Doing so not only provides an estimate of whether the
modalities are related, but also provides insight into how the
determination was made. This is especially important in high-dimensional
data, where simple visualizations do not reveal relationships to the
unaided human eye. Characterizations of this implementation in particular
    have been derived from and benchmarked in [2]_.
Parameters
----------
x, y : ndarray
If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
the number of samples and `p` and `q` are the number of dimensions,
then the MGC independence test will be run. Alternatively, ``x`` and
``y`` can have shapes ``(n, n)`` if they are distance or similarity
        matrices, and ``compute_distance`` must be set to ``None``. If ``x``
and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
two-sample MGC test will be run.
compute_distance : callable, optional
A function that computes the distance or similarity among the samples
within each data matrix. Set to ``None`` if ``x`` and ``y`` are
already distance matrices. The default uses the euclidean norm metric.
If you are calling a custom function, either create the distance
matrix before-hand or create a function of the form
``compute_distance(x)`` where `x` is the data matrix for which
pairwise distances are calculated.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is ``1000``.
workers : int or map-like callable, optional
If ``workers`` is an int the population is subdivided into ``workers``
sections and evaluated in parallel (uses ``multiprocessing.Pool
<multiprocessing>``). Supply ``-1`` to use all cores available to the
Process. Alternatively supply a map-like callable, such as
``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable. The default is ``1``.
is_twosamp : bool, optional
If `True`, a two sample test will be run. If ``x`` and ``y`` have
        shapes ``(n, p)`` and ``(m, p)``, this option will be overridden and
set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
``(n, p)`` and a two sample test is desired. The default is ``False``.
Note that this will not run if inputs are distance matrices.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
        If `random_state` is None (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, then that instance is used.
Returns
-------
res : MGCResult
An object containing attributes:
statistic : float
The sample MGC test statistic within `[-1, 1]`.
pvalue : float
The p-value obtained via permutation.
mgc_dict : dict
Contains additional useful results:
- mgc_map : ndarray
A 2D representation of the latent geometry of the
relationship.
- opt_scale : (int, int)
The estimated optimal scale as a `(x, y)` pair.
- null_dist : list
The null distribution derived from the permuted matrices.
See Also
--------
pearsonr : Pearson correlation coefficient and p-value for testing
non-correlation.
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
Notes
-----
A description of the process of MGC and applications on neuroscience data
can be found in [1]_. It is performed using the following steps:
#. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
modified to be mean zero columnwise. This results in two
:math:`n \times n` distance matrices :math:`A` and :math:`B` (the
centering and unbiased modification) [3]_.
#. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
* The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
are calculated for each property. Here, :math:`G_k (i, j)` indicates
the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
         and :math:`H_l (i, j)` indicates the :math:`l`-smallest values of
         the :math:`i`-th row of :math:`B`.
       * Let :math:`\circ` denote the entry-wise matrix product; then local
correlations are summed and normalized using the following statistic:
.. math::
c^{kl} = \frac{\sum_{ij} A G_k B H_l}
{\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
#. The MGC test statistic is the smoothed optimal local correlation of
       :math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)`
       (which essentially sets all isolated large correlations to 0 and leaves
       connected large correlations unchanged; see [3]_). MGC is,
.. math::
MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
\right)
    The test statistic takes a value in :math:`(-1, 1)` since it is
    normalized.
The p-value returned is calculated using a permutation test. This process
is completed by first randomly permuting :math:`y` to estimate the null
distribution and then calculating the probability of observing a test
statistic, under the null, at least as extreme as the observed test
statistic.
MGC requires at least 5 samples to run with reliable results. It can also
handle high-dimensional data sets.
In addition, by manipulating the input data matrices, the two-sample
testing problem can be reduced to the independence testing problem [4]_.
    Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n` and
    :math:`p \times m`, data matrices :math:`X` and :math:`Y` can be created as
follows:
.. math::
X = [U | V] \in \mathcal{R}^{p \times (n + m)}
Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
Then, the MGC statistic can be calculated as normal. This methodology can
be extended to similar tests such as distance correlation [4]_.
.. versionadded:: 1.4.0
References
----------
.. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
Maggioni, M., & Shen, C. (2019). Discovering and deciphering
relationships across disparate data modalities. ELife.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
mgcpy: A Comprehensive High Dimensional Independence Testing Python
Package. :arXiv:`1907.02088`
.. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
correlation to multiscale graph correlation. Journal of the American
Statistical Association.
.. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
Distance and Kernel Methods for Hypothesis Testing.
:arXiv:`1806.05514`
Examples
--------
>>> import numpy as np
>>> from scipy.stats import multiscale_graphcorr
>>> x = np.arange(100)
>>> y = x
>>> res = multiscale_graphcorr(x, y)
>>> res.statistic, res.pvalue
(1.0, 0.001)
To run an unpaired two-sample test,
>>> x = np.arange(100)
>>> y = np.arange(79)
>>> res = multiscale_graphcorr(x, y)
>>> res.statistic, res.pvalue # doctest: +SKIP
(0.033258146255703246, 0.023)
    or, if the shapes of the inputs are the same,
>>> x = np.arange(100)
>>> y = x
>>> res = multiscale_graphcorr(x, y, is_twosamp=True)
>>> res.statistic, res.pvalue # doctest: +SKIP
(-0.008021809890200488, 1.0)
"""
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
raise ValueError("x and y must be ndarrays")
# convert arrays of type (n,) to (n, 1)
if x.ndim == 1:
x = x[:, np.newaxis]
elif x.ndim != 2:
raise ValueError("Expected a 2-D array `x`, found shape "
"{}".format(x.shape))
if y.ndim == 1:
y = y[:, np.newaxis]
elif y.ndim != 2:
raise ValueError("Expected a 2-D array `y`, found shape "
"{}".format(y.shape))
nx, px = x.shape
ny, py = y.shape
# check for NaNs
_contains_nan(x, nan_policy='raise')
_contains_nan(y, nan_policy='raise')
# check for positive or negative infinity and raise error
if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
raise ValueError("Inputs contain infinities")
if nx != ny:
if px == py:
# reshape x and y for two sample testing
is_twosamp = True
else:
raise ValueError("Shape mismatch, x and y must have shape [n, p] "
"and [n, q] or have shape [n, p] and [m, p].")
if nx < 5 or ny < 5:
raise ValueError("MGC requires at least 5 samples to give reasonable "
"results.")
# convert x and y to float
x = x.astype(np.float64)
y = y.astype(np.float64)
    # check that `compute_distance` is a callable (or None)
if not callable(compute_distance) and compute_distance is not None:
raise ValueError("Compute_distance must be a function.")
    # `reps` must be a positive integer; values under 1000 trigger a
    # warning below
    if not isinstance(reps, int) or reps < 1:
raise ValueError("Number of reps must be an integer greater than 0.")
elif reps < 1000:
msg = ("The number of replications is low (under 1000), and p-value "
"calculations may be unreliable. Use the p-value result, with "
"caution!")
warnings.warn(msg, RuntimeWarning)
if is_twosamp:
if compute_distance is None:
raise ValueError("Cannot run if inputs are distance matrices")
x, y = _two_sample_transform(x, y)
if compute_distance is not None:
# compute distance matrices for x and y
x = compute_distance(x)
y = compute_distance(y)
# calculate MGC stat
stat, stat_dict = _mgc_stat(x, y)
stat_mgc_map = stat_dict["stat_mgc_map"]
opt_scale = stat_dict["opt_scale"]
# calculate permutation MGC p-value
pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
random_state=random_state)
# save all stats (other than stat/p-value) in dictionary
mgc_dict = {"mgc_map": stat_mgc_map,
"opt_scale": opt_scale,
"null_dist": null_dist}
# create result object with alias for backward compatibility
res = MGCResult(stat, pvalue, mgc_dict)
res.stat = stat
return res
def _mgc_stat(distx, disty):
r"""Helper function that calculates the MGC stat. See above for use.
Parameters
----------
distx, disty : ndarray
`distx` and `disty` have shapes `(n, p)` and `(n, q)` or
`(n, n)` and `(n, n)`
if distance matrices.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
stat_dict : dict
        Contains additional useful returns with the following keys:
- stat_mgc_map : ndarray
MGC-map of the statistics.
- opt_scale : (float, float)
The estimated optimal scale as a `(x, y)` pair.
"""
# calculate MGC map and optimal scale
stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
n, m = stat_mgc_map.shape
if m == 1 or n == 1:
        # the global scale is the statistic calculated at the maximal nearest
        # neighbors. There is not enough local scale to search over, so
        # default to the global scale
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = m * n
else:
samp_size = len(distx) - 1
# threshold to find connected region of significant local correlations
sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
# maximum within the significant region
stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
stat_dict = {"stat_mgc_map": stat_mgc_map,
"opt_scale": opt_scale}
return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
    # the global scale is the statistic calculated at the maximal nearest
    # neighbors. The threshold is the maximum over the global and local scales
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = _measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""Finds the smoothed maximal within the significant region R.
If area of R is too small it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map : ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
    # the global scale is the statistic calculated at the maximal nearest
    # neighbors. By default, the statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""Helper function that concatenates x and y for two sample MGC stat.
See above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`.
Returns
-------
x : ndarray
        `u` and `v` concatenated along ``axis=0``. `x` thus has shape
        `(n + m, p)`.
    y : ndarray
        Label matrix for `x` where 0 refers to samples that come from `u` and
        1 refers to samples that come from `v`. `y` thus has shape
        `(n + m, 1)`.
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
# INFERENTIAL STATISTICS #
#####################################
TtestResultBase = _make_tuple_bunch('TtestResultBase',
['statistic', 'pvalue'], ['df'])
class TtestResult(TtestResultBase):
"""
Result of a t-test.
See the documentation of the particular t-test function for more
information about the definition of the statistic and meaning of
the confidence interval.
Attributes
----------
statistic : float or array
The t-statistic of the sample.
pvalue : float or array
The p-value associated with the given alternative.
df : float or array
The number of degrees of freedom used in calculation of the
t-statistic; this is one less than the size of the sample
(``a.shape[axis]-1`` if there are no masked elements or omitted NaNs).
Methods
-------
confidence_interval
Computes a confidence interval around the population statistic
for the given confidence level.
The confidence interval is returned in a ``namedtuple`` with
fields `low` and `high`.
"""
def __init__(self, statistic, pvalue, df, # public
alternative, standard_error, estimate): # private
super().__init__(statistic, pvalue, df=df)
self._alternative = alternative
self._standard_error = standard_error # denominator of t-statistic
self._estimate = estimate # point estimate of sample mean
def confidence_interval(self, confidence_level=0.95):
"""
Parameters
----------
confidence_level : float
The confidence level for the calculation of the population mean
confidence interval. Default is 0.95.
Returns
-------
ci : namedtuple
The confidence interval is returned in a ``namedtuple`` with
fields `low` and `high`.
"""
low, high = _t_confidence_interval(self.df, self.statistic,
confidence_level, self._alternative)
low = low * self._standard_error + self._estimate
high = high * self._standard_error + self._estimate
return ConfidenceInterval(low=low, high=high)
def pack_TtestResult(statistic, pvalue, df, alternative, standard_error,
estimate):
# this could be any number of dimensions (including 0d), but there is
# at most one unique non-NaN value
alternative = np.atleast_1d(alternative) # can't index 0D object
alternative = alternative[np.isfinite(alternative)]
alternative = alternative[0] if alternative.size else np.nan
return TtestResult(statistic, pvalue, df=df, alternative=alternative,
standard_error=standard_error, estimate=estimate)
def unpack_TtestResult(res):
return (res.statistic, res.pvalue, res.df, res._alternative,
res._standard_error, res._estimate)
@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
result_to_tuple=unpack_TtestResult, n_outputs=6)
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
alternative="two-sided"):
"""Calculate the T-test for the mean of ONE group of scores.
This is a test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then its length along
`axis` must equal 1, and it must otherwise be broadcastable with `a`.
axis : int or None, optional
Axis along which to compute test; default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the mean of the underlying distribution of the sample
is different than the given population mean (`popmean`)
* 'less': the mean of the underlying distribution of the sample is
less than the given population mean (`popmean`)
* 'greater': the mean of the underlying distribution of the sample is
greater than the given population mean (`popmean`)
Returns
-------
result : `~scipy.stats._result_classes.TtestResult`
An object with the following attributes:
statistic : float or array
The t-statistic.
pvalue : float or array
The p-value associated with the given alternative.
df : float or array
The number of degrees of freedom used in calculation of the
t-statistic; this is one less than the size of the sample, i.e.
``a.shape[axis] - 1``.
.. versionadded:: 1.10.0
The object also has the following method:
confidence_interval(confidence_level=0.95)
Computes a confidence interval around the population
mean for the given confidence level.
The confidence interval is returned in a ``namedtuple`` with
fields `low` and `high`.
.. versionadded:: 1.10.0
Notes
-----
The statistic is calculated as ``(np.mean(a) - popmean)/se``, where
``se`` is the standard error. Therefore, the statistic will be positive
when the sample mean is greater than the population mean and negative when
the sample mean is less than the population mean.
Examples
--------
Suppose we wish to test the null hypothesis that the mean of a population
is equal to 0.5. We choose a confidence level of 99%; that is, we will
reject the null hypothesis in favor of the alternative if the p-value is
less than 0.01.
When testing random variates from the standard uniform distribution, which
has a mean of 0.5, we expect the data to be consistent with the null
hypothesis most of the time.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs = stats.uniform.rvs(size=50, random_state=rng)
>>> stats.ttest_1samp(rvs, popmean=0.5)
TtestResult(statistic=2.456308468440, pvalue=0.017628209047638, df=49)
As expected, the p-value of 0.017 is not below our threshold of 0.01, so
we cannot reject the null hypothesis.
When testing data from the standard *normal* distribution, which has a mean
of 0, we would expect the null hypothesis to be rejected.
>>> rvs = stats.norm.rvs(size=50, random_state=rng)
>>> stats.ttest_1samp(rvs, popmean=0.5)
TtestResult(statistic=-7.433605518875, pvalue=1.416760157221e-09, df=49)
Indeed, the p-value is lower than our threshold of 0.01, so we reject the
null hypothesis in favor of the default "two-sided" alternative: the mean
of the population is *not* equal to 0.5.
However, suppose we were to test the null hypothesis against the
one-sided alternative that the mean of the population is *greater* than
0.5. Since the mean of the standard normal is less than 0.5, we would not
expect the null hypothesis to be rejected.
>>> stats.ttest_1samp(rvs, popmean=0.5, alternative='greater')
TtestResult(statistic=-7.433605518875, pvalue=0.99999999929, df=49)
Unsurprisingly, with a p-value greater than our threshold, we would not
reject the null hypothesis.
Note that when working with a confidence level of 99%, a true null
hypothesis will be rejected approximately 1% of the time.
>>> rvs = stats.uniform.rvs(size=(100, 50), random_state=rng)
>>> res = stats.ttest_1samp(rvs, popmean=0.5, axis=1)
>>> np.sum(res.pvalue < 0.01)
1
Indeed, even though all 100 samples above were drawn from the standard
uniform distribution, which *does* have a population mean of 0.5, we would
mistakenly reject the null hypothesis for one of them.
`ttest_1samp` can also compute a confidence interval around the population
mean.
>>> rvs = stats.norm.rvs(size=50, random_state=rng)
>>> res = stats.ttest_1samp(rvs, popmean=0)
>>> ci = res.confidence_interval(confidence_level=0.95)
>>> ci
ConfidenceInterval(low=-0.3193887540880017, high=0.2898583388980972)
The bounds of the 95% confidence interval are the
minimum and maximum values of the parameter `popmean` for which the
p-value of the test would be 0.05.
>>> res = stats.ttest_1samp(rvs, popmean=ci.low)
>>> np.testing.assert_allclose(res.pvalue, 0.05)
>>> res = stats.ttest_1samp(rvs, popmean=ci.high)
>>> np.testing.assert_allclose(res.pvalue, 0.05)
Under certain assumptions about the population from which a sample
is drawn, the confidence interval with confidence level 95% is expected
to contain the true population mean in 95% of sample replications.
>>> rvs = stats.norm.rvs(size=(50, 1000), loc=1, random_state=rng)
>>> res = stats.ttest_1samp(rvs, popmean=0)
>>> ci = res.confidence_interval()
>>> contains_pop_mean = (ci.low < 1) & (ci.high > 1)
>>> contains_pop_mean.sum()
953
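As an illustrative cross-check of the formula given in the Notes, the
statistic can be reproduced manually from the most recent `rvs` and `res`
above:
>>> se = np.std(rvs, axis=0, ddof=1) / np.sqrt(rvs.shape[0])
>>> np.testing.assert_allclose(res.statistic, rvs.mean(axis=0) / se)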
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
df = n - 1
mean = np.mean(a, axis)
try:
popmean = np.squeeze(popmean, axis=axis)
except ValueError as e:
raise ValueError("`popmean.shape[axis]` must equal 1.") from e
d = mean - popmean
v = _var(a, axis, ddof=1)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
# when nan_policy='omit', `df` can be different for different axis-slices
df = np.broadcast_to(df, t.shape)[()]
# _axis_nan_policy decorator doesn't play well with strings
alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative]
return TtestResult(t, prob, df=df, alternative=alternative_num,
standard_error=denom, estimate=mean)
def _t_confidence_interval(df, t, confidence_level, alternative):
# Input validation on `alternative` is already done
# We just need IV on confidence_level
if confidence_level < 0 or confidence_level > 1:
message = "`confidence_level` must be a number between 0 and 1."
raise ValueError(message)
if alternative < 0: # 'less'
p = confidence_level
low, high = np.broadcast_arrays(-np.inf, special.stdtrit(df, p))
elif alternative > 0: # 'greater'
p = 1 - confidence_level
low, high = np.broadcast_arrays(special.stdtrit(df, p), np.inf)
elif alternative == 0: # 'two-sided'
tail_probability = (1 - confidence_level)/2
p = tail_probability, 1-tail_probability
# axis of p must be the zeroth and orthogonal to all the rest
p = np.reshape(p, [2] + [1]*np.asarray(df).ndim)
low, high = special.stdtrit(df, p)
else: # alternative is NaN when input is empty (see _axis_nan_policy)
p, nans = np.broadcast_arrays(t, np.nan)
low, high = nans, nans
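# Note: callers (e.g. TtestResult.confidence_interval) rescale these
# standardized bounds as ``bound * standard_error + estimate`` to obtain
# an interval on the scale of the original data.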
return low[()], high[()]
def _ttest_finish(df, t, alternative):
"""Common code between all 3 t-test functions."""
# We use ``stdtr`` directly here as it handles the case when ``nan``
# values are present in the data and masked arrays are passed
# while ``t.cdf`` emits runtime warnings. This way ``_ttest_finish``
# can be shared between the ``stats`` and ``mstats`` versions.
if alternative == 'less':
pval = special.stdtr(df, t)
elif alternative == 'greater':
pval = special.stdtr(df, -t)
elif alternative == 'two-sided':
pval = special.stdtr(df, -np.abs(t))*2
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if t.ndim == 0:
t = t[()]
if pval.ndim == 0:
pval = pval[()]
return t, pval
def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
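# Welch-Satterthwaite approximation to the degrees of freedom; see
# https://en.wikipedia.org/wiki/Welch%27s_t-test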
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
# If there is a single observation in one sample, this formula for pooled
# variance breaks down because the variance of that sample is undefined.
# The pooled variance is still defined, though, because the (n-1) in the
# numerator should cancel with the (n-1) in the denominator, leaving only
# the sum of squared differences from the mean: zero.
v1 = np.where(n1 == 1, 0, v1)[()]
v2 = np.where(n2 == 1, 0, v2)[()]
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True, alternative="two-sided"):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The corrected sample standard deviation of sample 1 (i.e. ``ddof=1``).
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The corrected sample standard deviation of sample 2 (i.e. ``ddof=1``).
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions are unequal.
* 'less': the mean of the first distribution is less than the
mean of the second distribution.
* 'greater': the mean of the first distribution is greater than the
mean of the second distribution.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
The statistic is calculated as ``(mean1 - mean2)/se``, where ``se`` is the
standard error. Therefore, the statistic will be positive when `mean1` is
greater than `mean2` and negative when `mean1` is less than `mean2`.
This method does not check whether any of the elements of `std1` or `std2`
are negative. If any elements of the `std1` or `std2` parameters are
negative in a call to this method, this method will return the same result
as if it were passed ``numpy.abs(std1)`` and ``numpy.abs(std2)``,
respectively, instead; no exceptions or warnings will be emitted.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows (with the
Sample Variance being the corrected sample variance)::
Sample Sample
Size Mean Variance
Sample 1 13 15.0 87.5
Sample 2 11 12.0 39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> import numpy as np
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
Number of Sample Sample
Size ones Mean Variance
Sample 1 150 30 0.2 0.161073
Sample 2 200 45 0.225 0.175251
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.161073), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.175251), nobs2=200)
Ttest_indResult(statistic=-0.5627187905196761, pvalue=0.5739887114209541)
For comparison, we could compute the t statistic and p-value using
arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
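As an illustrative check of the formula in the Notes, the statistic of the
first example can also be reproduced by hand from the summary data:
>>> svar = (12 * 87.5 + 10 * 39.0) / 22  # pooled sample variance
>>> se = np.sqrt(svar * (1/13 + 1/11))   # pooled standard error
>>> np.testing.assert_allclose((15.0 - 12.0) / se, 0.9051358093310269)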
"""
mean1 = np.asarray(mean1)
std1 = np.asarray(std1)
mean2 = np.asarray(mean2)
std2 = np.asarray(std2)
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
return Ttest_indResult(*res)
@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
result_to_tuple=unpack_TtestResult, n_outputs=6)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate',
permutations=None, random_state=None, alternative="two-sided",
trim=0):
"""
Calculate the T-test for the means of *two independent* samples of scores.
This is a test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
The 'omit' option is not currently available for permutation tests or
one-sided asymptotic tests.
permutations : non-negative int, np.inf, or None (default), optional
If 0 or None (default), use the t-distribution to calculate p-values.
Otherwise, `permutations` is the number of random permutations that
will be used to estimate p-values using a permutation test. If
`permutations` equals or exceeds the number of distinct partitions of
the pooled data, an exact test is performed instead (i.e. each
distinct partition is used exactly once). See Notes for details.
.. versionadded:: 1.7.0
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `random_state` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Pseudorandom number generator state used to generate permutations
(used only when `permutations` is not None).
.. versionadded:: 1.7.0
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
trim : float, optional
If nonzero, performs a trimmed (Yuen's) t-test.
Defines the fraction of elements to be trimmed from each end of the
input samples. If 0 (default), no elements will be trimmed from either
side. The number of trimmed elements from each tail is the floor of the
trim times the number of elements. Valid range is [0, .5).
.. versionadded:: 1.7
Returns
-------
result : `~scipy.stats._result_classes.TtestResult`
An object with the following attributes:
statistic : float or ndarray
The t-statistic.
pvalue : float or ndarray
The p-value associated with the given alternative.
df : float or ndarray
The number of degrees of freedom used in calculation of the
t-statistic. This is always NaN for a permutation t-test.
.. versionadded:: 1.11.0
The object also has the following method:
confidence_interval(confidence_level=0.95)
Computes a confidence interval around the difference in
population means for the given confidence level.
The confidence interval is returned in a ``namedtuple`` with
fields ``low`` and ``high``.
When a permutation t-test is performed, the confidence interval
is not computed, and fields ``low`` and ``high`` contain NaN.
.. versionadded:: 1.11.0
Notes
-----
Suppose we observe two independent samples, e.g. flower petal lengths, and
we are considering whether the two samples were drawn from the same
population (e.g. the same species of flower or two species with similar
petal characteristics) or two different populations.
The t-test quantifies the difference between the arithmetic means
of the two samples. The p-value quantifies the probability of observing
as or more extreme values assuming the null hypothesis, that the
samples are drawn from populations with the same population means, is true.
A p-value larger than a chosen threshold (e.g. 5% or 1%) indicates that
our observation is not so unlikely to have occurred by chance. Therefore,
we do not reject the null hypothesis of equal population means.
If the p-value is smaller than our threshold, then we have evidence
against the null hypothesis of equal population means.
By default, the p-value is determined by comparing the t-statistic of the
observed data against a theoretical t-distribution.
When ``1 < permutations < binom(n, k)``, where
* ``k`` is the number of observations in `a`,
* ``n`` is the total number of observations in `a` and `b`, and
* ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
the data are pooled (concatenated), randomly assigned to either group `a`
or `b`, and the t-statistic is calculated. This process is performed
repeatedly (`permutation` times), generating a distribution of the
t-statistic under the null hypothesis, and the t-statistic of the observed
data is compared to this distribution to determine the p-value.
Specifically, the p-value reported is the "achieved significance level"
(ASL) as defined in 4.4 of [3]_. Note that there are other ways of
estimating p-values using randomized permutation tests; for other
options, see the more general `permutation_test`.
When ``permutations >= binom(n, k)``, an exact test is performed: the data
are partitioned between the groups in each distinct way exactly once.
The permutation test can be computationally expensive and not necessarily
more accurate than the analytical test, but it does not make strong
assumptions about the shape of the underlying distribution.
Use of trimming is commonly referred to as the trimmed t-test. At times
called Yuen's t-test, this is an extension of Welch's t-test, with the
difference being the use of winsorized means in calculation of the variance
and the trimmed sample size in calculation of the statistic. Trimming is
recommended if the underlying distribution is long-tailed or contaminated
with outliers [4]_.
The statistic is calculated as ``(np.mean(a) - np.mean(b))/se``, where
``se`` is the standard error. Therefore, the statistic will be positive
when the sample mean of `a` is greater than the sample mean of `b` and
negative when the sample mean of `a` is less than the sample mean of
`b`.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
.. [3] B. Efron and T. Hastie. Computer Age Statistical Inference. (2016).
.. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population
Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170. JSTOR,
www.jstor.org/stable/2334299. Accessed 30 Mar. 2021.
.. [5] Yuen, Karen K., and W. J. Dixon. "The Approximate Behaviour and
Performance of the Two-Sample Trimmed t." Biometrika, vol. 60,
no. 2, 1973, pp. 369-374. JSTOR, www.jstor.org/stable/2334550.
Accessed 30 Mar. 2021.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs2)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952038870015)
>>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952553131064)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs3)
Ttest_indResult(statistic=-1.6370984482905417, pvalue=0.1019251574705033)
>>> stats.ttest_ind(rvs1, rvs3, equal_var=False)
Ttest_indResult(statistic=-1.637098448290542, pvalue=0.10202110497954867)
When ``n1 != n2``, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs4)
Ttest_indResult(statistic=-1.9481646859513422, pvalue=0.05186270935842703)
>>> stats.ttest_ind(rvs1, rvs4, equal_var=False)
Ttest_indResult(statistic=-1.3146566100751664, pvalue=0.1913495266513811)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs5)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0046418707568707885)
>>> stats.ttest_ind(rvs1, rvs5, equal_var=False)
Ttest_indResult(statistic=-1.8686598649188084, pvalue=0.06434714193919686)
When performing a permutation test, more permutations typically yields
more accurate results. Use a ``np.random.Generator`` to ensure
reproducibility:
>>> stats.ttest_ind(rvs1, rvs5, permutations=10000,
... random_state=rng)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0052994700529947)
Take these two samples, one of which has an extreme tail.
>>> a = (56, 128.6, 12, 123.8, 64.34, 78, 763.3)
>>> b = (1.1, 2.9, 4.2)
Use the `trim` keyword to perform a trimmed (Yuen) t-test. For example,
using 20% trimming, ``trim=.2``, the test will reduce the impact of one
(``np.floor(trim*len(a))``) element from each tail of sample `a`. It will
have no effect on sample `b` because ``np.floor(trim*len(b))`` is 0.
>>> stats.ttest_ind(a, b, trim=.2)
Ttest_indResult(statistic=3.4463884028073513, pvalue=0.01369338726499547)
"""
if not (0 <= trim < .5):
raise ValueError("Trimming percentage should be 0 <= `trim` < .5.")
NaN = _get_nan(a, b)
if a.size == 0 or b.size == 0:
# _axis_nan_policy decorator ensures this only happens with 1d input
return TtestResult(NaN, NaN, df=NaN, alternative=NaN,
standard_error=NaN, estimate=NaN)
if permutations is not None and permutations != 0:
if trim != 0:
raise ValueError("Permutations are currently not supported "
"with trimming.")
if permutations < 0 or (np.isfinite(permutations) and
int(permutations) != permutations):
raise ValueError("Permutations must be a non-negative integer.")
t, prob = _permutation_ttest(a, b, permutations=permutations,
axis=axis, equal_var=equal_var,
nan_policy=nan_policy,
random_state=random_state,
alternative=alternative)
df, denom, estimate = NaN, NaN, NaN
else:
n1 = a.shape[axis]
n2 = b.shape[axis]
if trim == 0:
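# implementation note: under ``equal_var``, a sample of length 1 is
# permitted (its undefined variance is replaced by 0 later, in
# _equal_var_ttest_denom), so the 0/0 warnings that `_var` would emit
# for it are suppressed here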
if equal_var:
old_errstate = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
v1 = _var(a, axis, ddof=1)
v2 = _var(b, axis, ddof=1)
if equal_var:
np.seterr(**old_errstate)
m1 = np.mean(a, axis)
m2 = np.mean(b, axis)
else:
v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis)
v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis)
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
t, prob = _ttest_ind_from_stats(m1, m2, denom, df, alternative)
# when nan_policy='omit', `df` can be different for different axis-slices
df = np.broadcast_to(df, t.shape)[()]
estimate = m1-m2
# _axis_nan_policy decorator doesn't play well with strings
alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative]
return TtestResult(t, prob, df=df, alternative=alternative_num,
standard_error=denom, estimate=estimate)
def _ttest_trim_var_mean_len(a, trim, axis):
"""Variance, mean, and length of winsorized input along specified axis"""
# for use with `ttest_ind` when trimming.
# further calculations in this test assume that the inputs are sorted.
# From [4] Section 1 "Let x_1, ..., x_n be n ordered observations..."
a = np.sort(a, axis=axis)
# `g` is the number of elements to be replaced on each tail, converted
# from a percentage amount of trimming
n = a.shape[axis]
g = int(n * trim)
# Calculate the Winsorized variance of the input samples according to
# specified `g`
v = _calculate_winsorized_variance(a, g, axis)
# the total number of elements in the trimmed samples
n -= 2 * g
# calculate the g-times trimmed mean, as defined in [4] (1-1)
m = trim_mean(a, trim, axis=axis)
return v, m, n
def _calculate_winsorized_variance(a, g, axis):
"""Calculates g-times winsorized variance along specified axis"""
# it is expected that the input `a` is sorted along the correct axis
if g == 0:
return _var(a, ddof=1, axis=axis)
# move the intended axis to the end that way it is easier to manipulate
a_win = np.moveaxis(a, axis, -1)
# save where NaNs are for later use.
nans_indices = np.any(np.isnan(a_win), axis=-1)
# Winsorization and variance calculation are done in one step in [4]
# (1-3), but here winsorization is done first; replace the left and
# right sides with the repeating value. This can be seen in effect in
# (1-3) in [4], where the leftmost and rightmost tails are replaced with
# `(g + 1) * x_{g + 1}` on the left and `(g + 1) * x_{n - g}` on the
# right. Zero-indexing turns `g + 1` to `g`, and `n - g` to `- g - 1` in
# array indexing.
a_win[..., :g] = a_win[..., [g]]
a_win[..., -g:] = a_win[..., [-g - 1]]
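# e.g. (illustration) with a_win = [1, 2, 3, 4, 5] and g = 1, the
# winsorized sample becomes [2, 2, 3, 4, 4]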
# Determine the variance. In [4], the degrees of freedom is expressed as
# `h - 1`, where `h = n - 2g` (unnumbered equations in Section 1, end of
# page 369, beginning of page 370). This is converted to NumPy's format,
# `n - ddof` for use with `np.var`. The result is converted to an
# array to accommodate indexing later.
var_win = np.asarray(_var(a_win, ddof=(2 * g + 1), axis=-1))
# with `nan_policy='propagate'`, NaNs may be completely trimmed out
# because they were sorted into the tail of the array. In these cases,
# replace computed variances with `np.nan`.
var_win[nans_indices] = np.nan
return var_win
def _permutation_distribution_t(data, permutations, size_a, equal_var,
random_state=None):
"""Generation permutation distribution of t statistic"""
random_state = check_random_state(random_state)
# prepare permutation indices
size = data.shape[-1]
# number of distinct combinations
n_max = special.comb(size, size_a)
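# combinations suffice here: permuting observations *within* a group
# leaves the t statistic unchanged, so there are only comb(size, size_a)
# distinct assignments rather than factorial(size) orderings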
if permutations < n_max:
perm_generator = (random_state.permutation(size)
for i in range(permutations))
else:
permutations = n_max
perm_generator = (np.concatenate(z)
for z in _all_partitions(size_a, size-size_a))
t_stat = []
for indices in _batch_generator(perm_generator, batch=50):
# get one batch from perm_generator at a time as a list
indices = np.array(indices)
# generate permutations
data_perm = data[..., indices]
# move axis indexing permutations to position 0 to broadcast
# nicely with t_stat_observed, which doesn't have this dimension
data_perm = np.moveaxis(data_perm, -2, 0)
a = data_perm[..., :size_a]
b = data_perm[..., size_a:]
t_stat.append(_calc_t_stat(a, b, equal_var))
t_stat = np.concatenate(t_stat, axis=0)
return t_stat, permutations, n_max
def _calc_t_stat(a, b, equal_var, axis=-1):
"""Calculate the t statistic along the given dimension."""
na = a.shape[axis]
nb = b.shape[axis]
avg_a = np.mean(a, axis=axis)
avg_b = np.mean(b, axis=axis)
var_a = _var(a, axis=axis, ddof=1)
var_b = _var(b, axis=axis, ddof=1)
if not equal_var:
denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)[1]
else:
denom = _equal_var_ttest_denom(var_a, na, var_b, nb)[1]
return (avg_a-avg_b)/denom
def _permutation_ttest(a, b, permutations, axis=0, equal_var=True,
nan_policy='propagate', random_state=None,
alternative="two-sided"):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores
using permutation methods.
This test is similar to `stats.ttest_ind`, except it doesn't rely on an
approximate normality assumption since it uses a permutation test.
This function is only called from ttest_ind when permutations is not None.
Parameters
----------
a, b : array_like
The arrays must be broadcastable, except along the dimension
corresponding to `axis` (the zeroth, by default).
axis : int, optional
The axis over which to operate on a and b.
permutations : int, optional
Number of permutations used to calculate p-value. If greater than or
equal to the number of distinct permutations, perform an exact test.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances.
If False, perform Welch's t-test, which does not assume equal
population variance.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `random_state` is None, the `numpy.random.RandomState` singleton is
used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Pseudorandom number generator state used for generating random
permutations.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
"""
random_state = check_random_state(random_state)
t_stat_observed = _calc_t_stat(a, b, equal_var, axis=axis)
na = a.shape[axis]
mat = _broadcast_concatenate((a, b), axis=axis)
mat = np.moveaxis(mat, axis, -1)
t_stat, permutations, n_max = _permutation_distribution_t(
mat, permutations, size_a=na, equal_var=equal_var,
random_state=random_state)
compare = {"less": np.less_equal,
"greater": np.greater_equal,
"two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y))}
# Calculate the p-values
cmps = compare[alternative](t_stat, t_stat_observed)
# Randomized test p-value calculation should use biased estimate; see e.g.
# https://www.degruyter.com/document/doi/10.2202/1544-6115.1585/
adjustment = 1 if n_max > permutations else 0
pvalues = (cmps.sum(axis=0) + adjustment) / (permutations + adjustment)
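# e.g. with 9999 random permutations of which 54 are at least as extreme
# as the observed statistic, the reported p-value is (54 + 1)/(9999 + 1)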
# nans propagate naturally in statistic calculation, but need to be
# propagated manually into pvalues
if nan_policy == 'propagate' and np.isnan(t_stat_observed).any():
if np.ndim(pvalues) == 0:
pvalues = np.float64(np.nan)
else:
pvalues[np.isnan(t_stat_observed)] = np.nan
return (t_stat_observed, pvalues)
def _get_len(a, axis, msg):
try:
n = a.shape[axis]
except IndexError:
raise np.AxisError(axis, a.ndim, msg) from None
return n
@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
result_to_tuple=unpack_TtestResult, n_outputs=6,
paired=True)
def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
"""Calculate the t-test on TWO RELATED samples of scores, a and b.
This is a test for the null hypothesis that two related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
Returns
-------
result : `~scipy.stats._result_classes.TtestResult`
An object with the following attributes:
statistic : float or array
The t-statistic.
pvalue : float or array
The p-value associated with the given alternative.
df : float or array
The number of degrees of freedom used in calculation of the
t-statistic; this is one less than the size of the sample, i.e.
``a.shape[axis] - 1``.
.. versionadded:: 1.10.0
The object also has the following method:
confidence_interval(confidence_level=0.95)
Computes a confidence interval around the difference in
population means for the given confidence level.
The confidence interval is returned in a ``namedtuple`` with
fields `low` and `high`.
.. versionadded:: 1.10.0
Notes
-----
Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
The t-statistic is calculated as ``np.mean(a - b)/se``, where ``se`` is the
standard error. Therefore, the t-statistic will be positive when the sample
mean of ``a - b`` is greater than zero and negative when the sample mean of
``a - b`` is less than zero.
References
----------
https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs2)
TtestResult(statistic=-0.4549717054410304, pvalue=0.6493274702088672, df=499)
>>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs3)
TtestResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09, df=499)
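The statistic can equivalently be obtained by applying `ttest_1samp` to
the paired differences, shown here as an illustrative cross-check:
>>> res = stats.ttest_rel(rvs1, rvs3)
>>> res_1samp = stats.ttest_1samp(rvs1 - rvs3, popmean=0)
>>> np.testing.assert_allclose(res.statistic, res_1samp.statistic)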
"""
a, b, axis = _chk2_asarray(a, b, axis)
na = _get_len(a, axis, "first argument")
nb = _get_len(b, axis, "second argument")
if na != nb:
raise ValueError('unequal length arrays')
if na == 0 or nb == 0:
# _axis_nan_policy decorator ensures this only happens with 1d input
NaN = _get_nan(a, b)
return TtestResult(NaN, NaN, df=NaN, alternative=NaN,
standard_error=NaN, estimate=NaN)
n = a.shape[axis]
df = n - 1
d = (a - b).astype(np.float64)
v = _var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t, alternative)
# when nan_policy='omit', `df` can be different for different axis-slices
df = np.broadcast_to(df, t.shape)[()]
# _axis_nan_policy decorator doesn't play well with strings
alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative]
return TtestResult(t, prob, df=df, alternative=alternative_num,
standard_error=denom, estimate=dm)
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""Count the number of non-masked elements of an array.
This function behaves like `np.ma.count`, but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def _m_broadcast_to(a, shape):
if np.ma.isMaskedArray(a):
return np.ma.masked_array(np.broadcast_to(a, shape),
mask=np.broadcast_to(a.mask, shape))
return np.broadcast_to(a, shape, subok=True)
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used:
* ``"pearson"`` (value 1)
Pearson's chi-squared statistic. In this case, the function is
equivalent to `chisquare`.
* ``"log-likelihood"`` (value 0)
Log-likelihood ratio. Also known as the G-test [3]_.
* ``"freeman-tukey"`` (value -1/2)
Freeman-Tukey statistic.
* ``"mod-log-likelihood"`` (value -1)
Modified log-likelihood ratio.
* ``"neyman"`` (value -2)
Neyman's statistic.
* ``"cressie-read"`` (value 2/3)
The power recommended in [5]_.
Returns
-------
res: Power_divergenceResult
An object containing attributes:
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `statistic` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `power_divergence` raises an error if the sums
do not agree within a relative tolerance of ``1e-8``.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> import numpy as np
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {!r}. "
"Valid strings are {}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
f_obs_float = f_obs.astype(np.float64)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
bshape = np.broadcast_shapes(f_obs_float.shape, f_exp.shape)
f_obs_float = _m_broadcast_to(f_obs_float, bshape)
f_exp = _m_broadcast_to(f_exp, bshape)
rtol = 1e-8 # to pass existing tests
with np.errstate(invalid='ignore'):
f_obs_sum = f_obs_float.sum(axis=axis)
f_exp_sum = f_exp.sum(axis=axis)
relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
np.minimum(f_obs_sum, f_exp_sum))
diff_gt_tol = (relative_diff > rtol).any()
if diff_gt_tol:
msg = (f"For each axis slice, the sum of the observed "
f"frequencies must agree with the sum of the "
f"expected frequencies to a relative tolerance "
f"of {rtol}, but the percent differences are:\n"
f"{relative_diff}")
raise ValueError(msg)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs_float - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
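# (the lambda_ == 1 branch agrees with this general expression only after
# summation, which is valid because equal totals of f_obs and f_exp are
# enforced above; the lambda_ in {0, -1} branches are the corresponding
# limits, special-cased so that `xlogy` handles zero frequencies)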
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""Calculate a one-way chi-square test.
The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
res: Power_divergenceResult
An object containing attributes:
statistic : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `statistic` are scalars.
See Also
--------
scipy.stats.power_divergence
scipy.stats.fisher_exact : Fisher exact test on a 2x2 contingency table.
scipy.stats.barnard_exact : An unconditional exact test. An alternative
to chi-squared test for small sample sizes.
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5. According to [3]_, the
total number of samples is recommended to be greater than 13,
otherwise exact tests (such as Barnard's Exact test) should be used
because they do not overreject.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `chisquare` raises an error if the sums do not
agree within a relative tolerance of ``1e-8``.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not chi-square, in which case this test
is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] Pearson, Karl. "On the criterion that a given system of deviations from the probable
in the case of a correlated system of variables is such that it can be reasonably
supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50
(1900), pp. 157-175.
.. [4] Mannan, R. William and E. Charles. Meslow. "Bird populations and
vegetation characteristics in managed and old-growth forests,
northeastern Oregon." Journal of Wildlife Management
48, 1219-1238, :doi:`10.2307/3801783`, 1984.
Examples
--------
In [4]_, bird foraging behavior was investigated in an old-growth forest
of Oregon.
In the forest, 44% of the canopy volume was Douglas fir,
24% was ponderosa pine, 29% was grand fir, and 3% was western larch.
The authors observed the behavior of several species of birds, one of
which was the red-breasted nuthatch. They made 189 observations of this
species foraging, recording 43 ("23%") of observations in Douglas fir,
52 ("28%") in ponderosa pine, 54 ("29%") in grand fir, and 40 ("21%") in
western larch.
Using a chi-square test, we can test the null hypothesis that the
proportions of foraging events are equal to the proportions of canopy
volume. The authors of the paper considered a p-value less than 1% to be
significant.
Using the above proportions of canopy volume and observed events, we can
infer expected frequencies.
>>> import numpy as np
>>> f_exp = np.array([44, 24, 29, 3]) / 100 * 189
The observed frequencies of foraging were:
>>> f_obs = np.array([43, 52, 54, 40])
We can now compare the observed frequencies with the expected frequencies.
>>> from scipy.stats import chisquare
>>> chisquare(f_obs=f_obs, f_exp=f_exp)
Power_divergenceResult(statistic=228.23515947653874, pvalue=3.3295585338846486e-49)
The p-value is well below the chosen significance level. Hence, the
authors considered the difference to be significant and concluded
that the relative proportions of foraging events were not the same
as the relative proportions of tree canopy volume.
Following are other generic examples to demonstrate how the other
parameters can be used.
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> chisquare([16, 18, 16, 14, 12, 12])
Power_divergenceResult(statistic=2.0, pvalue=0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
Power_divergenceResult(statistic=3.5, pvalue=0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
Power_divergenceResult(statistic=array([2. , 6.66666667]), pvalue=array([0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
Power_divergenceResult(statistic=23.31034482758621, pvalue=0.015975692534127565)
>>> chisquare(obs.ravel())
Power_divergenceResult(statistic=23.310344827586206, pvalue=0.01597569253412758)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
Power_divergenceResult(statistic=2.0, pvalue=0.7357588823428847)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
Power_divergenceResult(statistic=2.0, pvalue=array([0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
Power_divergenceResult(statistic=array([3.5 , 9.25]), pvalue=array([0.62338763, 0.09949846]))
""" # noqa
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
KstestResult = _make_tuple_bunch('KstestResult', ['statistic', 'pvalue'],
['statistic_location', 'statistic_sign'])
def _compute_dplus(cdfvals, x):
"""Computes D+ as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals : array_like
Sorted array of CDF values between 0 and 1
x : array_like
Sorted array of the stochastic variable itself
Returns
-------
res: Pair with the following elements:
- The maximum distance of the CDF values below Uniform(0, 1).
- The location at which the maximum is reached.
"""
n = len(cdfvals)
dplus = (np.arange(1.0, n + 1) / n - cdfvals)
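# illustration (hypothetical numbers): with n = 4 and
# cdfvals = [0.1, 0.2, 0.8, 0.9], dplus is [0.15, 0.3, -0.05, 0.1],
# so D+ = 0.3, attained at the second sorted observation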
amax = dplus.argmax()
loc_max = x[amax]
return (dplus[amax], loc_max)
def _compute_dminus(cdfvals, x):
"""Computes D- as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals : array_like
Sorted array of CDF values between 0 and 1
x : array_like
Sorted array of the stochastic variable itself
Returns
-------
res: Pair with the following elements:
- Maximum distance of the CDF values above Uniform(0, 1)
- The location at which the maximum is reached.
"""
n = len(cdfvals)
dminus = (cdfvals - np.arange(0.0, n)/n)
amax = dminus.argmax()
loc_max = x[amax]
return (dminus[amax], loc_max)
@_rename_parameter("mode", "method")
def ks_1samp(x, cdf, args=(), alternative='two-sided', method='auto'):
"""
Performs the one-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying distribution F(x) of a sample
against a given continuous distribution G(x). See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
x : array_like
a 1-D array of observations of iid random variables.
cdf : callable
callable used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used with `cdf`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
method : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice
the one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
res: KstestResult
An object containing attributes:
statistic : float
KS test statistic, either D+, D-, or D (the maximum of the two)
pvalue : float
One-tailed or two-tailed p-value.
statistic_location : float
Value of `x` corresponding with the KS statistic; i.e., the
distance between the empirical distribution function and the
hypothesized cumulative distribution function is measured at this
observation.
statistic_sign : int
+1 if the KS statistic is the maximum positive difference between
the empirical distribution function and the hypothesized cumulative
distribution function (D+); -1 if the KS statistic is the maximum
negative difference (D-).
See Also
--------
ks_2samp, kstest
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
Suppose we wish to test the null hypothesis that a sample is distributed
according to the standard normal.
We choose a confidence level of 95%; that is, we will reject the null
hypothesis in favor of the alternative if the p-value is less than 0.05.
When testing uniformly distributed data, we would expect the
null hypothesis to be rejected.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> stats.ks_1samp(stats.uniform.rvs(size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.5001899973268688, pvalue=1.1616392184763533e-23)
Indeed, the p-value is lower than our threshold of 0.05, so we reject the
null hypothesis in favor of the default "two-sided" alternative: the data
are *not* distributed according to the standard normal.
When testing random variates from the standard normal distribution, we
expect the data to be consistent with the null hypothesis most of the time.
>>> x = stats.norm.rvs(size=100, random_state=rng)
>>> stats.ks_1samp(x, stats.norm.cdf)
KstestResult(statistic=0.05345882212970396, pvalue=0.9227159037744717)
As expected, the p-value of 0.92 is not below our threshold of 0.05, so
we cannot reject the null hypothesis.
Suppose, however, that the random variates are distributed according to
a normal distribution that is shifted toward greater values. In this case,
the cumulative distribution function (CDF) of the underlying distribution tends
to be *less* than the CDF of the standard normal. Therefore, we would
expect the null hypothesis to be rejected with ``alternative='less'``:
>>> x = stats.norm.rvs(size=100, loc=0.5, random_state=rng)
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='less')
KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743)
and indeed, with p-value smaller than our threshold, we reject the null
hypothesis in favor of the alternative.
"""
mode = method
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
if np.ma.is_masked(x):
x = x.compressed()
N = len(x)
x = np.sort(x)
cdfvals = cdf(x, *args)
if alternative == 'greater':
Dplus, d_location = _compute_dplus(cdfvals, x)
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N),
statistic_location=d_location,
statistic_sign=1)
if alternative == 'less':
Dminus, d_location = _compute_dminus(cdfvals, x)
return KstestResult(Dminus, distributions.ksone.sf(Dminus, N),
statistic_location=d_location,
statistic_sign=-1)
# alternative == 'two-sided':
Dplus, dplus_location = _compute_dplus(cdfvals, x)
Dminus, dminus_location = _compute_dminus(cdfvals, x)
if Dplus > Dminus:
D = Dplus
d_location = dplus_location
d_sign = 1
else:
D = Dminus
d_location = dminus_location
d_sign = -1
if mode == 'auto': # Always select exact
mode = 'exact'
if mode == 'exact':
prob = distributions.kstwo.sf(D, N)
elif mode == 'asymp':
prob = distributions.kstwobign.sf(D * np.sqrt(N))
else:
# mode == 'approx'
prob = 2 * distributions.ksone.sf(D, N)
prob = np.clip(prob, 0, 1)
return KstestResult(D, prob,
statistic_location=d_location,
statistic_sign=d_sign)
Ks_2sampResult = KstestResult
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2h) + binom(2n, n-3h) - ... )
# / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with
# h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
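# Sanity check for _compute_prob_outside_square (toy case): n = 2, h = 2
# gives P = 1/6 and a return value of 1/3, matching direct enumeration:
# of the 6 paths from (0, 0) to (2, 2), only RRUU and UURR touch the
# lines x - y = +/-2.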
def _count_paths_outside_method(m, n, g, h):
"""Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that pass below the specified diagonal.
The calculation may overflow; check that the result is finite.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y
directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without
# previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Not every x needs to be considered.
# xj holds the list of x values to be checked.
# Wherever n*x/m + ng*h crosses an integer
lxj = n + (mg-h)//mg
xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return special.binom(m + n, n)
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
for j in range(1, lxj):
Bj = special.binom(xj[j] + j, j)
for i in range(j):
bin = special.binom(xj[j] - xj[i] + j - i, j-i)
Bj -= bin * B[i]
B[j] = Bj
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = special.binom((m-xj[j]) + (n - j), n-j)
term = B[j] * bin
num_paths += term
return num_paths
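# Sanity check for _count_paths_outside_method (toy case): m = n = 1,
# g = h = 1 gives lxj = 1, xj = [1], B = [1] and num_paths = 1;
# dividing by binom(2, 1) = 2 recovers the one-sided probability 1/2.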
def _attempt_exact_2kssamp(n1, n2, g, d, alternative):
"""Attempts to compute the exact 2sample probability.
n1, n2 are the sample sizes
g is the gcd(n1, n2)
d is the computed max difference in ECDFs
Returns (success, d, probability)
"""
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
return True, d, 1.0
saw_fp_error, prob = False, np.nan
try:
with np.errstate(invalid="raise", over="raise"):
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = _compute_outer_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
jrange = np.arange(h)
prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
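# e.g., n1 = n2 = 2, h = 2: prob = (2/3) * (1/4) = 1/6,
# which equals binom(4, 0) / binom(4, 2).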
else:
with np.errstate(over='raise'):
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1)
if num_paths > bin or np.isinf(bin):
saw_fp_error = True
else:
prob = num_paths / bin
except (FloatingPointError, OverflowError):
saw_fp_error = True
if saw_fp_error:
return False, d, np.nan
if not (0 <= prob <= 1):
return False, d, prob
return True, d, prob
@_rename_parameter("mode", "method")
def ks_2samp(data1, data2, alternative='two-sided', method='auto'):
"""
Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying continuous distributions F(x) and G(x)
of two independent samples. See Notes for a description of the available
null and alternative hypotheses.
Parameters
----------
data1, data2 : array_like, 1-Dimensional
Two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
method : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
res : KstestResult
An object containing attributes:
statistic : float
KS test statistic.
pvalue : float
One-tailed or two-tailed p-value.
statistic_location : float
Value from `data1` or `data2` corresponding with the KS statistic;
i.e., the distance between the empirical distribution functions is
measured at this observation.
statistic_sign : int
+1 if the empirical distribution function of `data1` exceeds
the empirical distribution function of `data2` at
`statistic_location`, otherwise -1.
See Also
--------
kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x. The statistic
is the magnitude of the minimum (most negative) difference between the
empirical distribution functions of the samples.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x. The statistic
is the maximum (most positive) difference between the empirical
distribution functions of the samples.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical. The statistic is the maximum absolute difference between the
empirical distribution functions of the samples.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values of the data. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
If the KS statistic is large, then the p-value will be small, and this may
be taken as evidence against the null hypothesis in favor of the
alternative.
If ``method='exact'``, `ks_2samp` attempts to compute an exact p-value,
that is, the probability under the null hypothesis of obtaining a test
statistic value as extreme as the value computed from the data.
If ``method='asymp'``, the asymptotic Kolmogorov-Smirnov distribution is
used to compute an approximate p-value.
If ``method='auto'``, an exact p-value computation is attempted if both
sample sizes are less than 10000; otherwise, the asymptotic method is used.
In any case, if an exact p-value calculation is attempted and fails, a
warning will be emitted, and the asymptotic p-value will be returned.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
Two-Sample Test," Arkiv för Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
Suppose we wish to test the null hypothesis that two samples were drawn
from the same distribution.
We choose a confidence level of 95%; that is, we will reject the null
hypothesis in favor of the alternative if the p-value is less than 0.05.
If the first sample were drawn from a uniform distribution and the second
were drawn from the standard normal, we would expect the null hypothesis
to be rejected.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> sample1 = stats.uniform.rvs(size=100, random_state=rng)
>>> sample2 = stats.norm.rvs(size=110, random_state=rng)
>>> stats.ks_2samp(sample1, sample2)
KstestResult(statistic=0.5454545454545454, pvalue=7.37417839555191e-15)
Indeed, the p-value is lower than our threshold of 0.05, so we reject the
null hypothesis in favor of the default "two-sided" alternative: the data
were *not* drawn from the same distribution.
When both samples are drawn from the same distribution, we expect the data
to be consistent with the null hypothesis most of the time.
>>> sample1 = stats.norm.rvs(size=105, random_state=rng)
>>> sample2 = stats.norm.rvs(size=95, random_state=rng)
>>> stats.ks_2samp(sample1, sample2)
KstestResult(statistic=0.10927318295739348, pvalue=0.5438289009927495)
As expected, the p-value of 0.54 is not below our threshold of 0.05, so
we cannot reject the null hypothesis.
Suppose, however, that the first sample were drawn from
a normal distribution shifted toward greater values. In this case,
the cumulative distribution function (CDF) of the underlying distribution tends
to be *less* than the CDF underlying the second sample. Therefore, we would
expect the null hypothesis to be rejected with ``alternative='less'``:
>>> sample1 = stats.norm.rvs(size=105, loc=0.5, random_state=rng)
>>> stats.ks_2samp(sample1, sample2, alternative='less')
KstestResult(statistic=0.4055137844611529, pvalue=3.5474563068855554e-08)
and indeed, with p-value smaller than our threshold, we reject the null
hypothesis in favor of the alternative.
"""
mode = method
if mode not in ['auto', 'exact', 'asymp']:
raise ValueError(f'Invalid value for mode: {mode}')
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError(f'Invalid value for alternative: {alternative}')
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
if np.ma.is_masked(data1):
data1 = data1.compressed()
if np.ma.is_masked(data2):
data2 = data2.compressed()
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
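# e.g., data1 = [1, 2], data2 = [2, 3]: over data_all = [1, 2, 2, 3],
# side='right' counts the tied value 2 in both ECDFs, giving
# cdf1 = [0.5, 1, 1, 1] and cdf2 = [0, 0.5, 0.5, 1].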
cddiffs = cdf1 - cdf2
# Identify the location of the statistic
argminS = np.argmin(cddiffs)
argmaxS = np.argmax(cddiffs)
loc_minS = data_all[argminS]
loc_maxS = data_all[argmaxS]
# Ensure minS is nonnegative (and at most 1); clip guards against
# tiny negative floating-point values.
minS = np.clip(-cddiffs[argminS], 0, 1)
maxS = cddiffs[argmaxS]
if alternative == 'less' or (alternative == 'two-sided' and minS > maxS):
d = minS
d_location = loc_minS
d_sign = -1
else:
d = maxS
d_location = loc_maxS
d_sign = 1
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int32).max / n2g:
mode = 'asymp'
warnings.warn(
f"Exact ks_2samp calculation not possible with samples sizes "
f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning,
stacklevel=3)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to method={mode}.", RuntimeWarning,
stacklevel=3)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptotic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = np.clip(prob, 0, 1)
return KstestResult(d, prob, statistic_location=d_location,
statistic_sign=d_sign)
def _parse_kstest_args(data1, data2, args, N):
# kstest allows many different variations of arguments.
# Pull out the parsing into a separate function
# (xvals, yvals, ) # 2sample
# (xvals, cdf function,..)
# (xvals, name of distribution, ...)
# (name of distribution, name of distribution, ...)
# Returns xvals, yvals, cdf
# where cdf is a cdf function, or None
# and yvals is either an array_like of values, or None
# and xvals is array_like.
rvsfunc, cdf = None, None
if isinstance(data1, str):
rvsfunc = getattr(distributions, data1).rvs
elif callable(data1):
rvsfunc = data1
if isinstance(data2, str):
cdf = getattr(distributions, data2).cdf
data2 = None
elif callable(data2):
cdf = data2
data2 = None
data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1)
return data1, data2, cdf
@_rename_parameter("mode", "method")
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', method='auto'):
"""
Performs the (one-sample or two-sample) Kolmogorov-Smirnov test for
goodness of fit.
The one-sample test compares the underlying distribution F(x) of a sample
against a given distribution G(x). The two-sample test compares the
underlying distributions of two independent samples. Both tests are valid
only for continuous distributions.
Parameters
----------
rvs : str, array_like, or callable
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used to generate random variables.
cdf : str, array_like or callable
If array_like, it should be a 1-D array of observations of random
variables, and the two-sample test is performed
(and rvs must be array_like).
If a callable, that callable is used to calculate the cdf.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used as the cdf function.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings or
callables.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
method : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice the
one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
res : KstestResult
An object containing attributes:
statistic : float
KS test statistic, either D+, D-, or D (the maximum of the two)
pvalue : float
One-tailed or two-tailed p-value.
statistic_location : float
In a one-sample test, this is the value of `rvs`
corresponding with the KS statistic; i.e., the distance between
the empirical distribution function and the hypothesized cumulative
distribution function is measured at this observation.
In a two-sample test, this is the value from `rvs` or `cdf`
corresponding with the KS statistic; i.e., the distance between
the empirical distribution functions is measured at this
observation.
statistic_sign : int
In a one-sample test, this is +1 if the KS statistic is the
maximum positive difference between the empirical distribution
function and the hypothesized cumulative distribution function
(D+); it is -1 if the KS statistic is the maximum negative
difference (D-).
In a two-sample test, this is +1 if the empirical distribution
function of `rvs` exceeds the empirical distribution
function of `cdf` at `statistic_location`, otherwise -1.
See Also
--------
ks_1samp, ks_2samp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
Suppose we wish to test the null hypothesis that a sample is distributed
according to the standard normal.
We choose a confidence level of 95%; that is, we will reject the null
hypothesis in favor of the alternative if the p-value is less than 0.05.
When testing uniformly distributed data, we would expect the
null hypothesis to be rejected.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> stats.kstest(stats.uniform.rvs(size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.5001899973268688, pvalue=1.1616392184763533e-23)
Indeed, the p-value is lower than our threshold of 0.05, so we reject the
null hypothesis in favor of the default "two-sided" alternative: the data
are *not* distributed according to the standard normal.
When testing random variates from the standard normal distribution, we
expect the data to be consistent with the null hypothesis most of the time.
>>> x = stats.norm.rvs(size=100, random_state=rng)
>>> stats.kstest(x, stats.norm.cdf)
KstestResult(statistic=0.05345882212970396, pvalue=0.9227159037744717)
As expected, the p-value of 0.92 is not below our threshold of 0.05, so
we cannot reject the null hypothesis.
Suppose, however, that the random variates are distributed according to
a normal distribution that is shifted toward greater values. In this case,
the cumulative distribution function (CDF) of the underlying distribution tends
to be *less* than the CDF of the standard normal. Therefore, we would
expect the null hypothesis to be rejected with ``alternative='less'``:
>>> x = stats.norm.rvs(size=100, loc=0.5, random_state=rng)
>>> stats.kstest(x, stats.norm.cdf, alternative='less')
KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743)
and indeed, with p-value smaller than our threshold, we reject the null
hypothesis in favor of the alternative.
For convenience, the previous test can be performed using the name of the
distribution as the second argument.
>>> stats.kstest(x, "norm", alternative='less')
KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743)
The examples above have all been one-sample tests identical to those
performed by `ks_1samp`. Note that `kstest` can also perform two-sample
tests identical to those performed by `ks_2samp`. For example, when two
samples are drawn from the same distribution, we expect the data to be
consistent with the null hypothesis most of the time.
>>> sample1 = stats.laplace.rvs(size=105, random_state=rng)
>>> sample2 = stats.laplace.rvs(size=95, random_state=rng)
>>> stats.kstest(sample1, sample2)
KstestResult(statistic=0.11779448621553884, pvalue=0.4494256912629795)
As expected, the p-value of 0.45 is not below our threshold of 0.05, so
we cannot reject the null hypothesis.
"""
# Accept the legacy 'two_sided' spelling to avoid breaking existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N)
if cdf:
return ks_1samp(xvals, cdf, args=args, alternative=alternative,
method=method)
return ks_2samp(xvals, yvals, alternative=alternative, method=method)
def tiecorrect(rankvals):
"""Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `~scipy.stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
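# e.g., ranks [1, 2.5, 2.5, 4]: cnt = [1, 2, 1], so the factor is
# 1 - (2**3 - 2) / (4**3 - 4) = 1 - 6/60 = 0.9, as in the Examples.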
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
@_axis_nan_policy_factory(RanksumsResult, n_samples=2)
def ranksums(x, y, alternative='two-sided'):
"""Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': one of the distributions (underlying `x` or `y`) is
stochastically greater than the other.
* 'less': the distribution underlying `x` is stochastically less
than the distribution underlying `y`.
* 'greater': the distribution underlying `x` is stochastically greater
than the distribution underlying `y`.
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed.
pvalue : float
The p-value of the test.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
Examples
--------
We can test the hypothesis that two independent unequal-sized samples are
drawn from the same distribution by computing the Wilcoxon rank-sum
statistic.
>>> import numpy as np
>>> from scipy.stats import ranksums
>>> rng = np.random.default_rng()
>>> sample1 = rng.uniform(-1, 1, 200)
>>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution
>>> ranksums(sample1, sample2)
RanksumsResult(statistic=-7.887059, pvalue=3.09390448e-15) # may vary
>>> ranksums(sample1, sample2, alternative='less')
RanksumsResult(statistic=-7.750585297581713, pvalue=4.573497606342543e-15) # may vary
>>> ranksums(sample1, sample2, alternative='greater')
RanksumsResult(statistic=-7.750585297581713, pvalue=0.9999999999999954) # may vary
The p-value of less than ``0.05`` indicates that this test rejects the
hypothesis at the 5% significance level.
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
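# e.g., x = [1, 2], y = [3, 4]: s = 3, expected = 5, so
# z = -2 / sqrt(2*2*5/12) ~= -1.549 before the tail is selected.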
z, prob = _normtest_finish(z, alternative)
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
@_axis_nan_policy_factory(KruskalResult, n_samples=None)
def kruskal(*samples, nan_policy='propagate'):
"""Compute the Kruskal-Wallis H-test for independent samples.
The Kruskal-Wallis H-test tests the null hypothesis that the population
medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments. Samples must be one-dimensional.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties.
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution. The p-value returned is the survival function of
the chi square distribution evaluated at H.
See Also
--------
f_oneway : 1-way ANOVA.
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements.
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
"""
samples = list(map(np.asarray, samples))
num_groups = len(samples)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for sample in samples:
if sample.size == 0:
NaN = _get_nan(*samples)
return KruskalResult(NaN, NaN)
elif sample.ndim != 1:
raise ValueError("Samples must be one-dimensional.")
n = np.asarray(list(map(len, samples)))
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', 'raise' or 'omit'")
contains_nan = False
for sample in samples:
cn = _contains_nan(sample, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
samples = [ma.masked_invalid(sample) for sample in samples]
return mstats_basic.kruskal(*samples)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(samples)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
totaln = np.sum(n, dtype=float)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
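# e.g., the docstring's x = [1, 3, 5, 7, 9], y = [2, 4, 6, 8, 10]:
# rank sums are 25 and 30, ssbn = 25**2/5 + 30**2/5 = 305, and
# h = 12/(10*11)*305 - 3*11 ~= 0.2727 (no ties, so ties == 1).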
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*samples):
"""Compute the Friedman test for repeated samples.
The Friedman test tests the null hypothesis that repeated samples of
the same individuals have the same distribution. It is often used
to test for consistency among samples obtained in different ways.
For example, if two sampling techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
sampling techniques are consistent.
Parameters
----------
sample1, sample2, sample3... : array_like
Arrays of observations. All of the arrays must have the same number
of elements. At least three samples must be given.
Returns
-------
statistic : float
The test statistic, correcting for ties.
pvalue : float
The associated p-value assuming that the test statistic has a chi
squared distribution.
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated samples.
References
----------
.. [1] https://en.wikipedia.org/wiki/Friedman_test
.. [2] P. Sprent and N.C. Smeeton, "Applied Nonparametric Statistical
Methods, Third Edition". Chapter 6, Section 6.3.2.
Examples
--------
In [2]_, the pulse rate (per minute) of a group of seven students was
measured before exercise, immediately after exercise and 5 minutes
after exercise. Is there evidence to suggest that the pulse rates on
these three occasions are similar?
We begin by formulating a null hypothesis :math:`H_0`:
The pulse rates are identical on these three occasions.
Let's assess the plausibility of this hypothesis with a Friedman test.
>>> from scipy.stats import friedmanchisquare
>>> before = [72, 96, 88, 92, 74, 76, 82]
>>> immediately_after = [120, 120, 132, 120, 101, 96, 112]
>>> five_min_after = [76, 95, 104, 96, 84, 72, 76]
>>> res = friedmanchisquare(before, immediately_after, five_min_after)
>>> res.statistic
10.57142857142857
>>> res.pvalue
0.005063414171757498
Using a significance level of 5%, we would reject the null hypothesis in
favor of the alternative hypothesis: "the pulse rates are different on
these three occasions".
"""
k = len(samples)
if k < 3:
raise ValueError('At least 3 sets of samples must be given '
'for Friedman test, got {}.'.format(k))
n = len(samples[0])
for i in range(1, k):
if len(samples[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(samples).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for d in data:
replist, repnum = find_repeats(array(d))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / (k*(k*k - 1)*n)
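# c is the tie-correction factor: it equals 1 when there are no ties
# and shrinks as ties accumulate, which inflates chisq below.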
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
nan_policy='propagate'):
"""Compute the Brunner-Munzel test on samples x and y.
The Brunner-Munzel test is a nonparametric test of the null hypothesis that
when values are taken one by one from each group, the probabilities of
getting large values in both groups are equal.
Unlike the Wilcoxon-Mann-Whitney U test, this does not require the
assumption of equal variances in the two groups. Note that it does not
assume that the distributions are the same. This test works on two
independent samples,
which may have different sizes.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
distribution : {'t', 'normal'}, optional
Defines how to get the p-value.
The following options are available (default is 't'):
* 't': get the p-value by t-distribution
* 'normal': get the p-value by standard normal distribution.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Brunner-Munzel W statistic.
pvalue : float
p-value assuming a t distribution. One-sided or
two-sided, depending on the choice of `alternative` and `distribution`.
See Also
--------
mannwhitneyu : Mann-Whitney rank test on two samples.
Notes
-----
Brunner and Munzel recommended estimating the p-value using the
t-distribution when the sample size is 50 or less. If the size is smaller
than 10, it is better to use the permuted Brunner-Munzel test (see [2]_).
References
----------
.. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
problem: Asymptotic theory and a small-sample approximation".
Biometrical Journal. Vol. 42(2000): 17-25.
.. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
non-parametric Behrens-Fisher problem". Computational Statistics and
Data Analysis. Vol. 51(2007): 5192-5204.
Examples
--------
>>> from scipy import stats
>>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
>>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
>>> w, p_value = stats.brunnermunzel(x1, x2)
>>> w
3.1374674823029505
>>> p_value
0.0057862086661515377
"""
x = np.asarray(x)
y = np.asarray(y)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == "omit" or npy == "omit":
nan_policy = "omit"
if contains_nan and nan_policy == "propagate":
return BrunnerMunzelResult(np.nan, np.nan)
elif contains_nan and nan_policy == "omit":
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.brunnermunzel(x, y, alternative, distribution)
nx = len(x)
ny = len(y)
if nx == 0 or ny == 0:
return BrunnerMunzelResult(np.nan, np.nan)
rankc = rankdata(np.concatenate((x, y)))
rankcx = rankc[0:nx]
rankcy = rankc[nx:nx+ny]
rankcx_mean = np.mean(rankcx)
rankcy_mean = np.mean(rankcy)
rankx = rankdata(x)
ranky = rankdata(y)
rankx_mean = np.mean(rankx)
ranky_mean = np.mean(ranky)
Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
Sx /= nx - 1
Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
Sy /= ny - 1
wbfn = nx * ny * (rankcy_mean - rankcx_mean)
wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
if distribution == "t":
df_numer = np.power(nx * Sx + ny * Sy, 2.0)
df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
df = df_numer / df_denom
if (df_numer == 0) and (df_denom == 0):
message = ("p-value cannot be estimated with `distribution='t' "
"because degrees of freedom parameter is undefined "
"(0/0). Try using `distribution='normal'")
warnings.warn(message, RuntimeWarning)
p = distributions.t.cdf(wbfn, df)
elif distribution == "normal":
p = distributions.norm.cdf(wbfn)
else:
raise ValueError(
"distribution should be 't' or 'normal'")
if alternative == "greater":
pass
elif alternative == "less":
p = 1 - p
elif alternative == "two-sided":
p = 2 * np.min([p, 1-p])
else:
raise ValueError(
"alternative should be 'less', 'greater' or 'two-sided'")
return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests that bear upon the same hypothesis.
These methods are intended only for combining p-values from hypothesis
tests based upon continuous distributions.
Each method assumes that under the null hypothesis, the p-values are
sampled independently and uniformly from the interval [0, 1]. A test
statistic (different for each method) is computed and a combined
p-value is calculated based upon the distribution of this test statistic
under the null hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests based on
continuous distributions.
method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}
Name of method to use to combine p-values.
The available methods are (see Notes for details):
* 'fisher': Fisher's method (Fisher's combined probability test)
* 'pearson': Pearson's method
* 'mudholkar_george': Mudholkar's and George's method
* 'tippett': Tippett's method
* 'stouffer': Stouffer's Z-score method
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
res : SignificanceResult
An object containing attributes:
statistic : float
The statistic calculated by the specified method.
pvalue : float
The combined p-value.
Notes
-----
If this function is applied to tests with a discrete statistic, such as
any rank test or contingency-table test, it will yield systematically
wrong results, e.g. Fisher's method will systematically overestimate the
p-value [1]_. This problem becomes less severe for large sample sizes
when the discrete distributions become approximately continuous.
The differences between the methods can be best illustrated by their
statistics and what aspects of a combination of p-values they emphasise
when considering significance [2]_. For example, methods emphasising large
p-values are more sensitive to strong false and true negatives; conversely,
methods focusing on small p-values are sensitive to positives.
* The statistic of Fisher's method (also known as Fisher's combined
probability test) [3]_ is :math:`-2\\sum_i \\log(p_i)`, which is
equivalent (as a test statistic) to the product of individual p-values:
:math:`\\prod_i p_i`. Under the null hypothesis, this statistic follows
a :math:`\\chi^2` distribution. This method emphasises small p-values.
* Pearson's method uses :math:`-2\\sum_i\\log(1-p_i)`, which is equivalent
to :math:`\\prod_i \\frac{1}{1-p_i}` [2]_.
It thus emphasises large p-values.
* Mudholkar and George compromise between Fisher's and Pearson's method by
averaging their statistics [4]_. Their method emphasises extreme
p-values, both close to 1 and 0.
* Stouffer's method [5]_ uses Z-scores and the statistic:
:math:`\\sum_i \\Phi^{-1} (p_i)`, where :math:`\\Phi` is the CDF of the
standard normal distribution. The advantage of this method is that it is
straightforward to introduce weights, which can make Stouffer's method
more powerful than Fisher's method when the p-values are from studies
of different size [6]_ [7]_.
* Tippett's method uses the smallest p-value as a statistic.
(Mind that this minimum is not the combined p-value.)
Fisher's method may be extended to combine p-values from dependent tests
[8]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] Kincaid, W. M., "The Combination of Tests Based on Discrete
Distributions." Journal of the American Statistical Association 57,
no. 297 (1962), 10-19.
.. [2] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [3] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [4] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [5] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [6] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [7] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
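# Under H0, -2*sum(log(p_i)) is chi-squared distributed with
# 2*len(pvalues) degrees of freedom, hence the sf call above.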
elif method == 'pearson':
statistic = 2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.cdf(-statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.cdf(statistic, 1, len(pvalues))
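# The minimum of len(pvalues) independent Uniform(0, 1) variables
# follows a Beta(1, len(pvalues)) distribution, hence the cdf call above.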
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
raise ValueError(
f"Invalid method {method!r}. Valid methods are 'fisher', "
"'pearson', 'mudholkar_george', 'tippett', and 'stouffer'"
)
return SignificanceResult(statistic, pval)
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the Wasserstein-1 distance between two discrete distributions.
The Wasserstein distance, also called the Earth mover's distance or the
optimal transport distance, is a similarity metric between two probability
distributions. In the discrete case, the Wasserstein distance can be
understood as the cost of an optimal transport plan to convert one
distribution into the other. The cost is calculated as the product of the
amount of probability mass being moved and the distance it is being moved.
A brief and intuitive introduction can be found at [2]_.
.. versionadded:: 1.0.0
Parameters
----------
u_values : 1d or 2d array_like
A sample from a probability distribution or the support (set of all
possible values) of a probability distribution. Each element along
axis 0 is an observation or possible value. If two-dimensional, axis
1 represents the dimensionality of the distribution; i.e., each row is
a vector observation or possible value.
v_values : 1d or 2d array_like
A sample from or the support of a second distribution.
u_weights, v_weights : 1d array_like, optional
Weights or counts corresponding with the sample or probability masses
corresponding with the support values. Sum of elements must be positive
and finite. If unspecified, each value is assigned the same weight.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
Given two probability mass functions, :math:`u`
and :math:`v`, the first Wasserstein distance between the distributions
is:
.. math::
l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
\mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
where :math:`\Gamma (u, v)` is the set of (probability) distributions on
:math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
:math:`v` on the first and second factors respectively. For a given value
:math:`x`, :math:`u(x)` gives the probability of :math:`u` at position
:math:`x`, and the same for :math:`v(x)`.
In the 1-dimensional case, if :math:`U` and :math:`V` denote the
respective CDFs of :math:`u` and :math:`v`, this distance also equals:
.. math::
l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
See [3]_ for a proof of the equivalence of both definitions.
In the more general (higher dimensional) and discrete case, it is also
called the optimal transport problem or the Monge problem.
Let the finite point sets :math:`\{x_i\}` and :math:`\{y_j\}` denote
the support sets of the probability mass functions :math:`u` and :math:`v`,
respectively. The Monge problem can be expressed as follows. Let
:math:`\Gamma` denote the transport plan, :math:`D` denote the
distance matrix, and
.. math::
x = \text{vec}(\Gamma) \\
c = \text{vec}(D) \\
b = \begin{bmatrix}
u\\
v\\
\end{bmatrix}
The :math:`\text{vec}()` function denotes vectorization: it transforms
a matrix into a column vector by vertically stacking the columns of
the matrix.
The transport plan :math:`\Gamma` is a matrix :math:`[\gamma_{ij}]` in
which :math:`\gamma_{ij}` is a nonnegative value representing the amount of
probability mass transported from :math:`u(x_i)` to :math:`v(y_j)`.
Summing over the rows of :math:`\Gamma` should give the source distribution
:math:`u` : :math:`\sum_j \gamma_{ij} = u(x_i)` holds for all :math:`i`
and summing over the columns of :math:`\Gamma` should give the target
distribution :math:`v`: :math:`\sum_i \gamma_{ij} = v(y_j)` holds for all
:math:`j`.
The distance matrix :math:`D` is a matrix :math:`[d_{ij}]`, in which
:math:`d_{ij} = d(x_i, y_j)`.
Given :math:`\Gamma`, :math:`D`, :math:`b`, the Monge problem can be
transformed into a linear programming problem by
taking :math:`A x = b` as constraints and :math:`z = c^T x` as the
minimization target (the total cost), where matrix :math:`A` has the form
.. math::
\begin{array} {rrrr|rrrr|r|rrrr}
1 & 1 & \dots & 1 & 0 & 0 & \dots & 0 & \dots & 0 & 0 & \dots &
0 \cr
0 & 0 & \dots & 0 & 1 & 1 & \dots & 1 & \dots & 0 & 0 &\dots &
0 \cr
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots
& \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \cr
0 & 0 & \dots & 0 & 0 & 0 & \dots & 0 & \dots & 1 & 1 & \dots &
1 \cr \hline
1 & 0 & \dots & 0 & 1 & 0 & \dots & \dots & \dots & 1 & 0 & \dots &
0 \cr
0 & 1 & \dots & 0 & 0 & 1 & \dots & \dots & \dots & 0 & 1 & \dots &
0 \cr
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots &
\vdots & \vdots & \vdots & \vdots & \ddots & \vdots \cr
0 & 0 & \dots & 1 & 0 & 0 & \dots & 1 & \dots & 0 & 0 & \dots & 1
\end{array}
By solving the dual form of the above linear programming problem (with
solution :math:`y^*`), the Wasserstein distance :math:`l_1 (u, v)` can
be computed as :math:`b^T y^*`.
The above solution is inspired by Vincent Herrmann's blog [5]_. For a
more thorough explanation, see [4]_.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Wasserstein metric",
https://en.wikipedia.org/wiki/Wasserstein_metric
.. [2] Lilian Weng, "What is Wasserstein distance?", Lil'log,
https://lilianweng.github.io/posts/2017-08-20-gan/#what-is-
wasserstein-distance.
.. [3] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and
Related Families of Nonparametric Tests" (2015).
:arXiv:`1509.02237`.
.. [4] Peyré, Gabriel, and Marco Cuturi. "Computational optimal
transport." Center for Research in Economics and Statistics
Working Papers 2017-86 (2017).
.. [5] Herrmann, Vincent. "Wasserstein GAN and the Kantorovich-Rubinstein
Duality". https://vincentherrmann.github.io/blog/wasserstein/.
Examples
--------
>>> from scipy.stats import wasserstein_distance
>>> wasserstein_distance([0, 1, 3], [5, 6, 8])
5.0
>>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
0.25
>>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
4.0781331438047861
Compute the Wasserstein distance between two three-dimensional samples,
each with two observations.
>>> wasserstein_distance([[0, 2, 3], [1, 2, 5]], [[3, 2, 3], [4, 2, 5]])
3.0
Compute the Wasserstein distance between two two-dimensional distributions
with three and two weighted observations, respectively.
>>> wasserstein_distance([[0, 2.75], [2, 209.3], [0, 0]],
... [[0.2, 0.322], [4.5, 25.1808]],
... [0.4, 5.2, 0.114], [0.8, 1.5])
174.15840245217169
"""
m, n = len(u_values), len(v_values)
u_values = asarray(u_values)
v_values = asarray(v_values)
if u_values.ndim > 2 or v_values.ndim > 2:
raise ValueError('Invalid input values. The inputs must have either '
'one or two dimensions.')
# if dimensions are not equal throw error
if u_values.ndim != v_values.ndim:
raise ValueError('Invalid input values. Dimensions of inputs must be '
'equal.')
# if data is 1D then call the cdf_distance function
if u_values.ndim == 1 and v_values.ndim == 1:
return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
# if number of columns is not equal throw error
if u_values.shape[1] != v_values.shape[1]:
raise ValueError('Invalid input values. If two-dimensional, '
'`u_values` and `v_values` must have the same '
'number of columns.')
# if data contains np.inf then return inf or nan
if np.any(np.isinf(u_values)) ^ np.any(np.isinf(v_values)):
return np.inf
elif np.any(np.isinf(u_values)) and np.any(np.isinf(v_values)):
return np.nan
# create constraints
A_upper_part = sparse.block_diag((np.ones((1, n)), ) * m)
A_lower_part = sparse.hstack((sparse.eye(n), ) * m)
# sparse constraint matrix of size (m + n)*(m * n)
A = sparse.vstack((A_upper_part, A_lower_part))
A = sparse.coo_array(A)
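# e.g., m = n = 2: A_upper_part is the 2x4 block-diagonal of row-sum
# constraints and A_lower_part stacks two 2x2 identities, so A has
# shape (4, 4) acting on vec(Gamma).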
# get cost matrix
D = distance_matrix(u_values, v_values, p=2)
cost = D.ravel()
# create the minimization target
p_u = np.full(m, 1/m) if u_weights is None else u_weights/np.sum(u_weights)
p_v = np.full(n, 1/n) if v_weights is None else v_weights/np.sum(v_weights)
b = np.concatenate((p_u, p_v), axis=0)
# solving LP
constraints = LinearConstraint(A=A.T, ub=cost)
opt_res = milp(c=-b, constraints=constraints, bounds=(-np.inf, np.inf))
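# milp minimizes the objective, so maximizing b^T y subject to
# A^T y <= cost (the dual of the transport LP) is expressed with c = -b;
# by LP duality, the optimum b^T y* equals l_1(u, v), hence -opt_res.fun.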
return -opt_res.fun
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""Compute the energy distance between two 1D distributions.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The energy distance between two distributions :math:`u` and :math:`v`, whose
respective CDFs are :math:`U` and :math:`V`, equals:
.. math::
D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
\mathbb E|Y - Y'| \right)^{1/2}
where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
independent random variables whose probability distribution is :math:`u`
(resp. :math:`v`).
Sometimes the square of this quantity is referred to as the "energy
distance" (e.g. in [2]_, [4]_), but as noted in [1]_ and [3]_, only the
definition above satisfies the axioms of a distance function (metric).
As shown in [2]_, for one-dimensional real-valued variables, the energy
distance is linked to the non-distribution-free version of the Cramér-von
Mises distance:
.. math::
D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
\right)^{1/2}
Note that the common Cramér-von Mises criterion uses the distribution-free
version of the distance. See [2]_ (section 2), for more details about both
versions of the distance.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
Computational Statistics, 8(1):27-38 (2015).
.. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
Green State University, Department of Mathematics and Statistics,
Technical Report 02-16 (2002).
.. [3] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
.. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
Examples
--------
>>> from scipy.stats import energy_distance
>>> energy_distance([0], [2])
2.0000000000000004
>>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
1.0000000000000002
>>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
0.88003340976158217
"""
return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute, between two one-dimensional distributions :math:`u` and
:math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
statistical distance that is defined as:
.. math::
l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2
gives the energy distance.
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
"""
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
u_sorter = np.argsort(u_values)
v_sorter = np.argsort(v_values)
all_values = np.concatenate((u_values, v_values))
all_values.sort(kind='mergesort')
# Compute the differences between pairs of successive values of u and v.
deltas = np.diff(all_values)
# Get the respective positions of the values of u and v among the values of
# both distributions.
u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
# Calculate the CDFs of u and v using their weights, if specified.
if u_weights is None:
u_cdf = u_cdf_indices / u_values.size
else:
u_sorted_cumweights = np.concatenate(([0],
np.cumsum(u_weights[u_sorter])))
u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
if v_weights is None:
v_cdf = v_cdf_indices / v_values.size
else:
v_sorted_cumweights = np.concatenate(([0],
np.cumsum(v_weights[v_sorter])))
v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
# Compute the value of the integral based on the CDFs.
# If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
# of about 15%.
if p == 1:
return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
if p == 2:
return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
deltas)), 1/p)
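# A minimal illustrative sketch (hypothetical helper, not part of SciPy):
# with u = [0, 1] and v = [0], the empirical CDFs differ by 0.5 on [0, 1),
# so the p = 1 (Wasserstein) distance is 0.5 * 1 = 0.5.
def _sketch_cdf_distance_check():
    assert np.isclose(_cdf_distance(1, [0, 1], [0]), 0.5)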
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
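# A minimal illustrative sketch (hypothetical helper, not part of SciPy) of
# the `numpy.unique` equivalence mentioned in the Notes: keep only the
# unique values whose count exceeds one.
def _sketch_find_repeats_via_unique(arr):
    values, counts = np.unique(np.asarray(arr, dtype=np.float64),
                               return_counts=True)
    repeated = counts > 1
    # e.g. [2, 1, 2, 3, 2, 2, 5] -> (array([2.]), array([4]))
    return values[repeated], counts[repeated]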
def _sum_of_squares(a, axis=0):
"""Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See Also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def _square_of_sums(a, axis=0):
"""Sum elements of the input array, and return the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See Also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
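# A minimal illustrative sketch (hypothetical helper, not part of SciPy) of
# why both helpers exist: combined, they give the centered sum of squares,
# sum((a - mean)**2) == _sum_of_squares(a) - _square_of_sums(a) / n.
def _sketch_centered_sum_of_squares(a):
    a = np.asarray(a, dtype=float)
    return _sum_of_squares(a, axis=None) - _square_of_sums(a, axis=None) / a.size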
def rankdata(a, method='average', *, axis=None, nan_policy='propagate'):
"""Assign ranks to data, dealing with ties appropriately.
By default (``axis=None``), the data array is first flattened, and a flat
array of ranks is returned. Separately reshape the rank array to the
shape of the data array if desired (see Examples).
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked.
method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
* 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
* 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
* 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
* 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
* 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
axis : {None, int}, optional
Axis along which to perform the ranking. If ``None``, the data array
is first flattened.
nan_policy : {'propagate', 'omit', 'raise'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': propagates nans through the rank calculation
* 'omit': performs the calculations ignoring nan values
* 'raise': raises an error
.. note::
When `nan_policy` is 'propagate', the output is an array of *all*
nans because ranks relative to nans in the input are undefined.
When `nan_policy` is 'omit', nans in `a` are ignored when ranking
the other values, and the corresponding locations of the output
are nan.
.. versionadded:: 1.10
Returns
-------
ranks : ndarray
An array of size equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> import numpy as np
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
>>> rankdata([[0, 2], [3, 2]]).reshape(2,2)
array([[1. , 2.5],
[4. , 2.5]])
>>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
array([[1. , 2.5, 2.5],
[2. , 1. , 3. ]])
>>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="propagate")
array([nan, nan, nan, nan, nan, nan])
>>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="omit")
array([ 2., 3., 4., nan, 1., nan])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError(f'unknown method "{method}"')
a = np.asarray(a)
if axis is not None:
if a.size == 0:
# The return values of `normalize_axis_index` are ignored. The
# call validates `axis`, even though we won't use it.
normalize_axis_index(axis, a.ndim)
dt = np.float64 if method == 'average' else np.int_
return np.empty(a.shape, dtype=dt)
return np.apply_along_axis(rankdata, axis, a, method,
nan_policy=nan_policy)
arr = np.ravel(a)
contains_nan, nan_policy = _contains_nan(arr, nan_policy)
nan_indexes = None
if contains_nan:
if nan_policy == 'omit':
nan_indexes = np.isnan(arr)
if nan_policy == 'propagate':
return np.full_like(arr, np.nan)
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
result = inv + 1
else:
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
result = dense
else:
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
result = count[dense]
if method == 'min':
result = count[dense - 1] + 1
if method == 'average':
result = .5 * (count[dense] + count[dense - 1] + 1)
if nan_indexes is not None:
result = result.astype('float64')
result[nan_indexes] = np.nan
return result
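# A minimal illustrative sketch (hypothetical helper, not part of SciPy) of
# how the tie-breaking methods relate: 'average' ranks are the midpoint of
# the 'min' and 'max' ranks, e.g. (2 + 3) / 2 = 2.5 for the tied 2s in
# [0, 2, 3, 2].
def _sketch_rank_methods_relation(a):
    lo = rankdata(a, method='min')
    hi = rankdata(a, method='max')
    assert np.allclose(rankdata(a, method='average'), (lo + hi) / 2)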
def expectile(a, alpha=0.5, *, weights=None):
r"""Compute the expectile at the specified level.
Expectiles are a generalization of the expectation in the same way as
quantiles are a generalization of the median. The expectile at level
`alpha = 0.5` is the mean (average). See Notes for more details.
Parameters
----------
a : array_like
Array containing numbers whose expectile is desired.
alpha : float, default: 0.5
The level of the expectile; e.g., `alpha=0.5` gives the mean.
weights : array_like, optional
An array of weights associated with the values in `a`.
The `weights` must be broadcastable to the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
An integer valued weight element acts like repeating the corresponding
observation in `a` that many times. See Notes for more details.
Returns
-------
expectile : ndarray
The empirical expectile at level `alpha`.
See Also
--------
numpy.mean : Arithmetic average
numpy.quantile : Quantile
Notes
-----
In general, the expectile at level :math:`\alpha` of a random variable
:math:`X` with cumulative distribution function (CDF) :math:`F` is given
by the unique solution :math:`t` of:
.. math::
\alpha E((X - t)_+) = (1 - \alpha) E((t - X)_+) \,.
Here, :math:`(x)_+ = \max(0, x)` is the positive part of :math:`x`.
This equation can be equivalently written as:
.. math::
\alpha \int_t^\infty (x - t)\mathrm{d}F(x)
= (1 - \alpha) \int_{-\infty}^t (t - x)\mathrm{d}F(x) \,.
The empirical expectile at level :math:`\alpha` (`alpha`) of a sample
:math:`a_i` (the array `a`) is defined by plugging in the empirical CDF of
`a`. Given sample or case weights :math:`w` (the array `weights`), it
reads :math:`F_a(x) = \frac{1}{\sum_i w_i} \sum_i w_i 1_{a_i \leq x}`
with indicator function :math:`1_{A}`. This leads to the definition of the
empirical expectile at level `alpha` as the unique solution :math:`t` of:
.. math::
\alpha \sum_{i=1}^n w_i (a_i - t)_+ =
(1 - \alpha) \sum_{i=1}^n w_i (t - a_i)_+ \,.
For :math:`\alpha=0.5`, this simplifies to the weighted average.
Furthermore, the larger :math:`\alpha`, the larger the value of the
expectile.
As a final remark, the expectile at level :math:`\alpha` can also be
written as a minimization problem. One often used choice is
.. math::
\operatorname{argmin}_t
E(\lvert 1_{t\geq X} - \alpha\rvert(t - X)^2) \,.
References
----------
.. [1] W. K. Newey and J. L. Powell (1987), "Asymmetric Least Squares
Estimation and Testing," Econometrica, 55, 819-847.
.. [2] T. Gneiting (2009). "Making and Evaluating Point Forecasts,"
Journal of the American Statistical Association, 106, 746 - 762.
:doi:`10.48550/arXiv.0912.0902`
Examples
--------
>>> import numpy as np
>>> from scipy.stats import expectile
>>> a = [1, 4, 2, -1]
>>> expectile(a, alpha=0.5) == np.mean(a)
True
>>> expectile(a, alpha=0.2)
0.42857142857142855
>>> expectile(a, alpha=0.8)
2.5714285714285716
>>> weights = [1, 3, 1, 1]
>>> expectile(a, alpha=0.5, weights=weights) == np.average(a, weights=weights)
True
"""
if alpha < 0 or alpha > 1:
raise ValueError(
"The expectile level alpha must be in the range [0, 1]."
)
a = np.asarray(a)
if weights is not None:
weights = np.broadcast_to(weights, a.shape)
# This is the empirical equivalent of Eq. (13) with identification
# function from Table 9 (omitting a factor of 2) in [2] (their y is our
# data a, their x is our t)
def first_order(t):
return np.average(np.abs((a <= t) - alpha) * (t - a), weights=weights)
if alpha >= 0.5:
x0 = np.average(a, weights=weights)
x1 = np.amax(a)
else:
x1 = np.average(a, weights=weights)
x0 = np.amin(a)
if x0 == x1:
# a has a single unique element
return x0
# Note that the expectile is the unique solution, so no worries about
# finding a wrong root.
res = root_scalar(first_order, x0=x0, x1=x1)
return res.root
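# A minimal illustrative sketch (hypothetical helper, not part of SciPy):
# the returned root satisfies the defining first-order condition from the
# Notes, alpha * sum((a - t)_+) == (1 - alpha) * sum((t - a)_+).
def _sketch_expectile_condition(a, alpha):
    a = np.asarray(a, dtype=float)
    t = expectile(a, alpha=alpha)
    lhs = alpha * np.sum(np.maximum(a - t, 0))
    rhs = (1 - alpha) * np.sum(np.maximum(t - a, 0))
    assert np.isclose(lhs, rhs)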
| 400,769
| 37.0129
| 109
|
py
|
scipy
|
scipy-main/scipy/stats/stats.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'find_repeats', 'gmean', 'hmean', 'pmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera',
'scoreatpercentile', 'percentileofscore',
'cumfreq', 'relfreq', 'obrientransform',
'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd',
'median_abs_deviation',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
'f_oneway',
'pearsonr', 'fisher_exact',
'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau', 'multiscale_graphcorr',
'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
'kstest', 'ks_1samp', 'ks_2samp',
'chisquare', 'power_divergence',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'rankdata',
'combine_pvalues', 'wasserstein_distance', 'energy_distance',
'brunnermunzel', 'alexandergovern', 'gcd', 'namedtuple', 'array',
'ma', 'cdist', 'check_random_state', 'MapWrapper',
'rng_integers', 'float_factorial', 'linalg', 'distributions',
'mstats_basic', 'ModeResult', 'DescribeResult',
'SkewtestResult', 'KurtosistestResult', 'NormaltestResult',
'HistogramResult', 'CumfreqResult',
'RelfreqResult', 'SigmaclipResult', 'F_onewayResult',
'AlexanderGovernResult',
'PointbiserialrResult',
'MGCResult', 'Ttest_1sampResult', 'Ttest_indResult',
'Ttest_relResult', 'Power_divergenceResult', 'KstestResult',
'Ks_2sampResult', 'RanksumsResult', 'KruskalResult',
'FriedmanchisquareResult', 'BrunnerMunzelResult', 'RepeatedResults'
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="stats", module="stats",
private_module="_stats_py", all=__all__,
attribute=name)
| 2,137
| 39.339623
| 76
|
py
|
scipy
|
scipy-main/scipy/stats/_qmc.py
|
"""Quasi-Monte Carlo engines and helpers."""
from __future__ import annotations
import copy
import math
import numbers
import os
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import (
Callable,
ClassVar,
Literal,
overload,
TYPE_CHECKING,
)
import numpy as np
if TYPE_CHECKING:
import numpy.typing as npt
from scipy._lib._util import (
DecimalNumber, GeneratorType, IntNumber, SeedType
)
import scipy.stats as stats
from scipy._lib._util import rng_integers, _rng_spawn
from scipy.spatial import distance, Voronoi
from scipy.special import gammainc
from ._sobol import (
_initialize_v, _cscramble, _fill_p_cumulative, _draw, _fast_forward,
_categorize, _MAXDIM
)
from ._qmc_cy import (
_cy_wrapper_centered_discrepancy,
_cy_wrapper_wrap_around_discrepancy,
_cy_wrapper_mixture_discrepancy,
_cy_wrapper_l2_star_discrepancy,
_cy_wrapper_update_discrepancy,
_cy_van_der_corput_scrambled,
_cy_van_der_corput,
)
__all__ = ['scale', 'discrepancy', 'update_discrepancy',
'QMCEngine', 'Sobol', 'Halton', 'LatinHypercube', 'PoissonDisk',
'MultinomialQMC', 'MultivariateNormalQMC']
@overload
def check_random_state(seed: IntNumber | None = ...) -> np.random.Generator:
...
@overload
def check_random_state(seed: GeneratorType) -> GeneratorType:
...
# Based on scipy._lib._util.check_random_state
def check_random_state(seed=None):
"""Turn `seed` into a `numpy.random.Generator` instance.
Parameters
----------
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional # noqa
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` or ``RandomState`` instance, then
the provided instance is used.
Returns
-------
seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
Random number generator.
"""
if seed is None or isinstance(seed, (numbers.Integral, np.integer)):
return np.random.default_rng(seed)
elif isinstance(seed, (np.random.RandomState, np.random.Generator)):
return seed
else:
raise ValueError(f'{seed!r} cannot be used to seed a'
' numpy.random.Generator instance')
def scale(
sample: npt.ArrayLike,
l_bounds: npt.ArrayLike,
u_bounds: npt.ArrayLike,
*,
reverse: bool = False
) -> np.ndarray:
r"""Sample scaling from unit hypercube to different bounds.
To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
with :math:`a` the lower bounds and :math:`b` the upper bounds.
The following transformation is used:
.. math::
(b - a) \cdot \text{sample} + a
Parameters
----------
sample : array_like (n, d)
Sample to scale.
l_bounds, u_bounds : array_like (d,)
Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed
data. If `reverse` is True, range of the original data to transform
to the unit hypercube.
reverse : bool, optional
Reverse the transformation from different bounds to the unit hypercube.
Default is False.
Returns
-------
sample : array_like (n, d)
Scaled sample.
Examples
--------
Transform 3 samples in the unit hypercube to bounds:
>>> from scipy.stats import qmc
>>> l_bounds = [-2, 0]
>>> u_bounds = [6, 5]
>>> sample = [[0.5 , 0.75],
... [0.5 , 0.5],
... [0.75, 0.25]]
>>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
>>> sample_scaled
array([[2. , 3.75],
[2. , 2.5 ],
[4. , 1.25]])
And convert back to the unit hypercube:
>>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True)
>>> sample_
array([[0.5 , 0.75],
[0.5 , 0.5 ],
[0.75, 0.25]])
"""
sample = np.asarray(sample)
# Checking bounds and sample
if not sample.ndim == 2:
raise ValueError('Sample is not a 2D array')
lower, upper = _validate_bounds(
l_bounds=l_bounds, u_bounds=u_bounds, d=sample.shape[1]
)
if not reverse:
# Checking that sample is within the hypercube
if (sample.max() > 1.) or (sample.min() < 0.):
raise ValueError('Sample is not in unit hypercube')
return sample * (upper - lower) + lower
else:
# Checking that sample is within the bounds
if not (np.all(sample >= lower) and np.all(sample <= upper)):
raise ValueError('Sample is out of bounds')
return (sample - lower) / (upper - lower)
def discrepancy(
sample: npt.ArrayLike,
*,
iterative: bool = False,
method: Literal["CD", "WD", "MD", "L2-star"] = "CD",
workers: IntNumber = 1) -> float:
"""Discrepancy of a given sample.
Parameters
----------
sample : array_like (n, d)
The sample to compute the discrepancy from.
iterative : bool, optional
Must be False if not using it for updating the discrepancy.
Default is False. Refer to the notes for more details.
method : str, optional
Type of discrepancy, can be ``CD``, ``WD``, ``MD`` or ``L2-star``.
Refer to the notes for more details. Default is ``CD``.
workers : int, optional
Number of workers to use for parallel processing. If -1 is given all
CPU threads are used. Default is 1.
Returns
-------
discrepancy : float
Discrepancy.
Notes
-----
The discrepancy is a uniformity criterion used to assess the space filling
of a number of samples in a hypercube. A discrepancy quantifies the
distance between the continuous uniform distribution on a hypercube and the
discrete uniform distribution on :math:`n` distinct sample points.
The lower the value is, the better the coverage of the parameter space is.
For a collection of subsets of the hypercube, the discrepancy is the
difference between the fraction of sample points in one of those
subsets and the volume of that subset. There are different definitions of
discrepancy corresponding to different collections of subsets. Some
versions take a root mean square difference over subsets instead of
a maximum.
A measure of uniformity is reasonable if it satisfies the following
criteria [1]_:
1. It is invariant under permuting factors and/or runs.
2. It is invariant under rotation of the coordinates.
3. It can measure not only uniformity of the sample over the hypercube,
but also the projection uniformity of the sample over non-empty
subset of lower dimension hypercubes.
4. There is some reasonable geometric meaning.
5. It is easy to compute.
6. It satisfies the Koksma-Hlawka-like inequality.
7. It is consistent with other criteria in experimental design.
Four methods are available:
* ``CD``: Centered Discrepancy - subspace involves a corner of the
hypercube
* ``WD``: Wrap-around Discrepancy - subspace can wrap around bounds
* ``MD``: Mixture Discrepancy - mix between CD/WD covering more criteria
* ``L2-star``: L2-star discrepancy - like CD, but not invariant under
  rotation
See [2]_ for precise definitions of each method.
Lastly, using ``iterative=True``, it is possible to compute the
discrepancy as if we had :math:`n+1` samples. This is useful if we want
to add a point to a sampling and check the candidate which would give the
lowest discrepancy. Then you could just update the discrepancy with
each candidate using `update_discrepancy`. This method is faster than
computing the discrepancy for a large number of candidates.
References
----------
.. [1] Fang et al. "Design and modeling for computer experiments".
Computer Science and Data Analysis Series, 2006.
.. [2] Zhou Y.-D. et al. "Mixture discrepancy for quasi-random point sets."
Journal of Complexity, 29 (3-4) , pp. 283-301, 2013.
.. [3] T. T. Warnock. "Computational investigations of low discrepancy
point sets." Applications of Number Theory to Numerical
Analysis, Academic Press, pp. 319-343, 1972.
Examples
--------
Calculate the quality of the sample using the discrepancy:
>>> import numpy as np
>>> from scipy.stats import qmc
>>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
>>> l_bounds = [0.5, 0.5]
>>> u_bounds = [6.5, 6.5]
>>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
>>> space
array([[0.08333333, 0.41666667],
[0.25 , 0.91666667],
[0.41666667, 0.25 ],
[0.58333333, 0.75 ],
[0.75 , 0.08333333],
[0.91666667, 0.58333333]])
>>> qmc.discrepancy(space)
0.008142039609053464
We can also compute iteratively the ``CD`` discrepancy by using
``iterative=True``.
>>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
>>> disc_init
0.04769081147119336
>>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
0.008142039609053513
"""
sample = np.asarray(sample, dtype=np.float64, order="C")
# Checking that sample is within the hypercube and 2D
if not sample.ndim == 2:
raise ValueError("Sample is not a 2D array")
if (sample.max() > 1.) or (sample.min() < 0.):
raise ValueError("Sample is not in unit hypercube")
workers = _validate_workers(workers)
methods = {
"CD": _cy_wrapper_centered_discrepancy,
"WD": _cy_wrapper_wrap_around_discrepancy,
"MD": _cy_wrapper_mixture_discrepancy,
"L2-star": _cy_wrapper_l2_star_discrepancy,
}
if method in methods:
return methods[method](sample, iterative, workers=workers)
else:
raise ValueError(f"{method!r} is not a valid method. It must be one of"
f" {set(methods)!r}")
def update_discrepancy(
x_new: npt.ArrayLike,
sample: npt.ArrayLike,
initial_disc: DecimalNumber) -> float:
"""Update the centered discrepancy with a new sample.
Parameters
----------
x_new : array_like (1, d)
The new sample to add in `sample`.
sample : array_like (n, d)
The initial sample.
initial_disc : float
Centered discrepancy of the `sample`.
Returns
-------
discrepancy : float
Centered discrepancy of the sample composed of `x_new` and `sample`.
Examples
--------
We can also compute iteratively the discrepancy by using
``iterative=True``.
>>> import numpy as np
>>> from scipy.stats import qmc
>>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
>>> l_bounds = [0.5, 0.5]
>>> u_bounds = [6.5, 6.5]
>>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
>>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
>>> disc_init
0.04769081147119336
>>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
0.008142039609053513
"""
sample = np.asarray(sample, dtype=np.float64, order="C")
x_new = np.asarray(x_new, dtype=np.float64, order="C")
# Checking that sample is within the hypercube and 2D
if not sample.ndim == 2:
raise ValueError('Sample is not a 2D array')
if (sample.max() > 1.) or (sample.min() < 0.):
raise ValueError('Sample is not in unit hypercube')
# Checking that x_new is within the hypercube and 1D
if not x_new.ndim == 1:
raise ValueError('x_new is not a 1D array')
if not (np.all(x_new >= 0) and np.all(x_new <= 1)):
raise ValueError('x_new is not in unit hypercube')
if x_new.shape[0] != sample.shape[1]:
raise ValueError("x_new and sample must be broadcastable")
return _cy_wrapper_update_discrepancy(x_new, sample, initial_disc)
def _perturb_discrepancy(sample: np.ndarray, i1: int, i2: int, k: int,
disc: float):
"""Centered discrepancy after an elementary perturbation of a LHS.
An elementary perturbation consists of an exchange of coordinates between
two points: ``sample[i1, k] <-> sample[i2, k]``. By construction,
this operation conserves the LHS properties.
Parameters
----------
sample : array_like (n, d)
The sample (before permutation) to compute the discrepancy from.
i1 : int
The first line of the elementary permutation.
i2 : int
The second line of the elementary permutation.
k : int
The column of the elementary permutation.
disc : float
Centered discrepancy of the design before permutation.
Returns
-------
discrepancy : float
Centered discrepancy of the design after permutation.
References
----------
.. [1] Jin et al. "An efficient algorithm for constructing optimal design
of computer experiments", Journal of Statistical Planning and
Inference, 2005.
"""
n = sample.shape[0]
z_ij = sample - 0.5
# Eq (19)
c_i1j = (1. / n ** 2.
* np.prod(0.5 * (2. + abs(z_ij[i1, :])
+ abs(z_ij) - abs(z_ij[i1, :] - z_ij)), axis=1))
c_i2j = (1. / n ** 2.
* np.prod(0.5 * (2. + abs(z_ij[i2, :])
+ abs(z_ij) - abs(z_ij[i2, :] - z_ij)), axis=1))
# Eq (20)
c_i1i1 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i1, :]))
- 2. / n * np.prod(1. + 0.5 * abs(z_ij[i1, :])
- 0.5 * z_ij[i1, :] ** 2))
c_i2i2 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i2, :]))
- 2. / n * np.prod(1. + 0.5 * abs(z_ij[i2, :])
- 0.5 * z_ij[i2, :] ** 2))
# Eq (22), typo in the article in the denominator i2 -> i1
num = (2 + abs(z_ij[i2, k]) + abs(z_ij[:, k])
- abs(z_ij[i2, k] - z_ij[:, k]))
denum = (2 + abs(z_ij[i1, k]) + abs(z_ij[:, k])
- abs(z_ij[i1, k] - z_ij[:, k]))
gamma = num / denum
# Eq (23)
c_p_i1j = gamma * c_i1j
# Eq (24)
c_p_i2j = c_i2j / gamma
alpha = (1 + abs(z_ij[i2, k])) / (1 + abs(z_ij[i1, k]))
beta = (2 - abs(z_ij[i2, k])) / (2 - abs(z_ij[i1, k]))
g_i1 = np.prod(1. + abs(z_ij[i1, :]))
g_i2 = np.prod(1. + abs(z_ij[i2, :]))
h_i1 = np.prod(1. + 0.5 * abs(z_ij[i1, :]) - 0.5 * (z_ij[i1, :] ** 2))
h_i2 = np.prod(1. + 0.5 * abs(z_ij[i2, :]) - 0.5 * (z_ij[i2, :] ** 2))
# Eq (25), typo in the article g is missing
c_p_i1i1 = ((g_i1 * alpha) / (n ** 2) - 2. * alpha * beta * h_i1 / n)
# Eq (26), typo in the article n ** 2
c_p_i2i2 = ((g_i2 / ((n ** 2) * alpha)) - (2. * h_i2 / (n * alpha * beta)))
# Eq (26)
sum_ = c_p_i1j - c_i1j + c_p_i2j - c_i2j
mask = np.ones(n, dtype=bool)
mask[[i1, i2]] = False
sum_ = np.sum(sum_[mask])
disc_ep = (disc + c_p_i1i1 - c_i1i1 + c_p_i2i2 - c_i2i2 + 2 * sum_)
return disc_ep
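# A minimal illustrative sketch (hypothetical helper, not part of SciPy):
# the O(n) update formula should agree with recomputing the centered
# discrepancy from scratch after the swap ``sample[i1, k] <-> sample[i2, k]``.
def _sketch_perturb_discrepancy_check():
    rng = np.random.default_rng(1234)
    n, d = 6, 2
    centers = (np.arange(n) + 0.5) / n
    sample = np.column_stack([rng.permutation(centers) for _ in range(d)])
    disc = discrepancy(sample)  # default method is "CD"
    i1, i2, k = 0, 3, 1
    perturbed = sample.copy()
    perturbed[[i1, i2], k] = perturbed[[i2, i1], k]
    assert np.isclose(_perturb_discrepancy(sample, i1, i2, k, disc),
                      discrepancy(perturbed))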
def primes_from_2_to(n: int) -> np.ndarray:
"""Prime numbers from 2 to *n*.
Parameters
----------
n : int
Sup bound with ``n >= 6``.
Returns
-------
primes : ndarray
Primes in ``2 <= p < n``.
Notes
-----
Taken from [1]_ by P.T. Roy, written consent given on 23.04.2021
by the original author, Bruno Astrolino, for free use in SciPy under
the 3-clause BSD.
References
----------
.. [1] `StackOverflow <https://stackoverflow.com/questions/2068372>`_.
"""
sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
for i in range(1, int(n ** 0.5) // 3 + 1):
k = 3 * i + 1 | 1
sieve[k * k // 3::2 * k] = False
sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
def n_primes(n: IntNumber) -> list[int]:
"""List of the n-first prime numbers.
Parameters
----------
n : int
Number of prime numbers wanted.
Returns
-------
primes : list(int)
List of primes.
"""
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,
271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,
601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,
677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,
953, 967, 971, 977, 983, 991, 997][:n] # type: ignore[misc]
if len(primes) < n:
big_number = 2000
while 'Not enough primes':
primes = primes_from_2_to(big_number)[:n] # type: ignore
if len(primes) == n:
break
big_number += 1000
return primes
def _van_der_corput_permutations(
base: IntNumber, *, random_state: SeedType = None
) -> np.ndarray:
"""Permutations for scrambling a Van der Corput sequence.
Parameters
----------
base : int
Base of the sequence.
random_state : {None, int, `numpy.random.Generator`}, optional
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
Returns
-------
permutations : array_like
Permutation indices.
Notes
-----
In Algorithm 1 of Owen 2017, a permutation of `np.arange(base)` is
created for each positive integer `k` such that `1 - base**-k < 1`
using floating-point arithmetic. For double precision floats, the
condition `1 - base**-k < 1` can also be written as `base**-k >
2**-54`, which makes it more apparent how many permutations we need
to create.
"""
rng = check_random_state(random_state)
count = math.ceil(54 / math.log2(base)) - 1
permutations = np.repeat(np.arange(base)[None], count, axis=0)
for perm in permutations:
rng.shuffle(perm)
return permutations
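# A minimal illustrative sketch (hypothetical helper, not part of SciPy) of
# the count formula from the Notes: base 2 needs
# ceil(54 / log2(2)) - 1 = 53 permutations, one per representable digit.
def _sketch_permutation_count():
    perms = _van_der_corput_permutations(base=2, random_state=0)
    assert perms.shape == (53, 2)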
def van_der_corput(
n: IntNumber,
base: IntNumber = 2,
*,
start_index: IntNumber = 0,
scramble: bool = False,
permutations: npt.ArrayLike | None = None,
seed: SeedType = None,
workers: IntNumber = 1) -> np.ndarray:
"""Van der Corput sequence.
Pseudo-random number generator based on a b-adic expansion.
Scrambling uses permutations of the remainders (see [1]_). Multiple
permutations are applied to construct a point. The sequence of
permutations has to be the same for all points of the sequence.
Parameters
----------
n : int
Number of element of the sequence.
base : int, optional
Base of the sequence. Default is 2.
start_index : int, optional
Index to start the sequence from. Default is 0.
scramble : bool, optional
If True, use Owen scrambling. Otherwise no scrambling is done.
Default is False.
permutations : array_like, optional
Permutations used for scrambling.
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
workers : int, optional
Number of workers to use for parallel processing. If -1 is
given all CPU threads are used. Default is 1.
Returns
-------
sequence : list (n,)
Sequence of Van der Corput.
References
----------
.. [1] A. B. Owen. "A randomized Halton algorithm in R",
:arxiv:`1706.02808`, 2017.
"""
if base < 2:
raise ValueError("'base' must be at least 2")
if scramble:
if permutations is None:
permutations = _van_der_corput_permutations(
base=base, random_state=seed
)
else:
permutations = np.asarray(permutations)
return _cy_van_der_corput_scrambled(n, base, start_index,
permutations, workers)
else:
return _cy_van_der_corput(n, base, start_index, workers)
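# A minimal illustrative sketch (hypothetical helper, not part of SciPy):
# without scrambling, the base-2 sequence mirrors the binary digits of the
# index around the radix point, giving 0, 1/2, 1/4, 3/4, 1/8, ...
def _sketch_van_der_corput_base2():
    seq = van_der_corput(5, base=2)
    assert np.allclose(seq, [0., 0.5, 0.25, 0.75, 0.125])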
class QMCEngine(ABC):
"""A generic Quasi-Monte Carlo sampler class meant for subclassing.
QMCEngine is a base class to construct a specific Quasi-Monte Carlo
sampler. It cannot be used directly as a sampler.
Parameters
----------
d : int
Dimension of the parameter space.
optimization : {None, "random-cd", "lloyd"}, optional
Whether to use an optimization scheme to improve the quality after
sampling. Note that this is a post-processing step that does not
guarantee that all properties of the sample will be conserved.
Default is None.
* ``random-cd``: random permutations of coordinates to lower the
centered discrepancy. The best sample based on the centered
discrepancy is constantly updated. Centered discrepancy-based
sampling shows better space-filling robustness toward 2D and 3D
subprojections compared to using other discrepancy measures.
* ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
The process converges to equally spaced samples.
.. versionadded:: 1.10.0
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
Notes
-----
By convention samples are distributed over the half-open interval
``[0, 1)``. Instances of the class can access the attributes: ``d`` for
the dimension; and ``rng`` for the random number generator (used for the
``seed``).
**Subclassing**
When subclassing `QMCEngine` to create a new sampler, ``__init__`` and
``random`` must be redefined.
* ``__init__(d, seed=None)``: at least fix the dimension. If the sampler
does not take advantage of a ``seed`` (deterministic methods like
Halton), this parameter can be omitted.
* ``_random(n, *, workers=1)``: draw ``n`` from the engine. ``workers``
is used for parallelism. See `Halton` for example.
Optionally, two other methods can be overwritten by subclasses:
* ``reset``: Reset the engine to its original state.
* ``fast_forward``: If the sequence is deterministic (like the Halton
  sequence), then ``fast_forward(n)`` skips the first ``n`` draws.
Examples
--------
To create a random sampler based on ``np.random.random``, we would do the
following:
>>> from scipy.stats import qmc
>>> class RandomEngine(qmc.QMCEngine):
... def __init__(self, d, seed=None):
... super().__init__(d=d, seed=seed)
...
...
... def _random(self, n=1, *, workers=1):
... return self.rng.random((n, self.d))
...
...
... def reset(self):
... super().__init__(d=self.d, seed=self.rng_seed)
... return self
...
...
... def fast_forward(self, n):
... self.random(n)
... return self
After subclassing `QMCEngine` to define the sampling strategy we want to
use, we can create an instance to sample from.
>>> engine = RandomEngine(2)
>>> engine.random(5)
array([[0.22733602, 0.31675834], # random
[0.79736546, 0.67625467],
[0.39110955, 0.33281393],
[0.59830875, 0.18673419],
[0.67275604, 0.94180287]])
We can also reset the state of the generator and resample again.
>>> _ = engine.reset()
>>> engine.random(5)
array([[0.22733602, 0.31675834], # random
[0.79736546, 0.67625467],
[0.39110955, 0.33281393],
[0.59830875, 0.18673419],
[0.67275604, 0.94180287]])
"""
@abstractmethod
def __init__(
self,
d: IntNumber,
*,
optimization: Literal["random-cd", "lloyd"] | None = None,
seed: SeedType = None
) -> None:
if not np.issubdtype(type(d), np.integer) or d < 0:
raise ValueError('d must be a non-negative integer value')
self.d = d
if isinstance(seed, np.random.Generator):
# Spawn a Generator that we can own and reset.
self.rng = _rng_spawn(seed, 1)[0]
else:
# Create our instance of Generator, does not need spawning
# Also catch RandomState which cannot be spawned
self.rng = check_random_state(seed)
self.rng_seed = copy.deepcopy(self.rng)
self.num_generated = 0
config = {
# random-cd
"n_nochange": 100,
"n_iters": 10_000,
"rng": self.rng,
# lloyd
"tol": 1e-5,
"maxiter": 10,
"qhull_options": None,
}
self.optimization_method = _select_optimizer(optimization, config)
@abstractmethod
def _random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
...
def random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
"""Draw `n` in the half-open interval ``[0, 1)``.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space.
Default is 1.
workers : int, optional
Only supported with `Halton`.
Number of workers to use for parallel processing. If -1 is
given all CPU threads are used. Default is 1. It becomes faster
than one worker for `n` greater than :math:`10^3`.
Returns
-------
sample : array_like (n, d)
QMC sample.
"""
sample = self._random(n, workers=workers)
if self.optimization_method is not None:
sample = self.optimization_method(sample)
self.num_generated += n
return sample
def integers(
self,
l_bounds: npt.ArrayLike,
*,
u_bounds: npt.ArrayLike | None = None,
n: IntNumber = 1,
endpoint: bool = False,
workers: IntNumber = 1
) -> np.ndarray:
r"""
Draw `n` integers from `l_bounds` (inclusive) to `u_bounds`
(exclusive), or if endpoint=True, `l_bounds` (inclusive) to
`u_bounds` (inclusive).
Parameters
----------
l_bounds : int or array-like of ints
Lowest (signed) integers to be drawn (unless ``u_bounds=None``,
in which case this parameter is 0 and this value is used for
`u_bounds`).
u_bounds : int or array-like of ints, optional
If provided, one above the largest (signed) integer to be drawn
(see above for behavior if ``u_bounds=None``).
If array-like, must contain integer values.
n : int, optional
Number of samples to generate in the parameter space.
Default is 1.
endpoint : bool, optional
If true, sample from the interval ``[l_bounds, u_bounds]`` instead
of the default ``[l_bounds, u_bounds)``. Defaults is False.
workers : int, optional
Number of workers to use for parallel processing. If -1 is
given all CPU threads are used. Only supported when using `Halton`
Default is 1.
Returns
-------
sample : array_like (n, d)
QMC sample.
Notes
-----
It is safe to just use the same ``[0, 1)`` to integer mapping
with QMC that you would use with MC. You still get unbiasedness,
a strong law of large numbers, an asymptotically infinite variance
reduction and a finite sample variance bound.
To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
with :math:`a` the lower bounds and :math:`b` the upper bounds,
the following transformation is used:
.. math::
\text{floor}((b - a) \cdot \text{sample} + a)
"""
if u_bounds is None:
u_bounds = l_bounds
l_bounds = 0
u_bounds = np.atleast_1d(u_bounds)
l_bounds = np.atleast_1d(l_bounds)
if endpoint:
u_bounds = u_bounds + 1
if (not np.issubdtype(l_bounds.dtype, np.integer) or
not np.issubdtype(u_bounds.dtype, np.integer)):
message = ("'u_bounds' and 'l_bounds' must be integers or"
" array-like of integers")
raise ValueError(message)
if isinstance(self, Halton):
sample = self.random(n=n, workers=workers)
else:
sample = self.random(n=n)
sample = scale(sample, l_bounds=l_bounds, u_bounds=u_bounds)
sample = np.floor(sample).astype(np.int64)
return sample
def reset(self) -> QMCEngine:
"""Reset the engine to base state.
Returns
-------
engine : QMCEngine
Engine reset to its base state.
"""
seed = copy.deepcopy(self.rng_seed)
self.rng = check_random_state(seed)
self.num_generated = 0
return self
def fast_forward(self, n: IntNumber) -> QMCEngine:
"""Fast-forward the sequence by `n` positions.
Parameters
----------
n : int
Number of points to skip in the sequence.
Returns
-------
engine : QMCEngine
Engine reset to its base state.
"""
self.random(n=n)
return self
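# A minimal illustrative sketch (hypothetical helper, not part of SciPy) of
# the floor transform described in the Notes of ``QMCEngine.integers``:
# points in [0, 1) map onto {2, 3, 4} for l_bounds=2, u_bounds=5.
def _sketch_integer_mapping():
    u = np.array([[0.0], [0.49], [0.5], [0.99]])
    mapped = np.floor(u * (5 - 2) + 2).astype(np.int64)
    assert (mapped.ravel() == [2, 3, 3, 4]).all()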
class Halton(QMCEngine):
"""Halton sequence.
Pseudo-random number generator that generalizes the Van der Corput sequence
to multiple dimensions. The Halton sequence uses the base-two Van der
Corput sequence for its first dimension, base three for its second, and the
:math:`n`-th prime as the base for its :math:`n`-th dimension.
Parameters
----------
d : int
Dimension of the parameter space.
scramble : bool, optional
If True, use Owen scrambling. Otherwise no scrambling is done.
Default is True.
optimization : {None, "random-cd", "lloyd"}, optional
Whether to use an optimization scheme to improve the quality after
sampling. Note that this is a post-processing step that does not
guarantee that all properties of the sample will be conserved.
Default is None.
* ``random-cd``: random permutations of coordinates to lower the
centered discrepancy. The best sample based on the centered
discrepancy is constantly updated. Centered discrepancy-based
sampling shows better space-filling robustness toward 2D and 3D
subprojections compared to using other discrepancy measures.
* ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
The process converges to equally spaced samples.
.. versionadded:: 1.10.0
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
Notes
-----
The Halton sequence has severe striping artifacts for even modestly
large dimensions. These can be ameliorated by scrambling. Scrambling
also supports replication-based error estimates and extends
applicability to unbounded integrands.
References
----------
.. [1] Halton, "On the efficiency of certain quasi-random sequences of
points in evaluating multi-dimensional integrals", Numerische
Mathematik, 1960.
.. [2] A. B. Owen. "A randomized Halton algorithm in R",
:arxiv:`1706.02808`, 2017.
Examples
--------
Generate samples from a low discrepancy sequence of Halton.
>>> from scipy.stats import qmc
>>> sampler = qmc.Halton(d=2, scramble=False)
>>> sample = sampler.random(n=5)
>>> sample
array([[0. , 0. ],
[0.5 , 0.33333333],
[0.25 , 0.66666667],
[0.75 , 0.11111111],
[0.125 , 0.44444444]])
Compute the quality of the sample using the discrepancy criterion.
>>> qmc.discrepancy(sample)
0.088893711419753
If one wants to continue an existing design, extra points can be obtained
by calling `random` again. Alternatively, you can skip some points like:
>>> _ = sampler.fast_forward(5)
>>> sample_continued = sampler.random(n=5)
>>> sample_continued
array([[0.3125 , 0.37037037],
[0.8125 , 0.7037037 ],
[0.1875 , 0.14814815],
[0.6875 , 0.48148148],
[0.4375 , 0.81481481]])
Finally, samples can be scaled to bounds.
>>> l_bounds = [0, 2]
>>> u_bounds = [10, 5]
>>> qmc.scale(sample_continued, l_bounds, u_bounds)
array([[3.125 , 3.11111111],
[8.125 , 4.11111111],
[1.875 , 2.44444444],
[6.875 , 3.44444444],
[4.375 , 4.44444444]])
"""
def __init__(
self, d: IntNumber, *, scramble: bool = True,
optimization: Literal["random-cd", "lloyd"] | None = None,
seed: SeedType = None
) -> None:
# Used in `scipy.integrate.qmc_quad`
self._init_quad = {'d': d, 'scramble': True,
'optimization': optimization}
super().__init__(d=d, optimization=optimization, seed=seed)
self.seed = seed
# important to have ``type(bdim) == int`` for performance reason
self.base = [int(bdim) for bdim in n_primes(d)]
self.scramble = scramble
self._initialize_permutations()
def _initialize_permutations(self) -> None:
"""Initialize permutations for all Van der Corput sequences.
Permutations are only needed for scrambling.
"""
self._permutations: list = [None] * len(self.base)
if self.scramble:
for i, bdim in enumerate(self.base):
permutations = _van_der_corput_permutations(
base=bdim, random_state=self.rng
)
self._permutations[i] = permutations
def _random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
"""Draw `n` in the half-open interval ``[0, 1)``.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space. Default is 1.
workers : int, optional
Number of workers to use for parallel processing. If -1 is
given all CPU threads are used. Default is 1. It becomes faster
than one worker for `n` greater than :math:`10^3`.
Returns
-------
sample : array_like (n, d)
QMC sample.
"""
workers = _validate_workers(workers)
# Generate a sample using a Van der Corput sequence per dimension.
sample = [van_der_corput(n, bdim, start_index=self.num_generated,
scramble=self.scramble,
permutations=self._permutations[i],
workers=workers)
for i, bdim in enumerate(self.base)]
return np.array(sample).T.reshape(n, self.d)
class LatinHypercube(QMCEngine):
r"""Latin hypercube sampling (LHS).
A Latin hypercube sample [1]_ generates :math:`n` points in
:math:`[0,1)^{d}`. Each univariate marginal distribution is stratified,
placing exactly one point in :math:`[j/n, (j+1)/n)` for
:math:`j=0,1,...,n-1`. They are still applicable when :math:`n << d`.
Parameters
----------
d : int
Dimension of the parameter space.
scramble : bool, optional
When False, center samples within cells of a multi-dimensional grid.
Otherwise, samples are randomly placed within cells of the grid.
.. note::
Setting ``scramble=False`` does not ensure deterministic output.
For that, use the `seed` parameter.
Default is True.
.. versionadded:: 1.10.0
optimization : {None, "random-cd", "lloyd"}, optional
Whether to use an optimization scheme to improve the quality after
sampling. Note that this is a post-processing step that does not
guarantee that all properties of the sample will be conserved.
Default is None.
* ``random-cd``: random permutations of coordinates to lower the
centered discrepancy. The best sample based on the centered
discrepancy is constantly updated. Centered discrepancy-based
sampling shows better space-filling robustness toward 2D and 3D
subprojections compared to using other discrepancy measures.
* ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
The process converges to equally spaced samples.
.. versionadded:: 1.8.0
.. versionchanged:: 1.10.0
Add ``lloyd``.
strength : {1, 2}, optional
Strength of the LHS. ``strength=1`` produces a plain LHS while
``strength=2`` produces an orthogonal array based LHS of strength 2
[7]_, [8]_. In that case, only ``n=p**2`` points can be sampled,
with ``p`` a prime number. It also constrains ``d <= p + 1``.
Default is 1.
.. versionadded:: 1.8.0
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
Notes
-----
When LHS is used to integrate a function :math:`f` with :math:`n` points,
it is extremely effective on integrands that are nearly additive [2]_.
With a LHS of :math:`n` points, the variance of the integral is always
lower than plain MC on :math:`n-1` points [3]_. There is a central limit
theorem for LHS on the mean and variance of the integral [4]_, but not
necessarily for optimized LHS due to the randomization.
:math:`A` is called an orthogonal array of strength :math:`t` if in each
n-row-by-t-column submatrix of :math:`A`: all :math:`p^t` possible
distinct rows occur the same number of times. The elements of :math:`A`
are in the set :math:`\{0, 1, ..., p-1\}`, also called symbols.
The constraint that :math:`p` must be a prime number is to allow modular
arithmetic. Increasing strength adds some symmetry to the sub-projections
of a sample. With strength 2, samples are symmetric along the diagonals of
2D sub-projections. This may be undesirable, but on the other hand, the
sample dispersion is improved.
Strength 1 (plain LHS) brings an advantage over strength 0 (MC) and
strength 2 is a useful increment over strength 1. Going to strength 3 is
a smaller increment and scrambled QMC like Sobol', Halton are more
performant [7]_.
To create a LHS of strength 2, the orthogonal array :math:`A` is
randomized by applying a random, bijective map of the set of symbols onto
itself. For example, in column 0, all 0s might become 2; in column 1,
all 0s might become 1, etc.
Then, for each column :math:`i` and symbol :math:`j`, we add a plain,
one-dimensional LHS of size :math:`p` to the subarray where
:math:`A^i = j`. The resulting matrix is finally divided by :math:`p`.
References
----------
.. [1] Mckay et al., "A Comparison of Three Methods for Selecting Values
of Input Variables in the Analysis of Output from a Computer Code."
Technometrics, 1979.
.. [2] M. Stein, "Large sample properties of simulations using Latin
hypercube sampling." Technometrics 29, no. 2: 143-151, 1987.
.. [3] A. B. Owen, "Monte Carlo variance of scrambled net quadrature."
SIAM Journal on Numerical Analysis 34, no. 5: 1884-1910, 1997
.. [4] Loh, W.-L. "On Latin hypercube sampling." The annals of statistics
24, no. 5: 2058-2080, 1996.
.. [5] Fang et al. "Design and modeling for computer experiments".
Computer Science and Data Analysis Series, 2006.
.. [6] Damblin et al., "Numerical studies of space filling designs:
optimization of Latin Hypercube Samples and subprojection properties."
Journal of Simulation, 2013.
.. [7] A. B. Owen , "Orthogonal arrays for computer experiments,
integration and visualization." Statistica Sinica, 1992.
.. [8] B. Tang, "Orthogonal Array-Based Latin Hypercubes."
Journal of the American Statistical Association, 1993.
.. [9] Susan K. Seaholm et al. "Latin hypercube sampling and the
sensitivity analysis of a Monte Carlo epidemic model".
Int J Biomed Comput, 23(1-2), 97-112,
:doi:`10.1016/0020-7101(88)90067-0`, 1988.
Examples
--------
In [9]_, a Latin Hypercube sampling strategy was used to sample a
parameter space to study the importance of each parameter of an epidemic
model. Such analysis is also called a sensitivity analysis.
Since the dimensionality of the problem is high (6), it is computationally
expensive to cover the space. When numerical experiments are costly,
QMC enables analysis that may not be possible if using a grid.
The six parameters of the model represented the probability of illness,
the probability of withdrawal, and four contact probabilities.
The authors assumed uniform distributions for all parameters and generated
50 samples.
Using `scipy.stats.qmc.LatinHypercube` to replicate the protocol, the
first step is to create a sample in the unit hypercube:
>>> from scipy.stats import qmc
>>> sampler = qmc.LatinHypercube(d=6)
>>> sample = sampler.random(n=50)
Then the sample can be scaled to the appropriate bounds:
>>> l_bounds = [0.000125, 0.01, 0.0025, 0.05, 0.47, 0.7]
>>> u_bounds = [0.000375, 0.03, 0.0075, 0.15, 0.87, 0.9]
>>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
Such a sample was used to run the model 50 times, and a polynomial
response surface was constructed. This allowed the authors to study the
relative importance of each parameter across the range of
possibilities of every other parameter.
In this computer experiment, they showed a 14-fold reduction in the number
of samples required to maintain an error below 2% on their response surface
when compared to a grid sampling.
Below are other examples showing alternative ways to construct LHS
with even better coverage of the space.
Using a base LHS as a baseline.
>>> sampler = qmc.LatinHypercube(d=2)
>>> sample = sampler.random(n=5)
>>> qmc.discrepancy(sample)
0.0196... # random
Use the `optimization` keyword argument to produce a LHS with
lower discrepancy at higher computational cost.
>>> sampler = qmc.LatinHypercube(d=2, optimization="random-cd")
>>> sample = sampler.random(n=5)
>>> qmc.discrepancy(sample)
0.0176... # random
Use the `strength` keyword argument to produce an orthogonal array based
LHS of strength 2. In this case, the number of sample points must be the
square of a prime number.
>>> sampler = qmc.LatinHypercube(d=2, strength=2)
>>> sample = sampler.random(n=9)
>>> qmc.discrepancy(sample)
0.00526... # random
Options could be combined to produce an optimized centered
orthogonal array based LHS. After optimization, the result would not
be guaranteed to be of strength 2.
"""
def __init__(
self, d: IntNumber, *,
scramble: bool = True,
strength: int = 1,
optimization: Literal["random-cd", "lloyd"] | None = None,
seed: SeedType = None
) -> None:
# Used in `scipy.integrate.qmc_quad`
self._init_quad = {'d': d, 'scramble': True, 'strength': strength,
'optimization': optimization}
super().__init__(d=d, seed=seed, optimization=optimization)
self.scramble = scramble
lhs_method_strength = {
1: self._random_lhs,
2: self._random_oa_lhs
}
try:
self.lhs_method: Callable = lhs_method_strength[strength]
except KeyError as exc:
message = (f"{strength!r} is not a valid strength. It must be one"
f" of {set(lhs_method_strength)!r}")
raise ValueError(message) from exc
def _random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
lhs = self.lhs_method(n)
return lhs
def _random_lhs(self, n: IntNumber = 1) -> np.ndarray:
"""Base LHS algorithm."""
if not self.scramble:
samples: np.ndarray | float = 0.5
else:
samples = self.rng.uniform(size=(n, self.d))
perms = np.tile(np.arange(1, n + 1),
(self.d, 1)) # type: ignore[arg-type]
for i in range(self.d):
self.rng.shuffle(perms[i, :])
perms = perms.T
samples = (perms - samples) / n
return samples
def _random_oa_lhs(self, n: IntNumber = 4) -> np.ndarray:
"""Orthogonal array based LHS of strength 2."""
p = np.sqrt(n).astype(int)
n_row = p**2
n_col = p + 1
primes = primes_from_2_to(p + 1)
if p not in primes or n != n_row:
raise ValueError(
"n is not the square of a prime number. Close"
f" values are {primes[-2:]**2}"
)
if self.d > p + 1:
raise ValueError("n is too small for d. Must be n > (d-1)**2")
oa_sample = np.zeros(shape=(n_row, n_col), dtype=int)
# OA of strength 2
arrays = np.tile(np.arange(p), (2, 1))
oa_sample[:, :2] = np.stack(np.meshgrid(*arrays),
axis=-1).reshape(-1, 2)
for p_ in range(1, p):
oa_sample[:, 2+p_-1] = np.mod(oa_sample[:, 0]
+ p_*oa_sample[:, 1], p)
# scramble the OA
oa_sample_ = np.empty(shape=(n_row, n_col), dtype=int)
for j in range(n_col):
perms = self.rng.permutation(p)
oa_sample_[:, j] = perms[oa_sample[:, j]]
# following is making a scrambled OA into an OA-LHS
oa_lhs_sample = np.zeros(shape=(n_row, n_col))
lhs_engine = LatinHypercube(d=1, scramble=self.scramble, strength=1,
seed=self.rng) # type: QMCEngine
for j in range(n_col):
for k in range(p):
idx = oa_sample[:, j] == k
lhs = lhs_engine.random(p).flatten()
oa_lhs_sample[:, j][idx] = lhs + oa_sample[:, j][idx]
lhs_engine = lhs_engine.reset()
oa_lhs_sample /= p
return oa_lhs_sample[:, :self.d] # type: ignore
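# A minimal illustrative sketch (hypothetical helper, not part of SciPy): an
# orthogonal-array based LHS is still a Latin hypercube, so each 1D marginal
# places exactly one point in every bin [j/n, (j+1)/n).
def _sketch_oa_lhs_stratification():
    sampler = LatinHypercube(d=2, strength=2, seed=12345)
    sample = sampler.random(n=9)  # n must be p**2 with p prime
    for j in range(2):
        occupied = np.floor(sample[:, j] * 9).astype(int)
        assert sorted(occupied) == list(range(9))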
class Sobol(QMCEngine):
"""Engine for generating (scrambled) Sobol' sequences.
Sobol' sequences are low-discrepancy, quasi-random numbers. Points
can be drawn using two methods:
* `random_base2`: safely draw :math:`n=2^m` points. This method
guarantees the balance properties of the sequence.
* `random`: draw an arbitrary number of points from the
sequence. See warning below.
Parameters
----------
d : int
Dimensionality of the sequence. Max dimensionality is 21201.
scramble : bool, optional
If True, use LMS+shift scrambling. Otherwise, no scrambling is done.
Default is True.
bits : int, optional
Number of bits of the generator. Control the maximum number of points
that can be generated, which is ``2**bits``. Maximal value is 64.
It does not correspond to the return type, which is always
``np.float64`` to prevent points from repeating themselves.
Default is None, which for backward compatibility, corresponds to 30.
.. versionadded:: 1.9.0
optimization : {None, "random-cd", "lloyd"}, optional
Whether to use an optimization scheme to improve the quality after
sampling. Note that this is a post-processing step that does not
guarantee that all properties of the sample will be conserved.
Default is None.
* ``random-cd``: random permutations of coordinates to lower the
centered discrepancy. The best sample based on the centered
discrepancy is constantly updated. Centered discrepancy-based
sampling shows better space-filling robustness toward 2D and 3D
subprojections compared to using other discrepancy measures.
* ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
The process converges to equally spaced samples.
.. versionadded:: 1.10.0
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
Notes
-----
Sobol' sequences [1]_ provide :math:`n=2^m` low discrepancy points in
:math:`[0,1)^{d}`. Scrambling them [3]_ makes them suitable for singular
integrands, provides a means of error estimation, and can improve their
rate of convergence. The scrambling strategy which is implemented is a
(left) linear matrix scramble (LMS) followed by a digital random shift
(LMS+shift) [2]_.
There are many versions of Sobol' sequences depending on their
'direction numbers'. This code uses direction numbers from [4]_. Hence,
the maximum number of dimensions is 21201. The direction numbers have been
precomputed with search criterion 6 and can be retrieved at
https://web.maths.unsw.edu.au/~fkuo/sobol/.
.. warning::
Sobol' sequences are a quadrature rule and they lose their balance
properties if one uses a sample size that is not a power of 2, or skips
the first point, or thins the sequence [5]_.
If :math:`n=2^m` points are not enough then one should take :math:`2^M`
points for :math:`M>m`. When scrambling, the number R of independent
replicates does not have to be a power of 2.
Sobol' sequences are generated to some number :math:`B` of bits.
After :math:`2^B` points have been generated, the sequence would
repeat. Hence, an error is raised.
The number of bits can be controlled with the parameter `bits`.
References
----------
.. [1] I. M. Sobol', "The distribution of points in a cube and the accurate
evaluation of integrals." Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
1967.
.. [2] J. Matousek, "On the L2-discrepancy for anchored boxes."
J. of Complexity 14, 527-556, 1998.
.. [3] Art B. Owen, "Scrambling Sobol and Niederreiter-Xing points."
Journal of Complexity, 14(4):466-489, December 1998.
.. [4] S. Joe and F. Y. Kuo, "Constructing Sobol sequences with better
two-dimensional projections." SIAM Journal on Scientific Computing,
30(5):2635-2654, 2008.
.. [5] Art B. Owen, "On dropping the first Sobol' point."
:arxiv:`2008.08051`, 2020.
Examples
--------
Generate samples from a low discrepancy sequence of Sobol'.
>>> from scipy.stats import qmc
>>> sampler = qmc.Sobol(d=2, scramble=False)
>>> sample = sampler.random_base2(m=3)
>>> sample
array([[0. , 0. ],
[0.5 , 0.5 ],
[0.75 , 0.25 ],
[0.25 , 0.75 ],
[0.375, 0.375],
[0.875, 0.875],
[0.625, 0.125],
[0.125, 0.625]])
Compute the quality of the sample using the discrepancy criterion.
>>> qmc.discrepancy(sample)
0.013882107204860938
To continue an existing design, extra points can be obtained
by calling again `random_base2`. Alternatively, you can skip some
points like:
>>> _ = sampler.reset()
>>> _ = sampler.fast_forward(4)
>>> sample_continued = sampler.random_base2(m=2)
>>> sample_continued
array([[0.375, 0.375],
[0.875, 0.875],
[0.625, 0.125],
[0.125, 0.625]])
Finally, samples can be scaled to bounds.
>>> l_bounds = [0, 2]
>>> u_bounds = [10, 5]
>>> qmc.scale(sample_continued, l_bounds, u_bounds)
array([[3.75 , 3.125],
[8.75 , 4.625],
[6.25 , 2.375],
[1.25 , 3.875]])
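As an illustrative sketch (not part of the original docstring), `bits`
bounds how many points a single engine can ever draw: at most ``2**30``
with the default, and up to ``2**64`` with ``bits=64``.
>>> sampler_64 = qmc.Sobol(d=2, scramble=False, bits=64)
>>> sampler_64.random_base2(m=3).shape
(8, 2)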
"""
MAXDIM: ClassVar[int] = _MAXDIM
def __init__(
self, d: IntNumber, *, scramble: bool = True,
bits: IntNumber | None = None, seed: SeedType = None,
optimization: Literal["random-cd", "lloyd"] | None = None
) -> None:
# Used in `scipy.integrate.qmc_quad`
self._init_quad = {'d': d, 'scramble': True, 'bits': bits,
'optimization': optimization}
super().__init__(d=d, optimization=optimization, seed=seed)
if d > self.MAXDIM:
raise ValueError(
f"Maximum supported dimensionality is {self.MAXDIM}."
)
self.bits = bits
self.dtype_i: type
if self.bits is None:
self.bits = 30
if self.bits <= 32:
self.dtype_i = np.uint32
elif 32 < self.bits <= 64:
self.dtype_i = np.uint64
else:
raise ValueError("Maximum supported 'bits' is 64")
self.maxn = 2**self.bits
# v is d x maxbit matrix
self._sv: np.ndarray = np.zeros((d, self.bits), dtype=self.dtype_i)
_initialize_v(self._sv, dim=d, bits=self.bits)
if not scramble:
self._shift: np.ndarray = np.zeros(d, dtype=self.dtype_i)
else:
# scramble self._shift and self._sv
self._scramble()
self._quasi = self._shift.copy()
# normalization constant with the largest possible number
# calculate in Python to not overflow int with 2**64
self._scale = 1.0 / 2 ** self.bits
self._first_point = (self._quasi * self._scale).reshape(1, -1)
# explicit casting to float64
self._first_point = self._first_point.astype(np.float64)
def _scramble(self) -> None:
"""Scramble the sequence using LMS+shift."""
# Generate shift vector
self._shift = np.dot(
rng_integers(self.rng, 2, size=(self.d, self.bits),
dtype=self.dtype_i),
2 ** np.arange(self.bits, dtype=self.dtype_i),
)
# Generate lower triangular matrices (stacked across dimensions)
ltm = np.tril(rng_integers(self.rng, 2,
size=(self.d, self.bits, self.bits),
dtype=self.dtype_i))
_cscramble(
dim=self.d, bits=self.bits, # type: ignore[arg-type]
ltm=ltm, sv=self._sv
)
def _random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
"""Draw next point(s) in the Sobol' sequence.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space. Default is 1.
Returns
-------
sample : array_like (n, d)
Sobol' sample.
"""
sample: np.ndarray = np.empty((n, self.d), dtype=np.float64)
if n == 0:
return sample
total_n = self.num_generated + n
if total_n > self.maxn:
msg = (
f"At most 2**{self.bits}={self.maxn} distinct points can be "
f"generated. {self.num_generated} points have been previously "
f"generated, then: n={self.num_generated}+{n}={total_n}. "
)
if self.bits != 64:
msg += "Consider increasing `bits`."
raise ValueError(msg)
if self.num_generated == 0:
# verify n is a power of 2
if not (n & (n - 1) == 0):
warnings.warn("The balance properties of Sobol' points require"
" n to be a power of 2.", stacklevel=2)
if n == 1:
sample = self._first_point
else:
_draw(
n=n - 1, num_gen=self.num_generated, dim=self.d,
scale=self._scale, sv=self._sv, quasi=self._quasi,
sample=sample
)
sample = np.concatenate(
[self._first_point, sample]
)[:n] # type: ignore[misc]
else:
_draw(
n=n, num_gen=self.num_generated - 1, dim=self.d,
scale=self._scale, sv=self._sv, quasi=self._quasi,
sample=sample
)
return sample
def random_base2(self, m: IntNumber) -> np.ndarray:
"""Draw point(s) from the Sobol' sequence.
This function draws :math:`n=2^m` points in the parameter space
ensuring the balance properties of the sequence.
Parameters
----------
m : int
Logarithm in base 2 of the number of samples; i.e., n = 2^m.
Returns
-------
sample : array_like (n, d)
Sobol' sample.
"""
n = 2 ** m
total_n = self.num_generated + n
if not (total_n & (total_n - 1) == 0):
raise ValueError("The balance properties of Sobol' points require "
"n to be a power of 2. {0} points have been "
"previously generated, then: n={0}+2**{1}={2}. "
"If you still want to do this, the function "
"'Sobol.random()' can be used."
.format(self.num_generated, m, total_n))
return self.random(n)
def reset(self) -> Sobol:
"""Reset the engine to base state.
Returns
-------
engine : Sobol
Engine reset to its base state.
"""
super().reset()
self._quasi = self._shift.copy()
return self
def fast_forward(self, n: IntNumber) -> Sobol:
"""Fast-forward the sequence by `n` positions.
Parameters
----------
n : int
Number of points to skip in the sequence.
Returns
-------
engine : Sobol
The fast-forwarded engine.
"""
if self.num_generated == 0:
_fast_forward(
n=n - 1, num_gen=self.num_generated, dim=self.d,
sv=self._sv, quasi=self._quasi
)
else:
_fast_forward(
n=n, num_gen=self.num_generated - 1, dim=self.d,
sv=self._sv, quasi=self._quasi
)
self.num_generated += n
return self
class PoissonDisk(QMCEngine):
"""Poisson disk sampling.
Parameters
----------
d : int
Dimension of the parameter space.
radius : float
Minimal distance to keep between points when sampling new candidates.
hypersphere : {"volume", "surface"}, optional
Sampling strategy to generate potential candidates to be added in the
final sample. Default is "volume".
* ``volume``: original Bridson algorithm as described in [1]_.
New candidates are sampled *within* the hypersphere.
* ``surface``: only sample the surface of the hypersphere.
ncandidates : int
Number of candidates to sample per iteration. More candidates result
in a denser sampling as more candidates can be accepted per iteration.
optimization : {None, "random-cd", "lloyd"}, optional
Whether to use an optimization scheme to improve the quality after
sampling. Note that this is a post-processing step that does not
guarantee that all properties of the sample will be conserved.
Default is None.
* ``random-cd``: random permutations of coordinates to lower the
centered discrepancy. The best sample based on the centered
discrepancy is constantly updated. Centered discrepancy-based
sampling shows better space-filling robustness toward 2D and 3D
subprojections compared to using other discrepancy measures.
* ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
The process converges to equally spaced samples.
.. versionadded:: 1.10.0
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
Notes
-----
Poisson disk sampling is an iterative sampling strategy. Starting from
a seed sample, `ncandidates` are sampled in the hypersphere
surrounding the seed. Candidates closer than `radius` to an existing
sample, or outside the domain, are rejected. Accepted candidates are
added to the pool of seed samples. The process stops when the pool is
empty or when the number of required samples is reached.
The maximum number of points that a sample can contain is directly linked
to the `radius`. As the dimension of the space increases, a higher radius
spreads the points further and helps overcome the curse of dimensionality.
See the :ref:`quasi monte carlo tutorial <quasi-monte-carlo>` for more
details.
.. warning::
Due to its iterative nature and memory requirements, the algorithm
is best suited to low dimensions and small sample sizes.
Selecting a small radius in a high dimension means that the space
can hold far more samples than with a lower dimension or a larger
radius.
Some code taken from [2]_, written consent given on 31.03.2021
by the original author, Shamis, for free use in SciPy under
the 3-clause BSD.
References
----------
.. [1] Robert Bridson, "Fast Poisson Disk Sampling in Arbitrary
Dimensions." SIGGRAPH, 2007.
.. [2] `StackOverflow <https://stackoverflow.com/questions/66047540>`__.
Examples
--------
Generate a 2D sample using a `radius` of 0.2.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from matplotlib.collections import PatchCollection
>>> from scipy.stats import qmc
>>>
>>> rng = np.random.default_rng()
>>> radius = 0.2
>>> engine = qmc.PoissonDisk(d=2, radius=radius, seed=rng)
>>> sample = engine.random(20)
Visualizing the 2D sample and showing that no points are closer than
`radius`. ``radius/2`` is used to visualize non-intersecting circles.
If two samples are exactly at `radius` from each other, then their circle
of radius ``radius/2`` will touch.
>>> fig, ax = plt.subplots()
>>> _ = ax.scatter(sample[:, 0], sample[:, 1])
>>> circles = [plt.Circle((xi, yi), radius=radius/2, fill=False)
... for xi, yi in sample]
>>> collection = PatchCollection(circles, match_original=True)
>>> ax.add_collection(collection)
>>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$',
... xlim=[0, 1], ylim=[0, 1])
>>> plt.show()
Such a visualization can be seen as circle packing: how many circles can
we fit in the space? This is an NP-hard problem. The method `fill_space`
can be used to add samples until no more samples can be added. This is
a hard problem and parameters may need to be adjusted manually. Beware of
the dimension: as the dimensionality increases, the number of samples
required to fill the space increases exponentially
(curse of dimensionality).
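A minimal sketch of `fill_space` (illustrative; the number of accepted
points varies with the seed and `radius`):
>>> engine = qmc.PoissonDisk(d=2, radius=radius, seed=rng)
>>> sample = engine.fill_space()
>>> len(sample) > 0
True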
"""
def __init__(
self,
d: IntNumber,
*,
radius: DecimalNumber = 0.05,
hypersphere: Literal["volume", "surface"] = "volume",
ncandidates: IntNumber = 30,
optimization: Literal["random-cd", "lloyd"] | None = None,
seed: SeedType = None
) -> None:
# Used in `scipy.integrate.qmc_quad`
self._init_quad = {'d': d, 'radius': radius,
'hypersphere': hypersphere,
'ncandidates': ncandidates,
'optimization': optimization}
super().__init__(d=d, optimization=optimization, seed=seed)
hypersphere_sample = {
"volume": self._hypersphere_volume_sample,
"surface": self._hypersphere_surface_sample
}
try:
self.hypersphere_method = hypersphere_sample[hypersphere]
except KeyError as exc:
message = (
f"{hypersphere!r} is not a valid hypersphere sampling"
f" method. It must be one of {set(hypersphere_sample)!r}")
raise ValueError(message) from exc
# size of the sphere from which the samples are drawn relative to the
# size of a disk (radius)
# for the surface sampler, all new points are almost exactly 1 radius
# away from at least one existing sample; the factor is 1 + eps to
# avoid rejection
self.radius_factor = 2 if hypersphere == "volume" else 1.001
self.radius = radius
self.radius_squared = self.radius**2
# sample to generate per iteration in the hypersphere around center
self.ncandidates = ncandidates
with np.errstate(divide='ignore'):
self.cell_size = self.radius / np.sqrt(self.d)
self.grid_size = (
np.ceil(np.ones(self.d) / self.cell_size)
).astype(int)
self._initialize_grid_pool()
def _initialize_grid_pool(self):
"""Sampling pool and sample grid."""
self.sample_pool = []
# Positions of cells
# n-dim value for each grid cell
self.sample_grid = np.empty(
np.append(self.grid_size, self.d),
dtype=np.float32
)
# Initialise empty cells with NaNs
self.sample_grid.fill(np.nan)
def _random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
"""Draw `n` in the interval ``[0, 1]``.
Note that it can return fewer samples if the space is full.
See the note section of the class.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space. Default is 1.
Returns
-------
sample : array_like (n, d)
QMC sample.
"""
if n == 0 or self.d == 0:
return np.empty((n, self.d))
def in_limits(sample: np.ndarray) -> bool:
return (sample.max() <= 1.) and (sample.min() >= 0.)
def in_neighborhood(candidate: np.ndarray, n: int = 2) -> bool:
"""
Check if there are samples closer than ``radius_squared`` to the
`candidate` sample.
"""
indices = (candidate / self.cell_size).astype(int)
ind_min = np.maximum(indices - n, np.zeros(self.d, dtype=int))
ind_max = np.minimum(indices + n + 1, self.grid_size)
# Reject the candidate if its own grid cell is already occupied
if not np.isnan(self.sample_grid[tuple(indices)][0]):
return True
a = [slice(ind_min[i], ind_max[i]) for i in range(self.d)]
# guards against: invalid value encountered in less as we are
# comparing with nan and returns False. Which is wanted.
with np.errstate(invalid='ignore'):
if np.any(
np.sum(
np.square(candidate - self.sample_grid[tuple(a)]),
axis=self.d
) < self.radius_squared
):
return True
return False
def add_sample(candidate: np.ndarray) -> None:
self.sample_pool.append(candidate)
indices = (candidate / self.cell_size).astype(int)
self.sample_grid[tuple(indices)] = candidate
curr_sample.append(candidate)
curr_sample: list[np.ndarray] = []
if len(self.sample_pool) == 0:
# the pool is being initialized with a single random sample
add_sample(self.rng.random(self.d))
num_drawn = 1
else:
num_drawn = 0
# exhaust the sample pool to obtain up to n samples
while len(self.sample_pool) and num_drawn < n:
# select a sample from the available pool
idx_center = rng_integers(self.rng, len(self.sample_pool))
center = self.sample_pool[idx_center]
del self.sample_pool[idx_center]
# generate candidates around the center sample
candidates = self.hypersphere_method(
center, self.radius * self.radius_factor, self.ncandidates
)
# keep candidates that satisfy some conditions
for candidate in candidates:
if in_limits(candidate) and not in_neighborhood(candidate):
add_sample(candidate)
num_drawn += 1
if num_drawn >= n:
break
self.num_generated += num_drawn
return np.array(curr_sample)
def fill_space(self) -> np.ndarray:
"""Draw ``n`` samples in the interval ``[0, 1]``.
Unlike `random`, this method will try to add points until
the space is full. Depending on ``candidates`` (and to a lesser extent
other parameters), some empty areas can still be present in the sample.
.. warning::
This can be extremely slow in high dimensions or if the
``radius`` is very small-with respect to the dimensionality.
Returns
-------
sample : array_like (n, d)
QMC sample.
"""
return self.random(np.inf) # type: ignore[arg-type]
def reset(self) -> PoissonDisk:
"""Reset the engine to base state.
Returns
-------
engine : PoissonDisk
Engine reset to its base state.
"""
super().reset()
self._initialize_grid_pool()
return self
def _hypersphere_volume_sample(
self, center: np.ndarray, radius: DecimalNumber,
candidates: IntNumber = 1
) -> np.ndarray:
"""Uniform sampling within hypersphere."""
# should remove samples within r/2
x = self.rng.standard_normal(size=(candidates, self.d))
ssq = np.sum(x**2, axis=1)
fr = radius * gammainc(self.d/2, ssq/2)**(1/self.d) / np.sqrt(ssq)
fr_tiled = np.tile(
fr.reshape(-1, 1), (1, self.d) # type: ignore[arg-type]
)
p = center + np.multiply(x, fr_tiled)
return p
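# Illustrative note (not in the original module): the gammainc-based radial
# rescaling above maps standard normal draws to points uniform inside the
# d-ball, whereas normalizing the same draws to unit length, as in the
# method below, yields points uniform on the hypersphere's surface.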
def _hypersphere_surface_sample(
self, center: np.ndarray, radius: DecimalNumber,
candidates: IntNumber = 1
) -> np.ndarray:
"""Uniform sampling on the hypersphere's surface."""
vec = self.rng.standard_normal(size=(candidates, self.d))
vec /= np.linalg.norm(vec, axis=1)[:, None]
p = center + np.multiply(vec, radius)
return p
class MultivariateNormalQMC:
r"""QMC sampling from a multivariate Normal :math:`N(\mu, \Sigma)`.
Parameters
----------
mean : array_like (d,)
The mean vector, where ``d`` is the dimension.
cov : array_like (d, d), optional
The covariance matrix. If omitted, use `cov_root` instead.
If both `cov` and `cov_root` are omitted, use the identity matrix.
cov_root : array_like (d, d'), optional
A root decomposition of the covariance matrix, where ``d'`` may be less
than ``d`` if the covariance is not full rank. If omitted, use `cov`.
inv_transform : bool, optional
If True, use inverse transform instead of Box-Muller. Default is True.
engine : QMCEngine, optional
Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
seed : {None, int, `numpy.random.Generator`}, optional
Used only if `engine` is None.
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import qmc
>>> dist = qmc.MultivariateNormalQMC(mean=[0, 5], cov=[[1, 0], [0, 1]])
>>> sample = dist.random(512)
>>> _ = plt.scatter(sample[:, 0], sample[:, 1])
>>> plt.show()
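A correlated covariance can be supplied in the same way; a minimal sketch
(any symmetric positive semi-definite matrix works):
>>> dist = qmc.MultivariateNormalQMC(mean=[0, 5], cov=[[1, 0.8], [0.8, 1]])
>>> sample = dist.random(512)
>>> sample.shape
(512, 2)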
"""
def __init__(
self, mean: npt.ArrayLike, cov: npt.ArrayLike | None = None, *,
cov_root: npt.ArrayLike | None = None,
inv_transform: bool = True,
engine: QMCEngine | None = None,
seed: SeedType = None
) -> None:
mean = np.array(mean, copy=False, ndmin=1)
d = mean.shape[0]
if cov is not None:
# covariance matrix provided
cov = np.array(cov, copy=False, ndmin=2)
# check that the covariance matrix is square/symmetric and that the
# mean vector has the same dimension d
if not mean.shape[0] == cov.shape[0]:
raise ValueError("Dimension mismatch between mean and "
"covariance.")
if not np.allclose(cov, cov.transpose()):
raise ValueError("Covariance matrix is not symmetric.")
# compute Cholesky decomp; if it fails, do the eigen decomposition
try:
cov_root = np.linalg.cholesky(cov).transpose()
except np.linalg.LinAlgError:
eigval, eigvec = np.linalg.eigh(cov)
if not np.all(eigval >= -1.0e-8):
raise ValueError("Covariance matrix not PSD.")
eigval = np.clip(eigval, 0.0, None)
cov_root = (eigvec * np.sqrt(eigval)).transpose()
elif cov_root is not None:
# root decomposition provided
cov_root = np.atleast_2d(cov_root)
if not mean.shape[0] == cov_root.shape[0]:
raise ValueError("Dimension mismatch between mean and "
"covariance.")
else:
# corresponds to identity covariance matrix
cov_root = None
self._inv_transform = inv_transform
if not inv_transform:
# to apply Box-Muller, we need an even number of dimensions
engine_dim = 2 * math.ceil(d / 2)
else:
engine_dim = d
if engine is None:
self.engine = Sobol(
d=engine_dim, scramble=True, bits=30, seed=seed
) # type: QMCEngine
elif isinstance(engine, QMCEngine):
if engine.d != engine_dim:
raise ValueError("Dimension of `engine` must be consistent"
" with dimensions of mean and covariance."
" If `inv_transform` is False, it must be"
" an even number.")
self.engine = engine
else:
raise ValueError("`engine` must be an instance of "
"`scipy.stats.qmc.QMCEngine` or `None`.")
self._mean = mean
self._corr_matrix = cov_root
self._d = d
def random(self, n: IntNumber = 1) -> np.ndarray:
"""Draw `n` QMC samples from the multivariate Normal.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space. Default is 1.
Returns
-------
sample : array_like (n, d)
Sample.
"""
base_samples = self._standard_normal_samples(n)
return self._correlate(base_samples)
def _correlate(self, base_samples: np.ndarray) -> np.ndarray:
if self._corr_matrix is not None:
return base_samples @ self._corr_matrix + self._mean
else:
# avoid multiplying with identity here
return base_samples + self._mean
def _standard_normal_samples(self, n: IntNumber = 1) -> np.ndarray:
"""Draw `n` QMC samples from the standard Normal :math:`N(0, I_d)`.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space. Default is 1.
Returns
-------
sample : array_like (n, d)
Sample.
"""
# get base samples
samples = self.engine.random(n)
if self._inv_transform:
# apply inverse transform
# (values too close to 0/1 result in inf values)
return stats.norm.ppf(0.5 + (1 - 1e-10) * (samples - 0.5)) # type: ignore[attr-defined]
else:
# apply Box-Muller transform (note: indexes starting from 1)
even = np.arange(0, samples.shape[-1], 2)
Rs = np.sqrt(-2 * np.log(samples[:, even]))
thetas = 2 * math.pi * samples[:, 1 + even]
cos = np.cos(thetas)
sin = np.sin(thetas)
transf_samples = np.stack([Rs * cos, Rs * sin],
-1).reshape(n, -1)
# make sure we only return the number of dimension requested
return transf_samples[:, : self._d]
class MultinomialQMC:
r"""QMC sampling from a multinomial distribution.
Parameters
----------
pvals : array_like (k,)
Vector of probabilities of size ``k``, where ``k`` is the number
of categories. Elements must be non-negative and sum to 1.
n_trials : int
Number of trials.
engine : QMCEngine, optional
Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
seed : {None, int, `numpy.random.Generator`}, optional
Used only if `engine` is None.
If `seed` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(seed)``.
If `seed` is already a ``Generator`` instance, then the provided
instance is used.
Examples
--------
Let's define 3 categories and, for a given sample, fix the total number
of trials at 10. The number of trials per category is determined
by the `pvals` associated with each category.
Then, we sample this distribution 64 times.
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import qmc
>>> dist = qmc.MultinomialQMC(
... pvals=[0.2, 0.4, 0.4], n_trials=10, engine=qmc.Halton(d=1)
... )
>>> sample = dist.random(64)
We can plot the sample and verify that the median number of trials
for each category follows the `pvals`. That would be
``pvals * n_trials = [2, 4, 4]``.
>>> fig, ax = plt.subplots()
>>> ax.yaxis.get_major_locator().set_params(integer=True)
>>> _ = ax.boxplot(sample)
>>> ax.set(xlabel="Categories", ylabel="Trials")
>>> plt.show()
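As a sanity check (a defining property of multinomial samples rather than
of one particular draw), each row of the sample sums to `n_trials`:
>>> import numpy as np
>>> bool(np.all(sample.sum(axis=1) == 10))
True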
"""
def __init__(
self, pvals: npt.ArrayLike, n_trials: IntNumber,
*, engine: QMCEngine | None = None,
seed: SeedType = None
) -> None:
self.pvals = np.array(pvals, copy=False, ndmin=1)
if np.min(pvals) < 0:
raise ValueError('Elements of pvals must be non-negative.')
if not np.isclose(np.sum(pvals), 1):
raise ValueError('Elements of pvals must sum to 1.')
self.n_trials = n_trials
if engine is None:
self.engine = Sobol(
d=1, scramble=True, bits=30, seed=seed
) # type: QMCEngine
elif isinstance(engine, QMCEngine):
if engine.d != 1:
raise ValueError("Dimension of `engine` must be 1.")
self.engine = engine
else:
raise ValueError("`engine` must be an instance of "
"`scipy.stats.qmc.QMCEngine` or `None`.")
def random(self, n: IntNumber = 1) -> np.ndarray:
"""Draw `n` QMC samples from the multinomial distribution.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space. Default is 1.
Returns
-------
samples : array_like (n, k)
Sample.
"""
sample = np.empty((n, len(self.pvals)))
for i in range(n):
base_draws = self.engine.random(self.n_trials).ravel()
p_cumulative = np.empty_like(self.pvals, dtype=float)
_fill_p_cumulative(np.array(self.pvals, dtype=float), p_cumulative)
sample_ = np.zeros_like(self.pvals, dtype=int)
_categorize(base_draws, p_cumulative, sample_)
sample[i] = sample_
return sample
def _select_optimizer(
optimization: Literal["random-cd", "lloyd"] | None, config: dict
) -> Callable | None:
"""A factory for optimization methods."""
optimization_method: dict[str, Callable] = {
"random-cd": _random_cd,
"lloyd": _lloyd_centroidal_voronoi_tessellation
}
optimizer: partial | None
if optimization is not None:
try:
optimization = optimization.lower() # type: ignore[assignment]
optimizer_ = optimization_method[optimization]
except KeyError as exc:
message = (f"{optimization!r} is not a valid optimization"
f" method. It must be one of"
f" {set(optimization_method)!r}")
raise ValueError(message) from exc
# config
optimizer = partial(optimizer_, **config)
else:
optimizer = None
return optimizer
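# Illustrative sketch (not part of the original module): the returned
# ``partial`` is later applied to an ``(n, d)`` sample in [0, 1]^d, e.g.::
#
#     optimizer = _select_optimizer("lloyd", {"maxiter": 5})
#     if optimizer is not None:
#         sample = optimizer(sample)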
def _random_cd(
best_sample: np.ndarray, n_iters: int, n_nochange: int, rng: GeneratorType,
**kwargs: dict
) -> np.ndarray:
"""Optimal LHS on CD.
Create a base LHS and do random permutations of coordinates to
lower the centered discrepancy.
Because it starts with a normal LHS, it also works with the
`scramble` keyword argument.
Two stopping criteria are used: at most `n_iters` iterations are
performed, and the search stops early if there is no improvement
for `n_nochange` consecutive iterations.
"""
del kwargs # only use keywords which are defined, needed by factory
n, d = best_sample.shape
if d == 0 or n == 0:
return np.empty((n, d))
if d == 1 or n == 1:
# discrepancy measures are invariant under permuting factors and runs
return best_sample
best_disc = discrepancy(best_sample)
bounds = ([0, d - 1],
[0, n - 1],
[0, n - 1])
n_nochange_ = 0
n_iters_ = 0
while n_nochange_ < n_nochange and n_iters_ < n_iters:
n_iters_ += 1
col = rng_integers(rng, *bounds[0], endpoint=True) # type: ignore[misc]
row_1 = rng_integers(rng, *bounds[1], endpoint=True) # type: ignore[misc]
row_2 = rng_integers(rng, *bounds[2], endpoint=True) # type: ignore[misc]
disc = _perturb_discrepancy(best_sample,
row_1, row_2, col,
best_disc)
if disc < best_disc:
best_sample[row_1, col], best_sample[row_2, col] = (
best_sample[row_2, col], best_sample[row_1, col])
best_disc = disc
n_nochange_ = 0
else:
n_nochange_ += 1
return best_sample
def _l1_norm(sample: np.ndarray) -> float:
return distance.pdist(sample, 'cityblock').min()
def _lloyd_iteration(
sample: np.ndarray,
decay: float,
qhull_options: str
) -> np.ndarray:
"""Lloyd-Max algorithm iteration.
Based on the implementation of Stéfan van der Walt:
https://github.com/stefanv/lloyd
which is:
Copyright (c) 2021-04-21 Stéfan van der Walt
https://github.com/stefanv/lloyd
MIT License
Parameters
----------
sample : array_like (n, d)
The sample to iterate on.
decay : float
Relaxation decay. A positive value moves the samples toward their
centroids, and a negative value moves them away.
A value of 1 moves the samples exactly to their centroids.
qhull_options : str
Additional options to pass to Qhull. See Qhull manual
for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and
"Qbb Qc Qz Qj" otherwise.)
Returns
-------
sample : array_like (n, d)
The sample after an iteration of Lloyd's algorithm.
"""
new_sample = np.empty_like(sample)
voronoi = Voronoi(sample, qhull_options=qhull_options)
for ii, idx in enumerate(voronoi.point_region):
# the region is a series of indices into voronoi.vertices
# remove samples at infinity, designated by index -1
region = [i for i in voronoi.regions[idx] if i != -1]
# get the vertices for this region
verts = voronoi.vertices[region]
# clipping would be wrong, we need to intersect
# verts = np.clip(verts, 0, 1)
# move samples towards centroids:
# Centroid in n-D is the mean for uniformly distributed nodes
# of a geometry.
centroid = np.mean(verts, axis=0)
new_sample[ii] = sample[ii] + (centroid - sample[ii]) * decay
# only update sample to centroid within the region
is_valid = np.all(np.logical_and(new_sample >= 0, new_sample <= 1), axis=1)
sample[is_valid] = new_sample[is_valid]
return sample
def _lloyd_centroidal_voronoi_tessellation(
sample: npt.ArrayLike,
*,
tol: DecimalNumber = 1e-5,
maxiter: IntNumber = 10,
qhull_options: str | None = None,
**kwargs: dict
) -> np.ndarray:
"""Approximate Centroidal Voronoi Tessellation.
Perturb samples in N-dimensions using Lloyd-Max algorithm.
Parameters
----------
sample : array_like (n, d)
The sample to iterate on. With ``n`` the number of samples and ``d``
the dimension. Samples must be in :math:`[0, 1]^d`, with ``d>=2``.
tol : float, optional
Tolerance for termination. The algorithm stops if the minimum
pairwise L1 distance between samples changes by less than `tol`
between iterations. Default is 1e-5.
maxiter : int, optional
Maximum number of iterations. The algorithm stops after `maxiter`
iterations even if the `tol` criterion is not met.
Too many iterations tend to cluster the samples as a hypersphere.
Default is 10.
qhull_options : str, optional
Additional options to pass to Qhull. See Qhull manual
for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and
"Qbb Qc Qz Qj" otherwise.)
Returns
-------
sample : array_like (n, d)
The sample after being processed by Lloyd-Max algorithm.
Notes
-----
Lloyd-Max algorithm is an iterative process with the purpose of improving
the dispersion of samples. For given sample: (i) compute a Voronoi
Tessellation; (ii) find the centroid of each Voronoi cell; (iii) move the
samples toward the centroid of their respective cell. See [1]_, [2]_.
A relaxation factor is used to control how fast samples can move at each
iteration. This factor starts at 2 and decays exponentially to 1 over
`maxiter` iterations.
The process converges to equally spaced samples. It implies that measures
like the discrepancy could suffer from too many iterations. On the other
hand, L1 and L2 distances should improve. This is especially true with
QMC methods which tend to favor the discrepancy over other criteria.
.. note::
The current implementation does not intersect the Voronoi Tessellation
with the boundaries. This implies that for a low number of samples,
empirically below 20, no Voronoi cell is touching the boundaries.
Hence, samples cannot be moved close to the boundaries.
Further improvements could consider the samples at infinity so that
all boundaries are segments of some Voronoi cells. This would fix
the computation of the centroid position.
.. warning::
The Voronoi Tessellation step is expensive and quickly becomes
intractable with dimensions as low as 10 even for a sample
of size as low as 1000.
.. versionadded:: 1.9.0
References
----------
.. [1] Lloyd. "Least Squares Quantization in PCM".
IEEE Transactions on Information Theory, 1982.
.. [2] Max J. "Quantizing for minimum distortion".
IEEE Transactions on Information Theory, 1960.
Examples
--------
>>> import numpy as np
>>> from scipy.spatial import distance
>>> rng = np.random.default_rng()
>>> sample = rng.random((128, 2))
.. note::
The samples need to be in :math:`[0, 1]^d`. `scipy.stats.qmc.scale`
can be used to scale the samples from their
original bounds to :math:`[0, 1]^d`. And back to their original bounds.
Compute the quality of the sample using the L1 criterion.
>>> def l1_norm(sample):
... return distance.pdist(sample, 'cityblock').min()
>>> l1_norm(sample)
0.00161... # random
Now process the sample using Lloyd's algorithm and check the improvement
on the L1. The value should increase.
>>> sample = _lloyd_centroidal_voronoi_tessellation(sample)
>>> l1_norm(sample)
0.0278... # random
"""
del kwargs # only use keywords which are defined, needed by factory
sample = np.asarray(sample).copy()
if not sample.ndim == 2:
raise ValueError('`sample` is not a 2D array')
if not sample.shape[1] >= 2:
raise ValueError('`sample` dimension is not >= 2')
# Checking that sample is within the hypercube
if (sample.max() > 1.) or (sample.min() < 0.):
raise ValueError('`sample` is not in unit hypercube')
if qhull_options is None:
qhull_options = 'Qbb Qc Qz QJ'
if sample.shape[1] >= 5:
qhull_options += ' Qx'
# Fit an exponential decay for the relaxation factor: approximately 2
# at iteration 0 and 1 at `maxiter`.
# `root` is the analytical solution of exp(-maxiter/x) = 0.1
root = -maxiter / np.log(0.1)
decay = [np.exp(-x / root)+0.9 for x in range(maxiter)]
l1_old = _l1_norm(sample=sample)
for i in range(maxiter):
sample = _lloyd_iteration(
sample=sample, decay=decay[i],
qhull_options=qhull_options,
)
l1_new = _l1_norm(sample=sample)
if abs(l1_new - l1_old) < tol:
break
else:
l1_old = l1_new
return sample
def _validate_workers(workers: IntNumber = 1) -> IntNumber:
"""Validate `workers` based on platform and value.
Parameters
----------
workers : int, optional
Number of workers to use for parallel processing. If -1 is
given all CPU threads are used. Default is 1.
Returns
-------
workers : int
Number of CPUs used by the algorithm.
"""
workers = int(workers)
if workers == -1:
workers = os.cpu_count() # type: ignore[assignment]
if workers is None:
raise NotImplementedError(
"Cannot determine the number of cpus using os.cpu_count(), "
"cannot use -1 for the number of workers"
)
elif workers <= 0:
raise ValueError(f"Invalid number of workers: {workers}, must be -1 "
"or > 0")
return workers
def _validate_bounds(
l_bounds: npt.ArrayLike, u_bounds: npt.ArrayLike, d: int
) -> tuple[np.ndarray, ...]:
"""Bounds input validation.
Parameters
----------
l_bounds, u_bounds : array_like (d,)
Lower and upper bounds.
d : int
Dimension to use for broadcasting.
Returns
-------
l_bounds, u_bounds : array_like (d,)
Lower and upper bounds.
"""
try:
lower = np.broadcast_to(l_bounds, d)
upper = np.broadcast_to(u_bounds, d)
except ValueError as exc:
msg = ("'l_bounds' and 'u_bounds' must be broadcastable and respect"
" the sample dimension")
raise ValueError(msg) from exc
if not np.all(lower < upper):
raise ValueError("Bounds are not consistent 'l_bounds' < 'u_bounds'")
return lower, upper
| 93618
| 34.569529
| 100
|
py
|
scipy
|
scipy-main/scipy/stats/_binomtest.py
|
from math import sqrt
import numpy as np
from scipy._lib._util import _validate_int
from scipy.optimize import brentq
from scipy.special import ndtri
from ._discrete_distns import binom
from ._common import ConfidenceInterval
class BinomTestResult:
"""
Result of `scipy.stats.binomtest`.
Attributes
----------
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
statistic : float
The estimate of the proportion of successes.
pvalue : float
The p-value of the hypothesis test.
"""
def __init__(self, k, n, alternative, statistic, pvalue):
self.k = k
self.n = n
self.alternative = alternative
self.statistic = statistic
self.pvalue = pvalue
# add alias for backward compatibility
self.proportion_estimate = statistic
def __repr__(self):
s = ("BinomTestResult("
f"k={self.k}, "
f"n={self.n}, "
f"alternative={self.alternative!r}, "
f"statistic={self.statistic}, "
f"pvalue={self.pvalue})")
return s
def proportion_ci(self, confidence_level=0.95, method='exact'):
"""
Compute the confidence interval for ``statistic``.
Parameters
----------
confidence_level : float, optional
Confidence level for the computed confidence interval
of the estimated proportion. Default is 0.95.
method : {'exact', 'wilson', 'wilsoncc'}, optional
Selects the method used to compute the confidence interval
for the estimate of the proportion:
'exact' :
Use the Clopper-Pearson exact method [1]_.
'wilson' :
Wilson's method, without continuity correction ([2]_, [3]_).
'wilsoncc' :
Wilson's method, with continuity correction ([2]_, [3]_).
Default is ``'exact'``.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence interval.
References
----------
.. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
fiducial limits illustrated in the case of the binomial,
Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
.. [2] E. B. Wilson, Probable inference, the law of succession, and
statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
(1927).
.. [3] Robert G. Newcombe, Two-sided confidence intervals for the
single proportion: comparison of seven methods, Statistics
in Medicine, 17, pp 857-872 (1998).
Examples
--------
>>> from scipy.stats import binomtest
>>> result = binomtest(k=7, n=50, p=0.1)
>>> result.statistic
0.14
>>> result.proportion_ci()
ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
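The Wilson interval can be requested in the same way; a minimal sketch
(its bounds differ from the exact method, so no values are shown here):
>>> ci = result.proportion_ci(confidence_level=0.95, method='wilson')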
"""
if method not in ('exact', 'wilson', 'wilsoncc'):
raise ValueError("method must be one of 'exact', 'wilson' or "
"'wilsoncc'.")
if not (0 <= confidence_level <= 1):
raise ValueError('confidence_level must be in the interval '
'[0, 1].')
if method == 'exact':
low, high = _binom_exact_conf_int(self.k, self.n,
confidence_level,
self.alternative)
else:
# method is 'wilson' or 'wilsoncc'
low, high = _binom_wilson_conf_int(self.k, self.n,
confidence_level,
self.alternative,
correction=method == 'wilsoncc')
return ConfidenceInterval(low=low, high=high)
def _findp(func):
try:
p = brentq(func, 0, 1)
except RuntimeError:
raise RuntimeError('numerical solver failed to converge when '
'computing the confidence limits') from None
except ValueError as exc:
raise ValueError('brentq raised a ValueError; report this to the '
'SciPy developers') from exc
return p
def _binom_exact_conf_int(k, n, confidence_level, alternative):
"""
Compute the confidence interval for the binomial test using the
Clopper-Pearson exact method.
Returns plow, phigh
"""
if alternative == 'two-sided':
alpha = (1 - confidence_level) / 2
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'less':
alpha = 1 - confidence_level
plow = 0.0
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'greater':
alpha = 1 - confidence_level
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
phigh = 1.0
return plow, phigh
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
# This function assumes that the arguments have already been validated.
# In particular, `alternative` must be one of 'two-sided', 'less' or
# 'greater'.
p = k / n
if alternative == 'two-sided':
z = ndtri(0.5 + 0.5*confidence_level)
else:
z = ndtri(confidence_level)
# For reference, the formulas implemented here are from
# Newcombe (1998) (ref. [3] in the proportion_ci docstring).
denom = 2*(n + z**2)
center = (2*n*p + z**2)/denom
q = 1 - p
if correction:
if alternative == 'less' or k == 0:
lo = 0.0
else:
dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
lo = center - dlo
if alternative == 'greater' or k == n:
hi = 1.0
else:
dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
hi = center + dhi
else:
delta = z/denom * sqrt(4*n*p*q + z**2)
if alternative == 'less' or k == 0:
lo = 0.0
else:
lo = center - delta
if alternative == 'greater' or k == n:
hi = 1.0
else:
hi = center + delta
return lo, hi
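# Illustrative check (not part of the original module): without continuity
# correction the interval is symmetric around the shifted center
# (2*n*p + z**2) / (2*(n + z**2)) rather than around p = k/n, e.g.::
#
#     lo, hi = _binom_wilson_conf_int(7, 50, 0.95, 'two-sided',
#                                     correction=False)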
def binomtest(k, n, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
The binomial test [1]_ is a test of the null hypothesis that the
probability of success in a Bernoulli experiment is `p`.
Details of the test can be found in many texts on statistics, such
as section 24.5 of [2]_.
Parameters
----------
k : int
The number of successes.
n : int
The number of trials.
p : float, optional
The hypothesized probability of success, i.e. the expected
proportion of successes. The value must be in the interval
``0 <= p <= 1``. The default value is ``p = 0.5``.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
result : `~scipy.stats._result_classes.BinomTestResult` instance
The return value is an object with the following attributes:
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
statistic : float
The estimate of the proportion of successes.
pvalue : float
The p-value of the hypothesis test.
The object has the following methods:
proportion_ci(confidence_level=0.95, method='exact') :
Compute the confidence interval for ``statistic``.
Notes
-----
.. versionadded:: 1.7.0
References
----------
.. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
.. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
Prentice Hall, Upper Saddle River, New Jersey USA (2010)
Examples
--------
>>> from scipy.stats import binomtest
A car manufacturer claims that no more than 10% of their cars are unsafe.
15 cars are inspected for safety, 3 were found to be unsafe. Test the
manufacturer's claim:
>>> result = binomtest(3, n=15, p=0.1, alternative='greater')
>>> result.pvalue
0.18406106910639114
The null hypothesis cannot be rejected at the 5% level of significance
because the returned p-value is greater than the critical value of 5%.
The test statistic is equal to the estimated proportion, which is simply
``3/15``:
>>> result.statistic
0.2
We can use the `proportion_ci()` method of the result to compute the
confidence interval of the estimate:
>>> result.proportion_ci(confidence_level=0.95)
ConfidenceInterval(low=0.05684686759024681, high=1.0)
"""
k = _validate_int(k, 'k', minimum=0)
n = _validate_int(n, 'n', minimum=1)
if k > n:
raise ValueError('k must not be greater than n.')
if not (0 <= p <= 1):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized; \n"
"must be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = binom.cdf(k, n, p)
elif alternative == 'greater':
pval = binom.sf(k-1, n, p)
else:
# alternative is 'two-sided'
d = binom.pmf(k, n, p)
rerr = 1 + 1e-7
if k == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif k < p * n:
ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
-d*rerr, np.ceil(p * n), n)
# y is the number of terms between mode and n that are <= d*rerr.
# ix gave us the first term where a(ix) <= d*rerr < a(ix-1)
# if the first equality doesn't hold, y=n-ix. Otherwise, we
# need to include ix as well since the equality holds. Note that
# the equality will hold only in very rare situations due to rerr.
y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
else:
ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
d*rerr, 0, np.floor(p * n))
# y is the number of terms between 0 and mode that are <= d*rerr.
# we need to add a 1 to account for the 0 index.
# For comparing this with old behavior, see
# tst_binary_srch_for_binom_tst method in test_morestats.
y = ix + 1
pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
pval = min(1.0, pval)
result = BinomTestResult(k=k, n=n, alternative=alternative,
statistic=k/n, pvalue=pval)
return result
def _binary_search_for_binom_tst(a, d, lo, hi):
"""
Conducts an implicit binary search on a function specified by `a`.
Meant to be used on the binomial PMF for the case of two-sided tests
to obtain the value on the other side of the mode where the tail
probability should be computed. The values on either side of
the mode are always in order, meaning binary search is applicable.
Parameters
----------
a : callable
The function over which to perform binary search. Its values
for inputs lo and hi should be in ascending order.
d : float
The value to search.
lo : int
The lower end of range to search.
hi : int
The higher end of the range to search.
Returns
-------
int
The index ``i``, between `lo` and `hi`, such that
``a(i) <= d < a(i+1)``.
"""
while lo < hi:
mid = lo + (hi-lo)//2
midval = a(mid)
if midval < d:
lo = mid+1
elif midval > d:
hi = mid-1
else:
return mid
if a(lo) <= d:
return lo
else:
return lo-1
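# Illustrative check (not part of the original module): searching the
# ascending function a(i) = i**2 for d = 10 between lo = 0 and hi = 5
# returns 3, since a(3) = 9 <= 10 < a(4) = 16::
#
#     _binary_search_for_binom_tst(lambda i: i**2, 10, 0, 5)  # -> 3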
| 13,043
| 33.691489
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_stats_mstats_common.py
|
import warnings
import numpy as np
import scipy.stats._stats_py
from . import distributions
from .._lib._bunch import _make_tuple_bunch
from ._stats_pythran import siegelslopes as siegelslopes_pythran
__all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes']
# This is not a namedtuple for backwards compatibility. See PR #12983
LinregressResult = _make_tuple_bunch('LinregressResult',
['slope', 'intercept', 'rvalue',
'pvalue', 'stderr'],
extra_field_names=['intercept_stderr'])
TheilslopesResult = _make_tuple_bunch('TheilslopesResult',
['slope', 'intercept',
'low_slope', 'high_slope'])
SiegelslopesResult = _make_tuple_bunch('SiegelslopesResult',
['slope', 'intercept'])
def linregress(x, y=None, alternative='two-sided'):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length. If
only `x` is given (and ``y=None``), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension. In
the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is
equivalent to ``linregress(x[0], x[1])``.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the slope of the regression line is nonzero
* 'less': the slope of the regression line is less than zero
* 'greater': the slope of the regression line is greater than zero
.. versionadded:: 1.7.0
Returns
-------
result : ``LinregressResult`` instance
The return value is an object with the following attributes:
slope : float
Slope of the regression line.
intercept : float
Intercept of the regression line.
rvalue : float
The Pearson correlation coefficient. The square of ``rvalue``
is equal to the coefficient of determination.
pvalue : float
The p-value for a hypothesis test whose null hypothesis is
that the slope is zero, using Wald Test with t-distribution of
the test statistic. See `alternative` above for alternative
hypotheses.
stderr : float
Standard error of the estimated slope (gradient), under the
assumption of residual normality.
intercept_stderr : float
Standard error of the estimated intercept, under the assumption
of residual normality.
See Also
--------
scipy.optimize.curve_fit :
Use non-linear least squares to fit a function to data.
scipy.optimize.leastsq :
Minimize the sum of squares of a set of equations.
Notes
-----
Missing values are considered pair-wise: if a value is missing in `x`,
the corresponding value in `y` is masked.
For compatibility with older versions of SciPy, the return value acts
like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``,
``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write::
slope, intercept, r, p, se = linregress(x, y)
With that style, however, the standard error of the intercept is not
available. To have access to all the computed values, including the
standard error of the intercept, use the return value as an object
with attributes, e.g.::
result = linregress(x, y)
print(result.intercept, result.intercept_stderr)
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> rng = np.random.default_rng()
Generate some data:
>>> x = rng.random(10)
>>> y = 1.6*x + rng.random(10)
Perform the linear regression:
>>> res = stats.linregress(x, y)
Coefficient of determination (R-squared):
>>> print(f"R-squared: {res.rvalue**2:.6f}")
R-squared: 0.717533
Plot the data along with the fitted line:
>>> plt.plot(x, y, 'o', label='original data')
>>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line')
>>> plt.legend()
>>> plt.show()
Calculate 95% confidence interval on slope and intercept:
>>> # Two-sided inverse Students t-distribution
>>> # p - probability, df - degrees of freedom
>>> from scipy.stats import t
>>> tinv = lambda p, df: abs(t.ppf(p/2, df))
>>> ts = tinv(0.05, len(x)-2)
>>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}")
slope (95%): 1.453392 +/- 0.743465
>>> print(f"intercept (95%): {res.intercept:.6f}"
... f" +/- {ts*res.intercept_stderr:.6f}")
intercept (95%): 0.616950 +/- 0.544475
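The fitted line can also be used for prediction; a minimal sketch reusing
the result from above:
>>> y_pred = res.intercept + res.slope * np.array([0.25, 0.75])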
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
raise ValueError("If only `x` is given as input, it has to "
"be of shape (2, N) or (N, 2); provided shape "
f"was {x.shape}.")
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
if np.amax(x) == np.amin(x) and len(x) > 1:
raise ValueError("Cannot calculate a linear regression "
"if all x values are identical")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# Average sums of square differences from the mean
# ssxm = mean( (x-mean(x))^2 )
# ssxym = mean( (x-mean(x)) * (y-mean(y)) )
ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat
# R-value
# r = ssxym / sqrt( ssxm * ssym )
if ssxm == 0.0 or ssym == 0.0:
# If the denominator was going to be 0
r = 0.0
else:
r = ssxym / np.sqrt(ssxm * ssym)
# Test for numerical error propagation (make sure -1 < r < 1)
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
slope = ssxym / ssxm
intercept = ymean - slope*xmean
if n == 2:
# handle case when only two points are passed in
if y[0] == y[1]:
prob = 1.0
else:
prob = 0.0
slope_stderr = 0.0
intercept_stderr = 0.0
else:
df = n - 2 # Number of degrees of freedom
# n-2 degrees of freedom because 2 has been used up
# to estimate the mean and standard deviation
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative)
slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df)
# Also calculate the standard error of the intercept
# The following relationship is used:
# ssxm = mean( (x-mean(x))^2 )
# = ssx - sx*sx
# = mean( x^2 ) - mean(x)^2
intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2)
return LinregressResult(slope=slope, intercept=intercept, rvalue=r,
pvalue=prob, stderr=slope_stderr,
intercept_stderr=intercept_stderr)
def theilslopes(y, x=None, alpha=0.95, method='separate'):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
method : {'joint', 'separate'}, optional
Method to be used for computing estimate for intercept.
Following methods are supported,
* 'joint': Uses np.median(y - slope * x) as intercept.
* 'separate': Uses np.median(y) - slope * np.median(x)
as intercept.
The default is 'separate'.
.. versionadded:: 1.8.0
Returns
-------
result : ``TheilslopesResult`` instance
The return value is an object with the following attributes:
slope : float
Theil slope.
intercept : float
Intercept of the Theil line.
low_slope : float
Lower bound of the confidence interval on `slope`.
high_slope : float
Upper bound of the confidence interval on `slope`.
See Also
--------
siegelslopes : a similar technique using repeated medians
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
slope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature such as ``median(y - slope*x)``
in [4]_. The approach to compute the intercept can be determined by the
parameter ``method``. A confidence interval for the intercept is not
given as this question is not addressed in [1]_.
For compatibility with older versions of SciPy, the return value acts
like a ``namedtuple`` of length 4, with fields ``slope``, ``intercept``,
``low_slope``, and ``high_slope``, so one can continue to write::
slope, intercept, low_slope, high_slope = theilslopes(y, x)
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on
Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
.. [4] https://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90, method='separate')
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
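The alternative intercept estimate can be requested with
``method='joint'``; a minimal sketch reusing the data above:
>>> res_joint = stats.theilslopes(y, x, 0.90, method='joint')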
"""
if method not in ['joint', 'separate']:
raise ValueError("method must be either 'joint' or 'separate'. "
"'{}' is invalid.".format(method))
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" %
(len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
if not slopes.size:
msg = "All `x` coordinates are identical."
warnings.warn(msg, RuntimeWarning, stacklevel=2)
slopes.sort()
medslope = np.median(slopes)
if method == 'joint':
medinter = np.median(y - medslope * x)
else:
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
sum(k * (k-1) * (2*k + 5) for k in nxreps) -
sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
try:
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
except (ValueError, IndexError):
delta = (np.nan, np.nan)
return TheilslopesResult(slope=medslope, intercept=medinter,
low_slope=delta[0], high_slope=delta[1])
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
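# Illustrative example: _find_repeats([1., 2., 2., 3., 3., 3.]) returns
# (array([2., 3.]), array([2, 3])) -- the values that occur more than once
# together with their counts.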
def siegelslopes(y, x=None, method="hierarchical"):
r"""
Computes the Siegel estimator for a set of points (x, y).
`siegelslopes` implements a method for robust linear regression
using repeated medians (see [1]_) to fit a line to the points (x, y).
The method is robust to outliers with an asymptotic breakdown point
of 50%.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
method : {'hierarchical', 'separate'}
If 'hierarchical', estimate the intercept using the estimated
slope ``slope`` (default option).
If 'separate', estimate the intercept independent of the estimated
slope. See Notes for details.
Returns
-------
result : ``SiegelslopesResult`` instance
The return value is an object with the following attributes:
slope : float
Estimate of the slope of the regression line.
intercept : float
Estimate of the intercept of the regression line.
See Also
--------
theilslopes : a similar technique without repeated medians
Notes
-----
With ``n = len(y)``, compute ``m_j`` as the median of
the slopes from the point ``(x[j], y[j])`` to all other `n-1` points.
``slope`` is then the median of all slopes ``m_j``.
Two ways are given to estimate the intercept in [1]_ which can be chosen
via the parameter ``method``.
The hierarchical approach uses the estimated slope ``slope``
and computes ``intercept`` as the median of ``y - slope*x``.
The other approach estimates the intercept separately as follows: for
each point ``(x[j], y[j])``, compute the intercepts of all the `n-1`
lines through the remaining points and take the median ``i_j``.
``intercept`` is the median of the ``i_j``.
The implementation computes `n` times the median of a vector of size `n`
which can be slow for large vectors. There are more efficient algorithms
(see [2]_) which are not implemented here.
For compatibility with older versions of SciPy, the return value acts
like a ``namedtuple`` of length 2, with fields ``slope`` and
``intercept``, so one can continue to write::
slope, intercept = siegelslopes(y, x)
References
----------
.. [1] A. Siegel, "Robust Regression Using Repeated Medians",
Biometrika, Vol. 69, pp. 242-244, 1982.
.. [2] A. Stein and M. Werman, "Finding the repeated median regression
line", Proceedings of the Third Annual ACM-SIAM Symposium on
Discrete Algorithms, pp. 409-413, 1992.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope and intercept. For comparison, also compute the
least-squares fit with `linregress`:
>>> res = stats.siegelslopes(y, x)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Siegel regression line is shown in red. The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
if method not in ['hierarchical', 'separate']:
raise ValueError("method can only be 'hierarchical' or 'separate'")
y = np.asarray(y).ravel()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.asarray(x, dtype=float).ravel()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" %
(len(y), len(x)))
dtype = np.result_type(x, y, np.float32) # use at least float32
y, x = y.astype(dtype), x.astype(dtype)
medslope, medinter = siegelslopes_pythran(y, x, method)
return SiegelslopesResult(slope=medslope, intercept=medinter)
| 18,649
| 36.151394
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_qmvnt.py
|
# Integration of multivariate normal and t distributions.
# Adapted from the MATLAB original implementations by Dr. Alan Genz.
# http://www.math.wsu.edu/faculty/genz/software/software.html
# Copyright (C) 2013, Alan Genz, All rights reserved.
# Python implementation is copyright (C) 2022, Robert Kern, All rights
# reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. The contributor name(s) may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from scipy.fft import fft, ifft
from scipy.special import gammaincinv, ndtr, ndtri
from scipy.stats._qmc import primes_from_2_to
phi = ndtr
phinv = ndtri
def _factorize_int(n):
"""Return a sorted list of the unique prime factors of a positive integer.
"""
    # NOTE: There are lots of faster ways to do this, but this isn't terrible.
factors = set()
for p in primes_from_2_to(int(np.sqrt(n)) + 1):
while not (n % p):
factors.add(p)
n //= p
if n == 1:
break
if n != 1:
factors.add(n)
return sorted(factors)
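# Illustrative example: _factorize_int(360) returns [2, 3, 5], since
# 360 == 2**3 * 3**2 * 5.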
def _primitive_root(p):
"""Compute a primitive root of the prime number `p`.
Used in the CBC lattice construction.
References
----------
.. [1] https://en.wikipedia.org/wiki/Primitive_root_modulo_n
"""
# p is prime
pm = p - 1
factors = _factorize_int(pm)
n = len(factors)
r = 2
k = 0
while k < n:
d = pm // factors[k]
# pow() doesn't like numpy scalar types.
rd = pow(int(r), int(d), int(p))
if rd == 1:
r += 1
k = 0
else:
k += 1
return r
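# Illustrative example: for p = 7, the prime factors of p - 1 = 6 are
# [2, 3]; r = 2 is rejected because 2**3 % 7 == 1, while r = 3 passes both
# checks (3**3 % 7 == 6 and 3**2 % 7 == 2), so _primitive_root(7) == 3.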
def _cbc_lattice(n_dim, n_qmc_samples):
"""Compute a QMC lattice generator using a Fast CBC construction.
Parameters
----------
n_dim : int > 0
The number of dimensions for the lattice.
n_qmc_samples : int > 0
The desired number of QMC samples. This will be rounded down to the
nearest prime to enable the CBC construction.
Returns
-------
q : float array : shape=(n_dim,)
The lattice generator vector. All values are in the open interval
`(0, 1)`.
actual_n_qmc_samples : int
The prime number of QMC samples that must be used with this lattice,
no more, no less.
References
----------
.. [1] Nuyens, D. and Cools, R. "Fast Component-by-Component Construction,
a Reprise for Different Kernels", In H. Niederreiter and D. Talay,
editors, Monte-Carlo and Quasi-Monte Carlo Methods 2004,
Springer-Verlag, 2006, 371-385.
"""
# Round down to the nearest prime number.
primes = primes_from_2_to(n_qmc_samples + 1)
n_qmc_samples = primes[-1]
bt = np.ones(n_dim)
gm = np.hstack([1.0, 0.8 ** np.arange(n_dim - 1)])
q = 1
w = 0
z = np.arange(1, n_dim + 1)
m = (n_qmc_samples - 1) // 2
g = _primitive_root(n_qmc_samples)
# Slightly faster way to compute perm[j] = pow(g, j, n_qmc_samples)
# Shame that we don't have modulo pow() implemented as a ufunc.
perm = np.ones(m, dtype=int)
for j in range(m - 1):
perm[j + 1] = (g * perm[j]) % n_qmc_samples
perm = np.minimum(n_qmc_samples - perm, perm)
pn = perm / n_qmc_samples
c = pn * pn - pn + 1.0 / 6
fc = fft(c)
for s in range(1, n_dim):
reordered = np.hstack([
c[:w+1][::-1],
c[w+1:m][::-1],
])
q = q * (bt[s-1] + gm[s-1] * reordered)
w = ifft(fc * fft(q)).real.argmin()
z[s] = perm[w]
q = z / n_qmc_samples
return q, n_qmc_samples
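# Illustrative example: q, n = _cbc_lattice(4, 100) rounds the requested
# sample count down to the prime n == 97 and returns a generator vector q
# of shape (4,) with entries in the open interval (0, 1).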
# Note: this function is not currently used or tested by any SciPy code. It is
# included in this file to facilitate the development of a parameter for users
# to set the desired CDF accuracy, but must be reviewed and tested before use.
def _qauto(func, covar, low, high, rng, error=1e-3, limit=10_000, **kwds):
"""Automatically rerun the integration to get the required error bound.
Parameters
----------
func : callable
Either :func:`_qmvn` or :func:`_qmvt`.
covar, low, high : array
As specified in :func:`_qmvn` and :func:`_qmvt`.
    rng : Generator
        NumPy pseudorandom ``Generator`` used for the randomized QMC shifts,
        e.g. ``np.random.default_rng()``.
error : float > 0
The desired error bound.
    limit : int > 0
The rough limit of the number of integration points to consider. The
integration will stop looping once this limit has been *exceeded*.
**kwds :
Other keyword arguments to pass to `func`. When using :func:`_qmvt`, be
sure to include ``nu=`` as one of these.
Returns
-------
prob : float
The estimated probability mass within the bounds.
est_error : float
3 times the standard error of the batch estimates.
n_samples : int
The number of integration points actually used.
"""
n = len(covar)
n_samples = 0
if n == 1:
prob = phi(high) - phi(low)
# More or less
est_error = 1e-15
else:
mi = min(limit, n * 1000)
prob = 0.0
est_error = 1.0
ei = 0.0
while est_error > error and n_samples < limit:
mi = round(np.sqrt(2) * mi)
pi, ei, ni = func(mi, covar, low, high, rng=rng, **kwds)
n_samples += ni
wt = 1.0 / (1 + (ei / est_error)**2)
prob += wt * (pi - prob)
est_error = np.sqrt(wt) * ei
return prob, est_error, n_samples
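# A hypothetical call pattern (this helper is unused and untested, per the
# note above; `covar`, `low` and `high` stand for user-supplied inputs):
#     rng = np.random.default_rng()
#     prob, est_error, n = _qauto(_qmvt, covar, low, high, rng,
#                                 error=1e-4, nu=5)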
# Note: this function is not currently used or tested by any SciPy code. It is
# included in this file to facilitate the resolution of gh-8367, gh-16142, and
# possibly gh-14286, but must be reviewed and tested before use.
def _qmvn(m, covar, low, high, rng, lattice='cbc', n_batches=10):
"""Multivariate normal integration over box bounds.
Parameters
----------
m : int > n_batches
The number of points to sample. This number will be divided into
`n_batches` batches that apply random offsets of the sampling lattice
for each batch in order to estimate the error.
covar : (n, n) float array
Possibly singular, positive semidefinite symmetric covariance matrix.
low, high : (n,) float array
The low and high integration bounds.
    rng : Generator
        NumPy pseudorandom ``Generator`` used for the randomized QMC shifts,
        e.g. ``np.random.default_rng()``.
lattice : 'cbc' or callable
The type of lattice rule to use to construct the integration points.
n_batches : int > 0, optional
The number of QMC batches to apply.
Returns
-------
prob : float
The estimated probability mass within the bounds.
est_error : float
3 times the standard error of the batch estimates.
"""
cho, lo, hi = _permuted_cholesky(covar, low, high)
n = cho.shape[0]
ct = cho[0, 0]
c = phi(lo[0] / ct)
d = phi(hi[0] / ct)
ci = c
dci = d - ci
prob = 0.0
error_var = 0.0
q, n_qmc_samples = _cbc_lattice(n - 1, max(m // n_batches, 1))
y = np.zeros((n - 1, n_qmc_samples))
i_samples = np.arange(n_qmc_samples) + 1
for j in range(n_batches):
c = np.full(n_qmc_samples, ci)
dc = np.full(n_qmc_samples, dci)
pv = dc.copy()
for i in range(1, n):
# Pseudorandomly-shifted lattice coordinate.
z = q[i - 1] * i_samples + rng.random()
# Fast remainder(z, 1.0)
z -= z.astype(int)
# Tent periodization transform.
x = abs(2 * z - 1)
y[i - 1, :] = phinv(c + x * dc)
s = cho[i, :i] @ y[:i, :]
ct = cho[i, i]
c = phi((lo[i] - s) / ct)
d = phi((hi[i] - s) / ct)
dc = d - c
pv = pv * dc
# Accumulate the mean and error variances with online formulations.
d = (pv.mean() - prob) / (j + 1)
prob += d
error_var = (j - 1) * error_var / (j + 1) + d * d
# Error bounds are 3 times the standard error of the estimates.
est_error = 3 * np.sqrt(error_var)
n_samples = n_qmc_samples * n_batches
return prob, est_error, n_samples
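# A quick sanity sketch (hypothetical, given the untested status noted
# above): with an identity covariance in two dimensions and bounds
# [0, inf) in each coordinate,
#     _qmvn(10_000, np.eye(2), np.zeros(2), np.full(2, np.inf),
#           rng=np.random.default_rng())
# should estimate a probability near 0.25.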
# Note: this function is not currently used or tested by any SciPy code. It is
# included in this file to facilitate the resolution of gh-8367, gh-16142, and
# possibly gh-14286, but must be reviewed and tested before use.
def _mvn_qmc_integrand(covar, low, high, use_tent=False):
"""Transform the multivariate normal integration into a QMC integrand over
a unit hypercube.
The dimensionality of the resulting hypercube integration domain is one
less than the dimensionality of the original integrand. Note that this
transformation subsumes the integration bounds in order to account for
infinite bounds. The QMC integration one does with the returned integrand
should be on the unit hypercube.
Parameters
----------
covar : (n, n) float array
Possibly singular, positive semidefinite symmetric covariance matrix.
low, high : (n,) float array
The low and high integration bounds.
use_tent : bool, optional
If True, then use tent periodization. Only helpful for lattice rules.
Returns
-------
integrand : Callable[[NDArray], NDArray]
The QMC-integrable integrand. It takes an
``(n_qmc_samples, ndim_integrand)`` array of QMC samples in the unit
        hypercube and returns the ``(n_qmc_samples,)`` evaluations of the
        integrand at these QMC points.
ndim_integrand : int
The dimensionality of the integrand. Equal to ``n-1``.
"""
cho, lo, hi = _permuted_cholesky(covar, low, high)
n = cho.shape[0]
ndim_integrand = n - 1
ct = cho[0, 0]
c = phi(lo[0] / ct)
d = phi(hi[0] / ct)
ci = c
dci = d - ci
def integrand(*zs):
ndim_qmc = len(zs)
n_qmc_samples = len(np.atleast_1d(zs[0]))
assert ndim_qmc == ndim_integrand
y = np.zeros((ndim_qmc, n_qmc_samples))
c = np.full(n_qmc_samples, ci)
dc = np.full(n_qmc_samples, dci)
pv = dc.copy()
for i in range(1, n):
if use_tent:
# Tent periodization transform.
x = abs(2 * zs[i-1] - 1)
else:
x = zs[i-1]
y[i - 1, :] = phinv(c + x * dc)
s = cho[i, :i] @ y[:i, :]
ct = cho[i, i]
c = phi((lo[i] - s) / ct)
d = phi((hi[i] - s) / ct)
dc = d - c
pv = pv * dc
return pv
return integrand, ndim_integrand
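# Illustrative use with a QMC engine (a sketch; `covar`, `low` and `high`
# stand for user-supplied inputs):
#     integrand, ndim = _mvn_qmc_integrand(covar, low, high)
#     u = scipy.stats.qmc.Halton(d=ndim).random(2**12)
#     prob_estimate = integrand(*u.T).mean()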
def _qmvt(m, nu, covar, low, high, rng, lattice='cbc', n_batches=10):
"""Multivariate t integration over box bounds.
Parameters
----------
m : int > n_batches
The number of points to sample. This number will be divided into
`n_batches` batches that apply random offsets of the sampling lattice
for each batch in order to estimate the error.
nu : float >= 0
The shape parameter of the multivariate t distribution.
covar : (n, n) float array
Possibly singular, positive semidefinite symmetric covariance matrix.
low, high : (n,) float array
The low and high integration bounds.
    rng : Generator
        NumPy pseudorandom ``Generator`` used for the randomized QMC shifts,
        e.g. ``np.random.default_rng()``.
lattice : 'cbc' or callable
The type of lattice rule to use to construct the integration points.
n_batches : int > 0, optional
The number of QMC batches to apply.
Returns
-------
prob : float
The estimated probability mass within the bounds.
est_error : float
3 times the standard error of the batch estimates.
n_samples : int
The number of samples actually used.
"""
sn = max(1.0, np.sqrt(nu))
low = np.asarray(low, dtype=np.float64)
high = np.asarray(high, dtype=np.float64)
cho, lo, hi = _permuted_cholesky(covar, low / sn, high / sn)
n = cho.shape[0]
prob = 0.0
error_var = 0.0
q, n_qmc_samples = _cbc_lattice(n, max(m // n_batches, 1))
i_samples = np.arange(n_qmc_samples) + 1
for j in range(n_batches):
pv = np.ones(n_qmc_samples)
s = np.zeros((n, n_qmc_samples))
for i in range(n):
# Pseudorandomly-shifted lattice coordinate.
z = q[i] * i_samples + rng.random()
# Fast remainder(z, 1.0)
z -= z.astype(int)
# Tent periodization transform.
x = abs(2 * z - 1)
# FIXME: Lift the i==0 case out of the loop to make the logic
# easier to follow.
if i == 0:
# We'll use one of the QR variates to pull out the
# t-distribution scaling.
if nu > 0:
r = np.sqrt(2 * gammaincinv(nu / 2, x))
else:
r = np.ones_like(x)
else:
y = phinv(c + x * dc) # noqa: F821
with np.errstate(invalid='ignore'):
s[i:, :] += cho[i:, i - 1][:, np.newaxis] * y
si = s[i, :]
c = np.ones(n_qmc_samples)
d = np.ones(n_qmc_samples)
with np.errstate(invalid='ignore'):
lois = lo[i] * r - si
hiis = hi[i] * r - si
c[lois < -9] = 0.0
d[hiis < -9] = 0.0
lo_mask = abs(lois) < 9
hi_mask = abs(hiis) < 9
c[lo_mask] = phi(lois[lo_mask])
d[hi_mask] = phi(hiis[hi_mask])
dc = d - c
pv *= dc
# Accumulate the mean and error variances with online formulations.
d = (pv.mean() - prob) / (j + 1)
prob += d
error_var = (j - 1) * error_var / (j + 1) + d * d
# Error bounds are 3 times the standard error of the estimates.
est_error = 3 * np.sqrt(error_var)
n_samples = n_qmc_samples * n_batches
return prob, est_error, n_samples
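# Note: when nu <= 0 the radial factor r above is fixed at 1, so the
# computation reduces to the multivariate normal integral of _qmvn.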
def _permuted_cholesky(covar, low, high, tol=1e-10):
"""Compute a scaled, permuted Cholesky factor, with integration bounds.
The scaling and permuting of the dimensions accomplishes part of the
transformation of the original integration problem into a more numerically
tractable form. The lower-triangular Cholesky factor will then be used in
the subsequent integration. The integration bounds will be scaled and
permuted as well.
Parameters
----------
covar : (n, n) float array
Possibly singular, positive semidefinite symmetric covariance matrix.
low, high : (n,) float array
The low and high integration bounds.
tol : float, optional
The singularity tolerance.
Returns
-------
cho : (n, n) float array
Lower Cholesky factor, scaled and permuted.
new_low, new_high : (n,) float array
The scaled and permuted low and high integration bounds.
"""
# Make copies for outputting.
cho = np.array(covar, dtype=np.float64)
new_lo = np.array(low, dtype=np.float64)
new_hi = np.array(high, dtype=np.float64)
n = cho.shape[0]
if cho.shape != (n, n):
raise ValueError("expected a square symmetric array")
if new_lo.shape != (n,) or new_hi.shape != (n,):
raise ValueError(
"expected integration boundaries the same dimensions "
"as the covariance matrix"
)
# Scale by the sqrt of the diagonal.
dc = np.sqrt(np.maximum(np.diag(cho), 0.0))
# But don't divide by 0.
dc[dc == 0.0] = 1.0
new_lo /= dc
new_hi /= dc
cho /= dc
cho /= dc[:, np.newaxis]
y = np.zeros(n)
sqtp = np.sqrt(2 * np.pi)
for k in range(n):
epk = (k + 1) * tol
im = k
ck = 0.0
dem = 1.0
s = 0.0
lo_m = 0.0
hi_m = 0.0
for i in range(k, n):
if cho[i, i] > tol:
ci = np.sqrt(cho[i, i])
if i > 0:
s = cho[i, :k] @ y[:k]
lo_i = (new_lo[i] - s) / ci
hi_i = (new_hi[i] - s) / ci
de = phi(hi_i) - phi(lo_i)
if de <= dem:
ck = ci
dem = de
lo_m = lo_i
hi_m = hi_i
im = i
if im > k:
# Swap im and k
cho[im, im] = cho[k, k]
_swap_slices(cho, np.s_[im, :k], np.s_[k, :k])
_swap_slices(cho, np.s_[im + 1:, im], np.s_[im + 1:, k])
_swap_slices(cho, np.s_[k + 1:im, k], np.s_[im, k + 1:im])
_swap_slices(new_lo, k, im)
_swap_slices(new_hi, k, im)
if ck > epk:
cho[k, k] = ck
cho[k, k + 1:] = 0.0
for i in range(k + 1, n):
cho[i, k] /= ck
cho[i, k + 1:i + 1] -= cho[i, k] * cho[k + 1:i + 1, k]
if abs(dem) > tol:
y[k] = ((np.exp(-lo_m * lo_m / 2) - np.exp(-hi_m * hi_m / 2)) /
(sqtp * dem))
else:
y[k] = (lo_m + hi_m) / 2
if lo_m < -10:
y[k] = hi_m
elif hi_m > 10:
y[k] = lo_m
cho[k, :k + 1] /= ck
new_lo[k] /= ck
new_hi[k] /= ck
else:
cho[k:, k] = 0.0
y[k] = (new_lo[k] + new_hi[k]) / 2
return cho, new_lo, new_hi
def _swap_slices(x, slc1, slc2):
t = x[slc1].copy()
x[slc1] = x[slc2].copy()
x[slc2] = t
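# Illustrative example: for x = np.array([1., 2., 3.]),
# _swap_slices(x, np.s_[:1], np.s_[2:]) leaves x == array([3., 2., 1.]);
# the copies guard against aliasing when the two slices overlap.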
| 18,767
| 34.146067
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/mvn.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.
import warnings
from . import _mvn # type: ignore
__all__ = [ # noqa: F822
'mvnun',
'mvnun_weighted',
'mvndst',
'dkblck'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.stats.mvn is deprecated and has no attribute "
f"{name}. Try looking in scipy.stats instead.")
warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
"the `scipy.stats.mvn` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_mvn, name)
| 784
| 23.53125
| 76
|
py
|
scipy
|
scipy-main/scipy/stats/_mstats_basic.py
|
"""
An extension of scipy.stats._stats_py to support masked arrays
"""
# Original author (2007): Pierre GF Gerard-Marchant
__all__ = ['argstoarray',
'count_tied_groups',
'describe',
'f_oneway', 'find_repeats','friedmanchisquare',
'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest',
'ks_1samp', 'kstest',
'linregress',
'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
'normaltest',
'obrientransform',
'pearsonr','plotting_positions','pointbiserialr',
'rankdata',
'scoreatpercentile','sem',
'sen_seasonal_slopes','skew','skewtest','spearmanr',
'siegelslopes', 'theilslopes',
'tmax','tmean','tmin','trim','trimboth',
'trimtail','trima','trimr','trimmed_mean','trimmed_std',
'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
'ttest_ind','ttest_rel','tvar',
'variation',
'winsorize',
'brunnermunzel',
]
import numpy as np
from numpy import ndarray
import numpy.ma as ma
from numpy.ma import masked, nomask
import math
import itertools
import warnings
from collections import namedtuple
from . import distributions
from scipy._lib._util import _rename_parameter, _contains_nan
from scipy._lib._bunch import _make_tuple_bunch
import scipy.special as special
import scipy.stats._stats_py
from ._stats_mstats_common import (
_find_repeats,
linregress as stats_linregress,
LinregressResult as stats_LinregressResult,
theilslopes as stats_theilslopes,
siegelslopes as stats_siegelslopes
)
def _chk_asarray(a, axis):
# Always returns a masked array, raveled for axis=None
a = ma.asanyarray(a)
if axis is None:
a = ma.ravel(a)
outaxis = 0
else:
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
if axis is None:
a = ma.ravel(a)
b = ma.ravel(b)
outaxis = 0
else:
outaxis = axis
return a, b, outaxis
def _chk_size(a, b):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
(na, nb) = (a.size, b.size)
if na != nb:
raise ValueError("The size of the input array should match!"
" ({} <> {})".format(na, nb))
return (a, b, na)
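# For example, _chk_size([1, 2, 3], [4, 5, 6]) returns both inputs as masked
# arrays together with their common size 3; mismatched lengths raise the
# ValueError above.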
def argstoarray(*args):
"""
Constructs a 2D array from a group of sequences.
Sequences are filled with missing values to match the length of the longest
sequence.
Parameters
----------
*args : sequences
Group of sequences.
Returns
-------
argstoarray : MaskedArray
        An (`m` x `n`) masked array, where `m` is the number of arguments and
`n` the length of the longest argument.
Notes
-----
`numpy.ma.row_stack` has identical behavior, but is called with a sequence
of sequences.
Examples
--------
A 2D masked array constructed from a group of sequences is returned.
>>> from scipy.stats.mstats import argstoarray
>>> argstoarray([1, 2, 3], [4, 5, 6])
masked_array(
data=[[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]],
mask=[[False, False, False],
[False, False, False]],
fill_value=1e+20)
    The returned masked array is filled with missing values when the lengths
    of the sequences differ.
>>> argstoarray([1, 3], [4, 5, 6])
masked_array(
data=[[1.0, 3.0, --],
[4.0, 5.0, 6.0]],
mask=[[False, False, True],
[False, False, False]],
fill_value=1e+20)
"""
if len(args) == 1 and not isinstance(args[0], ndarray):
output = ma.asarray(args[0])
if output.ndim != 2:
raise ValueError("The input should be 2D")
else:
n = len(args)
m = max([len(k) for k in args])
output = ma.array(np.empty((n,m), dtype=float), mask=True)
for (k,v) in enumerate(args):
output[k,:len(v)] = v
output[np.logical_not(np.isfinite(output._data))] = masked
return output
def find_repeats(arr):
"""Find repeats in arr and return a tuple (repeats, repeat_count).
The input is cast to float64. Masked values are discarded.
Parameters
----------
arr : sequence
Input array. The array is flattened if it is not 1D.
Returns
-------
repeats : ndarray
Array of repeated values.
counts : ndarray
Array of counts.
Examples
--------
>>> from scipy.stats import mstats
>>> mstats.find_repeats([2, 1, 2, 3, 2, 2, 5])
(array([2.]), array([4]))
In the above example, 2 repeats 4 times.
>>> mstats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
(array([4., 5.]), array([2, 2]))
In the above example, both 4 and 5 repeat 2 times.
"""
# Make sure we get a copy. ma.compressed promises a "new array", but can
# actually return a reference.
compr = np.asarray(ma.compressed(arr), dtype=np.float64)
try:
need_copy = np.may_share_memory(compr, arr)
except AttributeError:
# numpy < 1.8.2 bug: np.may_share_memory([], []) raises,
# while in numpy 1.8.2 and above it just (correctly) returns False.
need_copy = False
if need_copy:
compr = compr.copy()
return _find_repeats(compr)
def count_tied_groups(x, use_missing=False):
"""
Counts the number of tied values.
Parameters
----------
x : sequence
        Sequence of data on which to count the ties.
use_missing : bool, optional
Whether to consider missing values as tied.
Returns
-------
count_tied_groups : dict
        Returns a dictionary mapping each tie-group size to the number of
        groups of that size.
Examples
--------
>>> from scipy.stats import mstats
>>> import numpy as np
>>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
>>> mstats.count_tied_groups(z)
{2: 1, 3: 2}
In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x).
>>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
>>> mstats.count_tied_groups(z)
{2: 2, 3: 1}
>>> z[[1,-1]] = np.ma.masked
>>> mstats.count_tied_groups(z, use_missing=True)
{2: 2, 3: 1}
"""
nmasked = ma.getmask(x).sum()
# We need the copy as find_repeats will overwrite the initial data
data = ma.compressed(x).copy()
(ties, counts) = find_repeats(data)
nties = {}
if len(ties):
nties = dict(zip(np.unique(counts), itertools.repeat(1)))
nties.update(dict(zip(*find_repeats(counts))))
if nmasked and use_missing:
try:
nties[nmasked] += 1
except KeyError:
nties[nmasked] = 1
return nties
def rankdata(data, axis=None, use_missing=False):
"""Returns the rank (also known as order statistics) of each data point
along the given axis.
If some values are tied, their rank is averaged.
If some values are masked, their rank is set to 0 if use_missing is False,
or set to the average rank of the unmasked values if use_missing is True.
Parameters
----------
data : sequence
Input data. The data is transformed to a masked array
axis : {None,int}, optional
Axis along which to perform the ranking.
If None, the array is first flattened. An exception is raised if
the axis is specified for arrays with a dimension larger than 2
use_missing : bool, optional
Whether the masked values have a rank of 0 (False) or equal to the
        average rank of the unmasked values (True).
    """
"""
def _rank1d(data, use_missing=False):
n = data.count()
rk = np.empty(data.size, dtype=float)
idx = data.argsort()
rk[idx[:n]] = np.arange(1,n+1)
if use_missing:
rk[idx[n:]] = (n+1)/2.
else:
rk[idx[n:]] = 0
repeats = find_repeats(data.copy())
for r in repeats[0]:
condition = (data == r).filled(False)
rk[condition] = rk[condition].mean()
return rk
data = ma.array(data, copy=False)
if axis is None:
if data.ndim > 1:
return _rank1d(data.ravel(), use_missing).reshape(data.shape)
else:
return _rank1d(data, use_missing)
else:
return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
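# Illustrative example: rankdata([40, 10, 30, 10]) gives
# array([4. , 1.5, 3. , 1.5]) -- the tied values share their average rank.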
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0):
"""
Returns an array of the modal (most common) value in the passed array.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Notes
-----
For more details, see `scipy.stats.mode`.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> from scipy.stats import mstats
>>> m_arr = np.ma.array([1, 1, 0, 0, 0, 0], mask=[0, 0, 1, 1, 1, 0])
>>> mstats.mode(m_arr) # note that most zeros are masked
ModeResult(mode=array([1.]), count=array([2.]))
"""
return _mode(a, axis=axis, keepdims=True)
def _mode(a, axis=0, keepdims=True):
# Don't want to expose `keepdims` from the public `mstats.mode`
a, axis = _chk_asarray(a, axis)
def _mode1D(a):
(rep,cnt) = find_repeats(a)
if not cnt.ndim:
return (0, 0)
elif cnt.size:
return (rep[cnt.argmax()], cnt.max())
else:
return (a.min(), 1)
if axis is None:
output = _mode1D(ma.ravel(a))
output = (ma.array(output[0]), ma.array(output[1]))
else:
output = ma.apply_along_axis(_mode1D, axis, a)
if keepdims is None or keepdims:
newshape = list(a.shape)
newshape[axis] = 1
slices = [slice(None)] * output.ndim
slices[axis] = 0
modes = output[tuple(slices)].reshape(newshape)
slices[axis] = 1
counts = output[tuple(slices)].reshape(newshape)
output = (modes, counts)
else:
output = np.moveaxis(output, axis, 0)
return ModeResult(*output)
def _betai(a, b, x):
x = np.asanyarray(x)
x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
def msign(x):
"""Returns the sign of x, or 0 if x is masked."""
return ma.filled(np.sign(x), 0)
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector x and :math:`m_y` is
the mean of the vector y.
Under the assumption that x and y are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient r is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. The p-value
roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. More precisely, for a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> from scipy.stats import mstats
>>> mstats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
There is a linear dependence between x and y if y = a + b*x + e, where
a,b are constants and e is a random error term, assumed to be independent
of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
e follow a normal distribution with mean zero and standard deviation s>0.
>>> s = 0.5
>>> x = stats.norm.rvs(size=500)
>>> e = stats.norm.rvs(scale=s, size=500)
>>> y = x + e
>>> mstats.pearsonr(x, y)
(0.9029601878969703, 8.428978827629898e-185) # may vary
This should be close to the exact value given by
>>> 1/np.sqrt(1 + s**2)
0.8944271909999159
For s=0.5, we observe a high level of correlation. In general, a large
variance of the noise reduces the correlation, while the correlation
approaches one as the variance of the error goes to zero.
It is important to keep in mind that no correlation does not imply
independence unless (x, y) is jointly normal. Correlation can even be zero
when there is a very simple dependence structure: if X follows a
standard normal distribution, let y = abs(x). Note that the correlation
between x and y is zero. Indeed, since the expectation of x is zero,
cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
by symmetry. The following lines of code illustrate this observation:
>>> y = np.abs(x)
>>> mstats.pearsonr(x, y)
(-0.016172891856853524, 0.7182823678751942) # may vary
A non-zero correlation coefficient can be misleading. For example, if X has
a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797...,
implying a high level of correlation:
>>> y = np.where(x < 0, x, 0)
>>> mstats.pearsonr(x, y)
(0.8537091583771509, 3.183461621422181e-143) # may vary
This is unintuitive since there is no dependence of x and y if x is larger
than zero which happens in about half of the cases if we sample x and y.
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
# Get the common mask and the total nb of unmasked elements
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
df = n-2
if df < 0:
return (masked, masked)
return scipy.stats._stats_py.pearsonr(
ma.masked_array(x, mask=m).compressed(),
ma.masked_array(y, mask=m).compressed())
def spearmanr(x, y=None, use_ties=True, axis=None, nan_policy='propagate',
alternative='two-sided'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the linear
relationship between two datasets. Unlike the Pearson correlation, the
Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply a monotonic relationship. Positive correlations imply that
as `x` increases, so does `y`. Negative correlations imply that as `x`
increases, `y` decreases.
Missing values are discarded pair-wise: if a value is missing in `x`, the
corresponding value in `y` is masked.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x, y : 1D or 2D array_like, y is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
use_ties : bool, optional
DO NOT USE. Does not do anything, keyword is only left in place for
backwards compatibility reasons.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the correlation is nonzero
* 'less': the correlation is negative (less than zero)
* 'greater': the correlation is positive (greater than zero)
.. versionadded:: 1.7.0
Returns
-------
res : SignificanceResult
An object containing attributes:
statistic : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). Correlation matrix is square
with length equal to total number of variables (columns or rows) in
``a`` and ``b`` combined.
pvalue : float
The p-value for a hypothesis test whose null hypothesis
is that two sets of data are linearly uncorrelated. See
`alternative` above for alternative hypotheses. `pvalue` has the
same shape as `statistic`.
References
----------
[CRCProbStat2000] section 14.7
"""
if not use_ties:
raise ValueError("`use_ties=False` is not supported in SciPy >= 1.2.0")
# Always returns a masked array, raveled if axis=None
x, axisout = _chk_asarray(x, axis)
if y is not None:
# Deal only with 2-D `x` case.
y, _ = _chk_asarray(y, axis)
if axisout == 0:
x = ma.column_stack((x, y))
else:
x = ma.row_stack((x, y))
if axisout == 1:
        # To simplify the code that follows (always use `n_obs, n_vars` shape)
x = x.T
if nan_policy == 'omit':
x = ma.masked_invalid(x)
def _spearmanr_2cols(x):
# Mask the same observations for all variables, and then drop those
# observations (can't leave them masked, rankdata is weird).
x = ma.mask_rowcols(x, axis=0)
x = x[~x.mask.any(axis=1), :]
# If either column is entirely NaN or Inf
if not np.any(x.data):
res = scipy.stats._stats_py.SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
m = ma.getmask(x)
n_obs = x.shape[0]
dof = n_obs - 2 - int(m.sum(axis=0)[0])
if dof < 0:
raise ValueError("The input must have at least 3 entries!")
# Gets the ranks and rank differences
x_ranked = rankdata(x, axis=0)
rs = ma.corrcoef(x_ranked, rowvar=False).data
# rs can have elements equal to 1, so avoid zero division warnings
with np.errstate(divide='ignore'):
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof / ((rs+1.0) * (1.0-rs))).clip(0))
t, prob = scipy.stats._stats_py._ttest_finish(dof, t, alternative)
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
res = scipy.stats._stats_py.SignificanceResult(rs[1, 0],
prob[1, 0])
res.correlation = rs[1, 0]
return res
else:
res = scipy.stats._stats_py.SignificanceResult(rs, prob)
res.correlation = rs
return res
# Need to do this per pair of variables, otherwise the dropped observations
# in a third column mess up the result for a pair.
n_vars = x.shape[1]
if n_vars == 2:
return _spearmanr_2cols(x)
else:
rs = np.ones((n_vars, n_vars), dtype=float)
prob = np.zeros((n_vars, n_vars), dtype=float)
for var1 in range(n_vars - 1):
for var2 in range(var1+1, n_vars):
result = _spearmanr_2cols(x[:, [var1, var2]])
rs[var1, var2] = result.correlation
rs[var2, var1] = result.correlation
prob[var1, var2] = result.pvalue
prob[var2, var1] = result.pvalue
res = scipy.stats._stats_py.SignificanceResult(rs, prob)
res.correlation = rs
return res
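# Illustrative example: a perfectly monotone relationship yields a statistic
# of 1.0, e.g. spearmanr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10]).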
def _kendall_p_exact(n, c, alternative='two-sided'):
# Use the fact that distribution is symmetric: always calculate a CDF in
# the left tail.
# This will be the one-sided p-value if `c` is on the side of
# the null distribution predicted by the alternative hypothesis.
# The two-sided p-value will be twice this value.
# If `c` is on the other side of the null distribution, we'll need to
# take the complement and add back the probability mass at `c`.
in_right_tail = (c >= (n*(n-1))//2 - c)
alternative_greater = (alternative == 'greater')
c = int(min(c, (n*(n-1))//2 - c))
# Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods"
# (4th Edition), Charles Griffin & Co., 1970.
if n <= 0:
raise ValueError(f'n ({n}) must be positive')
elif c < 0 or 4*c > n*(n-1):
raise ValueError(f'c ({c}) must satisfy 0 <= 4c <= n(n-1) = {n*(n-1)}.')
elif n == 1:
prob = 1.0
p_mass_at_c = 1
elif n == 2:
prob = 1.0
p_mass_at_c = 0.5
elif c == 0:
prob = 2.0/math.factorial(n) if n < 171 else 0.0
p_mass_at_c = prob/2
elif c == 1:
prob = 2.0/math.factorial(n-1) if n < 172 else 0.0
p_mass_at_c = (n-1)/math.factorial(n)
elif 4*c == n*(n-1) and alternative == 'two-sided':
# I'm sure there's a simple formula for p_mass_at_c in this
# case, but I don't know it. Use generic formula for one-sided p-value.
prob = 1.0
elif n < 171:
new = np.zeros(c+1)
new[0:2] = 1.0
for j in range(3,n+1):
new = np.cumsum(new)
if j <= c:
new[j:] -= new[:c+1-j]
prob = 2.0*np.sum(new)/math.factorial(n)
p_mass_at_c = new[-1]/math.factorial(n)
else:
new = np.zeros(c+1)
new[0:2] = 1.0
for j in range(3, n+1):
new = np.cumsum(new)/j
if j <= c:
new[j:] -= new[:c+1-j]
prob = np.sum(new)
p_mass_at_c = new[-1]/2
if alternative != 'two-sided':
        # If the side of the null distribution that `c` falls on agrees with
        # the alternative hypothesis, the one-sided p-value is half the
        # two-sided p-value.
if in_right_tail == alternative_greater:
prob /= 2
else:
prob = 1 - prob/2 + p_mass_at_c
prob = np.clip(prob, 0, 1)
return prob
def kendalltau(x, y, use_ties=True, use_missing=False, method='auto',
alternative='two-sided'):
"""
Computes Kendall's rank correlation tau on two variables *x* and *y*.
Parameters
----------
x : sequence
First data list (for example, time).
y : sequence
Second data list.
use_ties : {True, False}, optional
Whether ties correction should be performed.
use_missing : {False, True}, optional
Whether missing data should be allocated a rank of 0 (False) or the
average rank (True)
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [1]_.
'asymptotic' uses a normal approximation valid for large samples.
'exact' computes the exact p-value, but can only be used if no ties
are present. As the sample size increases, the 'exact' computation
time may grow and the result may lose some precision.
'auto' is the default and selects the appropriate
method based on a trade-off between speed and accuracy.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the rank correlation is nonzero
* 'less': the rank correlation is negative (less than zero)
* 'greater': the rank correlation is positive (greater than zero)
Returns
-------
res : SignificanceResult
An object containing attributes:
statistic : float
The tau statistic.
pvalue : float
The p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
References
----------
.. [1] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.flatten(), y.flatten())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
        # need int() here, otherwise numpy defaults to a 32 bit
        # integer on all Windows architectures, causing overflow.
        # int() keeps the value at Python's arbitrary precision.
n -= int(m.sum())
if n < 2:
res = scipy.stats._stats_py.SignificanceResult(np.nan, np.nan)
res.correlation = np.nan
return res
rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
idx = rx.argsort()
(rx, ry) = (rx[idx], ry[idx])
C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
xties = count_tied_groups(x)
yties = count_tied_groups(y)
if use_ties:
corr_x = np.sum([v*k*(k-1) for (k,v) in xties.items()], dtype=float)
corr_y = np.sum([v*k*(k-1) for (k,v) in yties.items()], dtype=float)
denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
else:
denom = n*(n-1)/2.
tau = (C-D) / denom
if method == 'exact' and (xties or yties):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (not xties and not yties) and (n <= 33 or min(C, n*(n-1)/2.0-C) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if not xties and not yties and method == 'exact':
prob = _kendall_p_exact(n, C, alternative)
elif method == 'asymptotic':
var_s = n*(n-1)*(2*n+5)
if use_ties:
var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in xties.items()])
var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in yties.items()])
v1 = (np.sum([v*k*(k-1) for (k, v) in xties.items()], dtype=float) *
np.sum([v*k*(k-1) for (k, v) in yties.items()], dtype=float))
v1 /= 2.*n*(n-1)
if n > 2:
v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in xties.items()],
dtype=float) * \
np.sum([v*k*(k-1)*(k-2) for (k,v) in yties.items()],
dtype=float)
v2 /= 9.*n*(n-1)*(n-2)
else:
v2 = 0
else:
v1 = v2 = 0
var_s /= 18.
var_s += (v1 + v2)
z = (C-D)/np.sqrt(var_s)
_, prob = scipy.stats._stats_py._normtest_finish(z, alternative)
else:
raise ValueError("Unknown method "+str(method)+" specified, please "
"use auto, exact or asymptotic.")
res = scipy.stats._stats_py.SignificanceResult(tau, prob)
res.correlation = tau
return res
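# Illustrative example: perfectly concordant data such as
# kendalltau([1, 2, 3, 4], [1, 2, 3, 4]) give tau == 1.0 with an exact
# two-sided p-value of 2/4! (about 0.083).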
def kendalltau_seasonal(x):
"""
Computes a multivariate Kendall's rank correlation tau, for seasonal data.
Parameters
----------
x : 2-D ndarray
        Array of seasonal data, with seasons in columns.
    """
"""
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,m) = x.shape
n_p = x.count(0)
S_szn = sum(msign(x[i:]-x[i]).sum(0) for i in range(n))
S_tot = S_szn.sum()
n_tot = x.count()
ties = count_tied_groups(x.compressed())
corr_ties = sum(v*k*(k-1) for (k,v) in ties.items())
denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.
R = rankdata(x, axis=0, use_missing=True)
K = ma.empty((m,m), dtype=int)
covmat = ma.empty((m,m), dtype=float)
denom_szn = ma.empty(m, dtype=float)
for j in range(m):
ties_j = count_tied_groups(x[:,j].compressed())
corr_j = sum(v*k*(k-1) for (k,v) in ties_j.items())
cmb = n_p[j]*(n_p[j]-1)
for k in range(j,m,1):
K[j,k] = sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
for i in range(n))
covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() -
n*(n_p[j]+1)*(n_p[k]+1))/3.
K[k,j] = K[j,k]
covmat[k,j] = covmat[j,k]
denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.
var_szn = covmat.diagonal()
z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())
prob_szn = special.erfc(abs(z_szn)/np.sqrt(2))
prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))
chi2_tot = (z_szn*z_szn).sum()
chi2_trd = m * z_szn.mean()**2
output = {'seasonal tau': S_szn/denom_szn,
'global tau': S_tot/denom_tot,
'global tau (alt)': S_tot/denom_szn.sum(),
'seasonal p-value': prob_szn,
'global p-value (indep)': prob_tot_ind,
'global p-value (dep)': prob_tot_dep,
'chi2 total': chi2_tot,
'chi2 trend': chi2_trd,
}
return output
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
def pointbiserialr(x, y):
"""Calculates a point biserial correlation coefficient and its p-value.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
For more details on `pointbiserialr`, see `scipy.stats.pointbiserialr`.
"""
x = ma.fix_invalid(x, copy=True).astype(bool)
y = ma.fix_invalid(y, copy=True).astype(float)
# Get rid of the missing data
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
unmask = np.logical_not(m)
x = x[unmask]
y = y[unmask]
n = len(x)
# phat is the fraction of x values that are True
phat = x.sum() / float(n)
y0 = y[~x] # y-values where x is False
y1 = y[x] # y-values where x is True
y0m = y0.mean()
y1m = y1.mean()
rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std()
df = n-2
t = rpb*ma.sqrt(df/(1.0-rpb**2))
prob = _betai(0.5*df, 0.5, df/(df+t*t))
return PointbiserialrResult(rpb, prob)
def linregress(x, y=None):
r"""
Linear regression calculation
Note that the non-masked version is used, and that this docstring is
replaced by the non-masked docstring + some info on missing data.
"""
if y is None:
x = ma.array(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
raise ValueError("If only `x` is given as input, "
"it has to be of shape (2, N) or (N, 2), "
f"provided shape was {x.shape}")
else:
x = ma.array(x)
y = ma.array(y)
x = x.flatten()
y = y.flatten()
if np.amax(x) == np.amin(x) and len(x) > 1:
raise ValueError("Cannot calculate a linear regression "
"if all x values are identical")
m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
if m is not nomask:
x = ma.array(x, mask=m)
y = ma.array(y, mask=m)
if np.any(~m):
result = stats_linregress(x.data[~m], y.data[~m])
else:
# All data is masked
result = stats_LinregressResult(slope=None, intercept=None,
rvalue=None, pvalue=None,
stderr=None,
intercept_stderr=None)
else:
result = stats_linregress(x.data, y.data)
return result
def theilslopes(y, x=None, alpha=0.95, method='separate'):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
method : {'joint', 'separate'}, optional
Method to be used for computing estimate for intercept.
Following methods are supported,
* 'joint': Uses np.median(y - slope * x) as intercept.
* 'separate': Uses np.median(y) - slope * np.median(x)
as intercept.
The default is 'separate'.
.. versionadded:: 1.8.0
Returns
-------
result : ``TheilslopesResult`` instance
The return value is an object with the following attributes:
slope : float
Theil slope.
intercept : float
Intercept of the Theil line.
low_slope : float
Lower bound of the confidence interval on `slope`.
high_slope : float
Upper bound of the confidence interval on `slope`.
See Also
--------
siegelslopes : a similar technique using repeated medians
Notes
-----
For more details on `theilslopes`, see `scipy.stats.theilslopes`.
"""
y = ma.asarray(y).flatten()
if x is None:
x = ma.arange(len(y), dtype=float)
else:
x = ma.asarray(x).flatten()
if len(x) != len(y):
raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})")
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
y._mask = x._mask = m
# Disregard any masked elements of x or y
y = y.compressed()
x = x.compressed().astype(float)
# We now have unmasked arrays so can use `scipy.stats.theilslopes`
return stats_theilslopes(y, x, alpha=alpha, method=method)
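# Illustrative example: masked points are simply dropped, so for
#     y = np.ma.masked_invalid([0., 1., np.nan, 3.])
# theilslopes(y) fits only the three unmasked points, giving slope 1.0 and
# intercept 0.0.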
def siegelslopes(y, x=None, method="hierarchical"):
r"""
Computes the Siegel estimator for a set of points (x, y).
`siegelslopes` implements a method for robust linear regression
using repeated medians to fit a line to the points (x, y).
The method is robust to outliers with an asymptotic breakdown point
of 50%.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
method : {'hierarchical', 'separate'}
If 'hierarchical', estimate the intercept using the estimated
slope ``slope`` (default option).
If 'separate', estimate the intercept independent of the estimated
slope. See Notes for details.
Returns
-------
result : ``SiegelslopesResult`` instance
The return value is an object with the following attributes:
slope : float
Estimate of the slope of the regression line.
intercept : float
Estimate of the intercept of the regression line.
See Also
--------
theilslopes : a similar technique without repeated medians
Notes
-----
For more details on `siegelslopes`, see `scipy.stats.siegelslopes`.
"""
y = ma.asarray(y).ravel()
if x is None:
x = ma.arange(len(y), dtype=float)
else:
x = ma.asarray(x).ravel()
if len(x) != len(y):
raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})")
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
y._mask = x._mask = m
# Disregard any masked elements of x or y
y = y.compressed()
x = x.compressed().astype(float)
# We now have unmasked arrays so can use `scipy.stats.siegelslopes`
return stats_siegelslopes(y, x, method=method)
SenSeasonalSlopesResult = _make_tuple_bunch('SenSeasonalSlopesResult',
['intra_slope', 'inter_slope'])
def sen_seasonal_slopes(x):
r"""
Computes seasonal Theil-Sen and Kendall slope estimators.
The seasonal generalization of Sen's slope computes the slopes between all
pairs of values within a "season" (column) of a 2D array. It returns an
array containing the median of these "within-season" slopes for each
season (the Theil-Sen slope estimator of each season), and it returns the
median of the within-season slopes across all seasons (the seasonal Kendall
slope estimator).
Parameters
----------
x : 2D array_like
Each column of `x` contains measurements of the dependent variable
within a season. The independent variable (usually time) of each season
is assumed to be ``np.arange(x.shape[0])``.
Returns
-------
result : ``SenSeasonalSlopesResult`` instance
The return value is an object with the following attributes:
intra_slope : ndarray
For each season, the Theil-Sen slope estimator: the median of
within-season slopes.
inter_slope : float
            The seasonal Kendall slope estimator: the median of within-season
slopes *across all* seasons.
See Also
--------
theilslopes : the analogous function for non-seasonal data
scipy.stats.theilslopes : non-seasonal slopes for non-masked arrays
Notes
-----
The slopes :math:`d_{ijk}` within season :math:`i` are:
.. math::
d_{ijk} = \frac{x_{ij} - x_{ik}}
{j - k}
for pairs of distinct integer indices :math:`j, k` of :math:`x`.
Element :math:`i` of the returned `intra_slope` array is the median of the
:math:`d_{ijk}` over all :math:`j < k`; this is the Theil-Sen slope
estimator of season :math:`i`. The returned `inter_slope` value, better
known as the seasonal Kendall slope estimator, is the median of the
:math:`d_{ijk}` over all :math:`i, j, k`.
References
----------
.. [1] Hirsch, Robert M., James R. Slack, and Richard A. Smith.
"Techniques of trend analysis for monthly water quality data."
*Water Resources Research* 18.1 (1982): 107-121.
Examples
--------
Suppose we have 100 observations of a dependent variable for each of four
seasons:
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> x = rng.random(size=(100, 4))
We compute the seasonal slopes as:
>>> from scipy import stats
>>> intra_slope, inter_slope = stats.mstats.sen_seasonal_slopes(x)
If we define a function to compute all slopes between observations within
a season:
>>> def dijk(yi):
... n = len(yi)
... x = np.arange(n)
... dy = yi - yi[:, np.newaxis]
... dx = x - x[:, np.newaxis]
... # we only want unique pairs of distinct indices
... mask = np.triu(np.ones((n, n), dtype=bool), k=1)
... return dy[mask]/dx[mask]
    then element ``i`` of ``intra_slope`` is the median of ``dijk(x[:, i])``:
>>> i = 2
>>> np.allclose(np.median(dijk(x[:, i])), intra_slope[i])
True
and ``inter_slope`` is the median of the values returned by ``dijk`` for
all seasons:
>>> all_slopes = np.concatenate([dijk(x[:, i]) for i in range(x.shape[1])])
>>> np.allclose(np.median(all_slopes), inter_slope)
True
Because the data are randomly generated, we would expect the median slopes
to be nearly zero both within and across all seasons, and indeed they are:
>>> intra_slope.data
array([ 0.00124504, -0.00277761, -0.00221245, -0.00036338])
>>> inter_slope
-0.0010511779872922058
"""
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,_) = x.shape
# Get list of slopes per season
szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None]
for i in range(n)])
szn_medslopes = ma.median(szn_slopes, axis=0)
medslope = ma.median(szn_slopes, axis=None)
return SenSeasonalSlopesResult(szn_medslopes, medslope)
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, alternative='two-sided'):
"""
Calculates the T-test for the mean of ONE group of scores.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        Expected value in null hypothesis. If array_like, then it must have
        the same shape as `a`, excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the mean of the underlying distribution of the sample
is different than the given population mean (`popmean`)
* 'less': the mean of the underlying distribution of the sample is
less than the given population mean (`popmean`)
* 'greater': the mean of the underlying distribution of the sample is
greater than the given population mean (`popmean`)
.. versionadded:: 1.7.0
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
The p-value
Notes
-----
For more details on `ttest_1samp`, see `scipy.stats.ttest_1samp`.
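    Examples
    --------
    A minimal sketch of typical usage: because the masked element is
    ignored, the result matches `scipy.stats.ttest_1samp` applied to the
    compressed (unmasked) data.
    >>> import numpy as np
    >>> from scipy import stats
    >>> a = np.ma.array([5.1, 4.8, 6.2, 5.5, 5.0, 99.],
    ...                 mask=[0, 0, 0, 0, 0, 1])
    >>> res = stats.mstats.ttest_1samp(a, popmean=5.0)
    >>> expected = stats.ttest_1samp(a.compressed(), popmean=5.0)
    >>> np.allclose((res.statistic, res.pvalue),
    ...             (expected.statistic, expected.pvalue))
    True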
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return (np.nan, np.nan)
x = a.mean(axis=axis)
v = a.var(axis=axis, ddof=1)
n = a.count(axis=axis)
# force df to be an array for masked division not to throw a warning
df = ma.asanyarray(n - 1.0)
svar = ((n - 1.0) * v) / df
with np.errstate(divide='ignore', invalid='ignore'):
t = (x - popmean) / ma.sqrt(svar / n)
t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative)
return Ttest_1sampResult(t, prob)
ttest_onesamp = ttest_1samp
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind(a, b, axis=0, equal_var=True, alternative='two-sided'):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True, perform a standard independent 2 sample test that assumes equal
population variances.
If False, perform Welch's t-test, which does not assume equal population
variance.
.. versionadded:: 0.17.0
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.7.0
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
Notes
-----
For more details on `ttest_ind`, see `scipy.stats.ttest_ind`.
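    Examples
    --------
    A quick consistency check: with the masked entry ignored, the result
    agrees with `scipy.stats.ttest_ind` on the compressed data.
    >>> import numpy as np
    >>> from scipy import stats
    >>> a = np.ma.array([6.3, 5.4, 5.7, 5.2, 5.0, -99.],
    ...                 mask=[0, 0, 0, 0, 0, 1])
    >>> b = np.ma.array([6.9, 7.0, 6.1, 7.9])
    >>> res = stats.mstats.ttest_ind(a, b)
    >>> expected = stats.ttest_ind(a.compressed(), b.compressed())
    >>> np.allclose((res.statistic, res.pvalue),
    ...             (expected.statistic, expected.pvalue))
    True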
"""
a, b, axis = _chk2_asarray(a, b, axis)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
(x1, x2) = (a.mean(axis), b.mean(axis))
(v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
(n1, n2) = (a.count(axis), b.count(axis))
if equal_var:
# force df to be an array for masked division not to throw a warning
df = ma.asanyarray(n1 + n2 - 2.0)
svar = ((n1-1)*v1+(n2-1)*v2) / df
denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here!
else:
vn1 = v1/n1
vn2 = v2/n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero.
# It doesn't matter what df is as long as it is not NaN.
df = np.where(np.isnan(df), 1, df)
denom = ma.sqrt(vn1 + vn2)
with np.errstate(divide='ignore', invalid='ignore'):
t = (x1-x2) / denom
t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative)
return Ttest_indResult(t, prob)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, alternative='two-sided'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.7.0
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
For more details on `ttest_rel`, see `scipy.stats.ttest_rel`.
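    Examples
    --------
    A minimal sketch: a pair is dropped when its value in `a` is masked, so
    the result agrees with `scipy.stats.ttest_rel` on the remaining pairs.
    >>> import numpy as np
    >>> from scipy import stats
    >>> a = np.ma.array([4.2, 5.1, 6.3, 5.8, 7.0], mask=[0, 0, 1, 0, 0])
    >>> b = np.array([4.0, 5.5, 6.0, 5.6, 6.8])
    >>> res = stats.mstats.ttest_rel(a, b)
    >>> expected = stats.ttest_rel(a.compressed(), b[~a.mask])
    >>> np.allclose((res.statistic, res.pvalue),
    ...             (expected.statistic, expected.pvalue))
    True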
"""
a, b, axis = _chk2_asarray(a, b, axis)
if len(a) != len(b):
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return Ttest_relResult(np.nan, np.nan)
n = a.count(axis)
df = ma.asanyarray(n-1.0)
d = (a-b).astype('d')
dm = d.mean(axis)
v = d.var(axis=axis, ddof=1)
denom = ma.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = dm / denom
t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative)
return Ttest_relResult(t, prob)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
def mannwhitneyu(x,y, use_continuity=True):
"""
Computes the Mann-Whitney statistic
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x : sequence
Input
y : sequence
Input
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
statistic : float
The minimum of the Mann-Whitney statistics
pvalue : float
Approximate two-sided p-value assuming a normal distribution.
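    Examples
    --------
    A small worked example with no ties: the rank sums of the two samples
    are symmetric, so the z-score is 0 and the approximate p-value is 1.
    >>> from scipy.stats import mstats
    >>> res = mstats.mannwhitneyu([1, 4, 6], [2, 3, 5])
    >>> float(res.statistic), float(res.pvalue)
    (4.0, 1.0)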
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
sigsq -= sum(v*(k**3-k) for (k,v) in ties.items())/12.
sigsq *= nx*ny/float(nt*(nt-1))
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
return MannwhitneyuResult(u, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args):
"""
Compute the Kruskal-Wallis H-test for independent samples
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
Notes
-----
For more details on `kruskal`, see `scipy.stats.kruskal`.
Examples
--------
>>> from scipy.stats.mstats import kruskal
Random samples from three different brands of batteries were tested
to see how long the charge lasted. Results were as follows:
>>> a = [6.3, 5.4, 5.7, 5.2, 5.0]
>>> b = [6.9, 7.0, 6.1, 7.9]
>>> c = [7.2, 6.9, 6.1, 6.5]
    Test the hypothesis that the distribution functions for all of the brands'
    durations are identical. Use the 5% level of significance.
>>> kruskal(a, b, c)
KruskalResult(statistic=7.113812154696133, pvalue=0.028526948491942164)
The null hypothesis is rejected at the 5% level of significance
because the returned p-value is less than the critical value of 5%.
"""
output = argstoarray(*args)
ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
sumrk = ranks.sum(-1)
ngrp = ranks.count(-1)
ntot = ranks.count()
H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
# Tie correction
ties = count_tied_groups(ranks)
T = 1. - sum(v*(k**3-k) for (k,v) in ties.items())/float(ntot**3-ntot)
if T == 0:
raise ValueError('All numbers are identical in kruskal')
H /= T
df = len(output) - 1
prob = distributions.chi2.sf(H, df)
return KruskalResult(H, prob)
kruskalwallis = kruskal
@_rename_parameter("mode", "method")
def ks_1samp(x, cdf, args=(), alternative="two-sided", method='auto'):
"""
Computes the Kolmogorov-Smirnov test on one sample of masked values.
Missing values in `x` are discarded.
Parameters
----------
x : array_like
a 1-D array of observations of random variables.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `cdf` is a string.
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
method : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use approximation to exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
d : float
Value of the Kolmogorov Smirnov test
p : float
Corresponding p-value.
"""
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
return scipy.stats._stats_py.ks_1samp(
x, cdf, args=args, alternative=alternative, method=method)
@_rename_parameter("mode", "method")
def ks_2samp(data1, data2, alternative="two-sided", method='auto'):
"""
Computes the Kolmogorov-Smirnov test on two samples.
Missing values in `x` and/or `y` are discarded.
Parameters
----------
data1 : array_like
First data set
data2 : array_like
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
method : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use approximation to exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
d : float
Value of the Kolmogorov Smirnov test
p : float
Corresponding p-value.
"""
# Ideally this would be accomplished by
# ks_2samp = scipy.stats._stats_py.ks_2samp
# but the circular dependencies between _mstats_basic and stats prevent that.
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
return scipy.stats._stats_py.ks_2samp(data1, data2,
alternative=alternative,
method=method)
ks_twosamp = ks_2samp
@_rename_parameter("mode", "method")
def kstest(data1, data2, args=(), alternative='two-sided', method='auto'):
"""
Parameters
----------
data1 : array_like
data2 : str, callable or array_like
args : tuple, sequence, optional
Distribution parameters, used if `data1` or `data2` are strings.
alternative : str, as documented in stats.kstest
method : str, as documented in stats.kstest
Returns
-------
tuple of (K-S statistic, probability)
"""
return scipy.stats._stats_py.kstest(data1, data2, args,
alternative=alternative, method=method)
def trima(a, limits=None, inclusive=(True,True)):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
Parameters
----------
a : array_like
Input array.
limits : {None, tuple}, optional
Tuple of (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit
        will be masked. A limit of None indicates an open interval.
inclusive : (bool, bool) tuple, optional
Tuple of (lower flag, upper flag), indicating whether values exactly
equal to the lower (upper) limit are allowed.
Examples
--------
>>> from scipy.stats.mstats import trima
>>> import numpy as np
>>> a = np.arange(10)
The interval is left-closed and right-open, i.e., `[2, 8)`.
Trim the array by keeping only values in the interval.
>>> trima(a, limits=(2, 8), inclusive=(True, False))
masked_array(data=[--, --, 2, 3, 4, 5, 6, 7, --, --],
mask=[ True, True, False, False, False, False, False, False,
True, True],
fill_value=999999)
"""
a = ma.asarray(a)
a.unshare_mask()
if (limits is None) or (limits == (None, None)):
return a
(lower_lim, upper_lim) = limits
(lower_in, upper_in) = inclusive
condition = False
if lower_lim is not None:
if lower_in:
condition |= (a < lower_lim)
else:
condition |= (a <= lower_lim)
if upper_lim is not None:
if upper_in:
condition |= (a > upper_lim)
else:
condition |= (a >= upper_lim)
a[condition.filled(True)] = masked
return a
def trimr(a, limits=None, inclusive=(True, True), axis=None):
"""
Trims an array by masking some proportion of the data on each end.
Returns a masked version of the input array.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
        If n is the number of unmasked data before trimming, the
(n*limits[0])th smallest data and the (n*limits[1])th largest data are
masked, and the total number of unmasked data after trimming is
n*(1.-sum(limits)). The value of one limit can be set to None to
indicate an open interval.
inclusive : {(True,True) tuple}, optional
Tuple of flags indicating whether the number of data being masked on
the left (right) end should be truncated (True) or rounded (False) to
integers.
axis : {None,int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
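    Examples
    --------
    A minimal sketch: with ``limits=(0.1, 0.2)`` and the default inclusive
    flags, the lowest 10% and the highest 20% of the ten values are masked.
    >>> from scipy.stats.mstats import trimr
    >>> z = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> print(trimr(z, limits=(0.1, 0.2)))
    [-- 2 3 4 5 6 7 8 -- --]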
"""
def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = int(np.round(low_limit*n))
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n - int(np.round(n*up_limit))
a[idx[upidx:]] = masked
return a
a = ma.asarray(a)
a.unshare_mask()
if limits is None:
return a
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
else:
return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc)
trimdoc = """
Parameters
----------
a : sequence
Input array
limits : {None, tuple}, optional
If `relative` is False, tuple (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit are
masked.
If `relative` is True, tuple (lower percentage, upper percentage) to cut
on each side of the array, with respect to the number of unmasked data.
        If n is the number of unmasked data before trimming, the (n*limits[0])th
        smallest data and the (n*limits[1])th largest data are masked, and the
        total number of unmasked data after trimming is n*(1.-sum(limits)).
In each case, the value of one limit can be set to None to indicate an
open interval.
If limits is None, no trimming is performed
inclusive : {(bool, bool) tuple}, optional
If `relative` is False, tuple indicating whether values exactly equal
to the absolute limits are allowed.
If `relative` is True, tuple indicating whether the number of data
being masked on each side should be rounded (True) or truncated
(False).
relative : bool, optional
Whether to consider the limits as absolute values (False) or proportions
to cut (True).
axis : int, optional
Axis along which to trim.
"""
def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
%s
Examples
--------
>>> from scipy.stats.mstats import trim
>>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
>>> print(trim(z,(3,8)))
[-- -- 3 4 5 6 7 8 -- --]
>>> print(trim(z,(0.1,0.2),relative=True))
[-- 2 3 4 5 6 7 8 -- --]
"""
if relative:
return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
else:
return trima(a, limits=limits, inclusive=inclusive)
if trim.__doc__:
trim.__doc__ = trim.__doc__ % trimdoc
def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
"""
Trims the smallest and largest data values.
Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and
``int(proportiontocut * n)`` largest values of data along the given axis,
where n is the number of unmasked values before trimming.
Parameters
----------
data : ndarray
Data to trim.
proportiontocut : float, optional
Percentage of trimming (as a float between 0 and 1).
If n is the number of unmasked values before trimming, the number of
values after trimming is ``(1 - 2*proportiontocut) * n``.
Default is 0.2.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
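    Examples
    --------
    For illustration: with the default ``proportiontocut=0.2``, the two
    smallest and the two largest of the ten values are masked.
    >>> from scipy.stats.mstats import trimboth
    >>> z = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> print(trimboth(z))
    [-- -- 3 4 5 6 7 8 -- --]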
"""
return trimr(data, limits=(proportiontocut,proportiontocut),
inclusive=inclusive, axis=axis)
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
axis=None):
"""
Trims the data by masking values from one tail.
Parameters
----------
data : array_like
Data to trim.
proportiontocut : float, optional
Percentage of trimming. If n is the number of unmasked values
before trimming, the number of values after trimming is
``(1 - proportiontocut) * n``. Default is 0.2.
tail : {'left','right'}, optional
If 'left' the `proportiontocut` lowest values will be masked.
If 'right' the `proportiontocut` highest values will be masked.
Default is 'left'.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False). Default is
(True, True).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened. Default is None.
Returns
-------
trimtail : ndarray
Returned array of same shape as `data` with masked tail values.
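    Examples
    --------
    A minimal sketch: masking 30% of the values from the right tail.
    >>> from scipy.stats.mstats import trimtail
    >>> z = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> print(trimtail(z, proportiontocut=0.3, tail='right'))
    [1 2 3 4 5 6 7 -- -- --]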
"""
tail = str(tail).lower()[0]
if tail == 'l':
limits = (proportiontocut,None)
elif tail == 'r':
limits = (None, proportiontocut)
else:
raise TypeError("The tail argument should be in ('left','right')")
return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
trim1 = trimtail
def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None):
"""Returns the trimmed mean of the data along the given axis.
%s
"""
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis)
else:
return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis)
if trimmed_mean.__doc__:
trimmed_mean.__doc__ = trimmed_mean.__doc__ % trimdoc
def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed variance of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un-
biased estimate of the variance.
"""
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits, inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.var(axis=axis, ddof=ddof)
if trimmed_var.__doc__:
trimmed_var.__doc__ = trimmed_var.__doc__ % trimdoc
def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed standard deviation of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un-
biased estimate of the variance.
"""
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits,inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.std(axis=axis,ddof=ddof)
if trimmed_std.__doc__:
trimmed_std.__doc__ = trimmed_std.__doc__ % trimdoc
def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
"""
Returns the standard error of the trimmed mean along the given axis.
Parameters
----------
a : sequence
Input array
limits : {(0.1,0.1), tuple of float}, optional
tuple (lower percentage, upper percentage) to cut on each side of the
array, with respect to the number of unmasked data.
If n is the number of unmasked data before trimming, the values
smaller than ``n * limits[0]`` and the values larger than
        ``n * limits[1]`` are masked, and the total number of unmasked
data after trimming is ``n * (1.-sum(limits))``. In each case,
the value of one limit can be set to None to indicate an open interval.
If `limits` is None, no trimming is performed.
inclusive : {(bool, bool) tuple} optional
Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
axis : int, optional
Axis along which to trim.
Returns
-------
trimmed_stde : scalar or ndarray
"""
    def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
        "Returns the standard error of the trimmed mean for a 1D input data."
        n = a.count()
        idx = a.argsort()
        # Treat an open (None) limit as a zero proportion to cut, and default
        # the trimming indices to "no trimming" so that they are always
        # defined for the winsorizing step below.
        low_limit = 0 if low_limit is None else low_limit
        up_limit = 0 if up_limit is None else up_limit
        (lowidx, upidx) = (0, n)
        if low_limit:
            if low_inclusive:
                lowidx = int(low_limit*n)
            else:
                lowidx = int(np.round(low_limit*n))
            a[idx[:lowidx]] = masked
        if up_limit:
            if up_inclusive:
                upidx = n - int(n*up_limit)
            else:
                upidx = n - int(np.round(n*up_limit))
            a[idx[upidx:]] = masked
        # Winsorize the trimmed ends before taking the standard deviation
        a[idx[:lowidx]] = a[idx[lowidx]]
        a[idx[upidx:]] = a[idx[upidx-1]]
        winstd = a.std(ddof=1)
        return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a)))
a = ma.array(a, copy=True, subok=True)
a.unshare_mask()
if limits is None:
return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis))
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if (axis is None):
return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc)
else:
if a.ndim > 2:
raise ValueError("Array 'a' must be at most two dimensional, "
"but got a.ndim = %d" % a.ndim)
return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
lolim,uplim,loinc,upinc)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is None.
Returns
-------
tmean : float
Notes
-----
For more details on `tmean`, see `scipy.stats.tmean`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import mstats
>>> a = np.array([[6, 8, 3, 0],
... [3, 9, 1, 2],
... [8, 7, 8, 2],
... [5, 6, 0, 2],
... [4, 5, 5, 2]])
...
...
>>> mstats.tmean(a, (2,5))
3.3
>>> mstats.tmean(a, (2,5), axis=0)
masked_array(data=[4.0, 5.0, 4.0, 2.0],
mask=[False, False, False, False],
fill_value=1e+20)
"""
return trima(a, limits=limits, inclusive=inclusive).mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is zero.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
For more details on `tvar`, see `scipy.stats.tvar`.
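    Examples
    --------
    A minimal sketch: values outside the closed interval [0, 10] are
    ignored before computing the sample variance (``ddof=1``).
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> a = np.ma.array([1., 2., 3., 4., 50.])
    >>> float(mstats.tvar(a, limits=(0, 10)))
    1.6666666666666667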
"""
a = a.astype(float).ravel()
if limits is None:
n = (~a.mask).sum() # todo: better way to do that?
return np.ma.var(a) * n/(n-1.)
am = _mask_to_limits(a, limits=limits, inclusive=inclusive)
return np.ma.var(am, axis=axis, ddof=ddof)
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
"""
Compute the trimmed minimum
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
Returns
-------
tmin : float, int or ndarray
Notes
-----
For more details on `tmin`, see `scipy.stats.tmin`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import mstats
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 2],
... [8, 1, 8, 2],
... [5, 3, 0, 2],
... [4, 7, 5, 2]])
...
>>> mstats.tmin(a, 5)
masked_array(data=[5, 7, 5, --],
mask=[False, False, False, True],
fill_value=999999)
"""
a, axis = _chk_asarray(a, axis)
am = trima(a, (lowerlimit, None), (inclusive, False))
return ma.minimum.reduce(am, axis)
def tmax(a, upperlimit=None, axis=0, inclusive=True):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
Returns
-------
tmax : float, int or ndarray
Notes
-----
For more details on `tmax`, see `scipy.stats.tmax`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import mstats
>>> a = np.array([[6, 8, 3, 0],
... [3, 9, 1, 2],
... [8, 7, 8, 2],
... [5, 6, 0, 2],
... [4, 5, 5, 2]])
...
...
>>> mstats.tmax(a, 4)
masked_array(data=[4, --, 3, 2],
mask=[False, True, False, False],
fill_value=999999)
"""
a, axis = _chk_asarray(a, axis)
am = trima(a, (None, upperlimit), (False, inclusive))
return ma.maximum.reduce(am, axis)
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is zero.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
For more details on `tsem`, see `scipy.stats.tsem`.
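    Examples
    --------
    As a consistency check: the trimmed standard error equals the standard
    deviation of the in-range values divided by the square root of their
    count.
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> a = np.ma.array([1., 2., 3., 4., 50.])
    >>> inner = a[(a >= 0) & (a <= 10)]
    >>> np.allclose(mstats.tsem(a, limits=(0, 10)),
    ...             inner.std(ddof=1) / np.sqrt(len(inner)))
    True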
"""
a = ma.asarray(a).ravel()
if limits is None:
n = float(a.count())
return a.std(axis=axis, ddof=ddof)/ma.sqrt(n)
am = trima(a.ravel(), limits, inclusive)
sd = np.sqrt(am.var(axis=axis, ddof=ddof))
return sd / np.sqrt(am.count())
def winsorize(a, limits=None, inclusive=(True, True), inplace=False,
axis=None, nan_policy='propagate'):
"""Returns a Winsorized version of the input array.
The (limits[0])th lowest values are set to the (limits[0])th percentile,
and the (limits[1])th highest values are set to the (1 - limits[1])th
percentile.
Masked values are skipped.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple of float}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
        If n is the number of unmasked data before trimming, the
        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
        masked, and the total number of unmasked data after trimming is
        n*(1.-sum(limits)). The value of one limit can be set to None to
indicate an open interval.
inclusive : {(True, True) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be truncated (True) or rounded (False).
inplace : {False, True}, optional
Whether to winsorize in place (True) or to use a copy (False)
axis : {None, int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': allows nan values and may overwrite or propagate them
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Notes
-----
This function is applied to reduce the effect of possibly spurious outliers
by limiting the extreme values.
Examples
--------
>>> import numpy as np
>>> from scipy.stats.mstats import winsorize
A shuffled array contains integers from 1 to 10.
>>> a = np.array([10, 4, 9, 8, 5, 3, 7, 2, 1, 6])
    The lowest 10% of the values (i.e., `1`) and the highest 20% of the
    values (i.e., `9` and `10`) are replaced.
>>> winsorize(a, limits=[0.1, 0.2])
masked_array(data=[8, 4, 8, 8, 5, 3, 7, 2, 2, 6],
mask=False,
fill_value=999999)
"""
def _winsorize1D(a, low_limit, up_limit, low_include, up_include,
contains_nan, nan_policy):
n = a.count()
idx = a.argsort()
if contains_nan:
nan_count = np.count_nonzero(np.isnan(a))
if low_limit:
if low_include:
lowidx = int(low_limit * n)
else:
lowidx = np.round(low_limit * n).astype(int)
if contains_nan and nan_policy == 'omit':
lowidx = min(lowidx, n-nan_count-1)
a[idx[:lowidx]] = a[idx[lowidx]]
if up_limit is not None:
if up_include:
upidx = n - int(n * up_limit)
else:
upidx = n - np.round(n * up_limit).astype(int)
if contains_nan and nan_policy == 'omit':
a[idx[upidx:-nan_count]] = a[idx[upidx - 1]]
else:
a[idx[upidx:]] = a[idx[upidx - 1]]
return a
contains_nan, nan_policy = _contains_nan(a, nan_policy)
# We are going to modify a: better make a copy
a = ma.array(a, copy=np.logical_not(inplace))
if limits is None:
return a
if (not isinstance(limits, tuple)) and isinstance(limits, float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc,
contains_nan, nan_policy).reshape(shp)
else:
return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc,
upinc, contains_nan, nan_policy)
def moment(a, moment=1, axis=0):
"""
Calculates the nth moment about the mean for a sample.
Parameters
----------
a : array_like
data
moment : int, optional
order of central moment that is returned
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
Notes
-----
For more details about `moment`, see `scipy.stats.moment`.
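    Examples
    --------
    A minimal sketch: the second central moment of the unmasked values
    [0, 1, 2, 3] is 1.25 (the biased variance); the masked value is ignored.
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> a = np.ma.array([0, 1, 2, 3, 99], mask=[0, 0, 0, 0, 1])
    >>> float(mstats.moment(a, moment=2))
    1.25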
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
moment_shape = list(a.shape)
del moment_shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
# empty array, return nan(s) with shape matching `moment`
out_shape = (moment_shape if np.isscalar(moment)
else [len(moment)] + moment_shape)
if len(out_shape) == 0:
return dtype(np.nan)
else:
return ma.array(np.full(out_shape, np.nan, dtype=dtype))
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mean = a.mean(axis, keepdims=True)
mmnt = [_moment(a, i, axis, mean=mean) for i in moment]
return ma.array(mmnt)
else:
return _moment(a, moment, axis)
# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
def _moment(a, moment, axis, *, mean=None):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0 or moment == 1:
# By definition the zeroth moment about the mean is 1, and the first
# moment is 0.
shape = list(a.shape)
del shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
if len(shape) == 0:
return dtype(1.0 if moment == 0 else 0.0)
else:
return (ma.ones(shape, dtype=dtype) if moment == 0
else ma.zeros(shape, dtype=dtype))
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
mean = a.mean(axis, keepdims=True) if mean is None else mean
a_zero_mean = a - mean
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return s.mean(axis)
def variation(a, axis=0, ddof=0):
"""
Compute the coefficient of variation.
The coefficient of variation is the standard deviation divided by the
mean. This function is equivalent to::
np.std(x, axis=axis, ddof=ddof) / np.mean(x)
The default for ``ddof`` is 0, but many definitions of the coefficient
of variation use the square root of the unbiased sample variance
for the sample standard deviation, which corresponds to ``ddof=1``.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 0.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
Notes
-----
For more details about `variation`, see `scipy.stats.variation`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats.mstats import variation
>>> a = np.array([2,8,4])
>>> variation(a)
0.5345224838248487
>>> b = np.array([2,8,3,4])
>>> c = np.ma.masked_array(b, mask=[0,0,1,0])
>>> variation(c)
0.5345224838248487
    In the example above, it can be seen that this works the same as
    `scipy.stats.variation`, except that `scipy.stats.mstats.variation`
    ignores masked array elements.
"""
a, axis = _chk_asarray(a, axis)
return a.std(axis, ddof=ddof)/a.mean(axis)
def skew(a, axis=0, bias=True):
"""
Computes the skewness of a data set.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
For more details about `skew`, see `scipy.stats.skew`.
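    Examples
    --------
    A minimal sketch: the unmasked values [1, 2, 3, 4] are symmetric about
    their mean, so the skewness is 0; the masked outlier is ignored.
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> a = np.ma.array([1, 2, 3, 4, 100], mask=[0, 0, 0, 0, 1])
    >>> float(mstats.skew(a))
    0.0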
"""
a, axis = _chk_asarray(a,axis)
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m3 = _moment(a, 3, axis, mean=mean)
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
with np.errstate(all='ignore'):
vals = ma.where(zero, 0, m3 / m2**1.5)
if not bias and zero is not ma.masked and m2 is not ma.masked:
n = a.count(axis)
can_correct = ~zero & (n > 2)
if can_correct.any():
n = np.extract(can_correct, n)
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
np.place(vals, can_correct, nval)
return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
Notes
-----
For more details about `kurtosis`, see `scipy.stats.kurtosis`.
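    Examples
    --------
    A small worked example: for the four equally spaced values below, the
    Pearson kurtosis ``m4/m2**2`` is 1.64, so Fisher's definition gives
    -1.36.
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> np.allclose(mstats.kurtosis([1., 2., 3., 4.]), -1.36)
    True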
"""
a, axis = _chk_asarray(a, axis)
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m4 = _moment(a, 4, axis, mean=mean)
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
with np.errstate(all='ignore'):
vals = ma.where(zero, 0, m4 / m2**2.0)
if not bias and zero is not ma.masked and m2 is not ma.masked:
n = a.count(axis)
can_correct = ~zero & (n > 3)
if can_correct.any():
n = np.extract(can_correct, n)
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
np.place(vals, can_correct, nval+3.0)
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=0, bias=True):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Data array
axis : int or None, optional
Axis along which to calculate statistics. Default 0. If None,
compute over the whole array `a`.
ddof : int, optional
degree of freedom (default 0); note that default ddof is different
from the same routine in stats.describe
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
Returns
-------
nobs : int
        size of the data (discarding missing values)
minmax : (int, int)
min, max
mean : float
arithmetic mean
variance : float
unbiased variance
skewness : float
biased skewness
kurtosis : float
biased kurtosis
Examples
--------
>>> import numpy as np
>>> from scipy.stats.mstats import describe
>>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
>>> describe(ma)
DescribeResult(nobs=3, minmax=(masked_array(data=0,
mask=False,
fill_value=999999), masked_array(data=2,
mask=False,
fill_value=999999)), mean=1.0, variance=0.6666666666666666,
skewness=masked_array(data=0., mask=False, fill_value=1e+20),
kurtosis=-1.5)
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis)
mm = (ma.minimum.reduce(a, axis=axis), ma.maximum.reduce(a, axis=axis))
m = a.mean(axis)
v = a.var(axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
def stde_median(data, axis=None):
"""Returns the McKean-Schrader estimate of the standard error of the sample
    median along the given axis. Masked values are discarded.
Parameters
----------
data : ndarray
Data to trim.
axis : {None,int}, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
def _stdemed_1D(data):
data = np.sort(data.compressed())
n = len(data)
z = 2.5758293035489004
k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
return ((data[n-k] - data[k-1])/(2.*z))
data = ma.array(data, copy=False, subok=True)
if (axis is None):
return _stdemed_1D(data)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
return ma.apply_along_axis(_stdemed_1D, axis, data)
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, alternative='two-sided'):
"""
Tests whether the skew is different from the normal distribution.
Parameters
----------
a : array_like
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the skewness of the distribution underlying the sample
is different from that of the normal distribution (i.e. 0)
* 'less': the skewness of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the skewness of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : array_like
The computed z-score for this test.
pvalue : array_like
A p-value for the hypothesis test
Notes
-----
For more details about `skewtest`, see `scipy.stats.skewtest`.
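    Examples
    --------
    A quick consistency check: with no masked values, the result agrees
    with `scipy.stats.skewtest`.
    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng(12345678)
    >>> x = rng.normal(size=30)
    >>> res = stats.mstats.skewtest(np.ma.asarray(x))
    >>> expected = stats.skewtest(x)
    >>> np.allclose((res.statistic, res.pvalue),
    ...             (expected.statistic, expected.pvalue))
    True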
"""
a, axis = _chk_asarray(a, axis)
if axis is None:
a = a.ravel()
axis = 0
b2 = skew(a,axis)
n = a.count(axis)
if np.min(n) < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % np.min(n))
y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
W2 = -1 + ma.sqrt(2*(beta2-1))
delta = 1/ma.sqrt(0.5*ma.log(W2))
alpha = ma.sqrt(2.0/(W2-1))
y = ma.where(y == 0, 1, y)
Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
return SkewtestResult(*scipy.stats._stats_py._normtest_finish(Z, alternative))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, alternative='two-sided'):
"""
Tests whether a dataset has normal kurtosis
Parameters
----------
a : array_like
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the kurtosis of the distribution underlying the sample
is different from that of the normal distribution
* 'less': the kurtosis of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the kurtosis of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : array_like
The computed z-score for this test.
pvalue : array_like
The p-value for the hypothesis test
Notes
-----
For more details about `kurtosistest`, see `scipy.stats.kurtosistest`.
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
if np.min(n) < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % np.min(n))
if np.min(n) < 20:
warnings.warn(
"kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
np.min(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E)/ma.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2./(9.0*A)
denom = 1 + x*ma.sqrt(2/(A-4.0))
if np.ma.isMaskedArray(denom):
# For multi-dimensional array input
denom[denom == 0.0] = masked
elif denom == 0.0:
denom = masked
term2 = np.ma.where(denom > 0, ma.power((1-2.0/A)/denom, 1/3.0),
-ma.power(-(1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
return KurtosistestResult(
*scipy.stats._stats_py._normtest_finish(Z, alternative)
)
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0):
"""
Tests whether a sample differs from a normal distribution.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
Notes
-----
For more details about `normaltest`, see `scipy.stats.normaltest`.
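    Examples
    --------
    A minimal sketch: masked entries are ignored, so the result agrees
    with `scipy.stats.normaltest` on the compressed data.
    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng(87654321)
    >>> x = rng.normal(size=50)
    >>> xm = np.ma.array(np.concatenate([x, [999.]]),
    ...                  mask=[False] * 50 + [True])
    >>> res = stats.mstats.normaltest(xm)
    >>> np.allclose((res.statistic, res.pvalue), stats.normaltest(x))
    True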
"""
a, axis = _chk_asarray(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
limit=()):
"""
Computes empirical quantiles for a data array.
Samples quantile are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``,
where ``x[j]`` is the j-th order statistic, and gamma is a function of
``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and
``g = n*p + m - j``.
    Reinterpreting the above equations to compare to **R** leads to the
equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)``
Typical values of (alphap,betap) are:
- (0,1) : ``p(k) = k/n`` : linear interpolation of cdf
(**R** type 4)
- (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function
(**R** type 5)
- (0,0) : ``p(k) = k/(n+1)`` :
(**R** type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])].
(**R** type 7, **R** default)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x.
(**R** type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed
(**R** type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
a : array_like
Input data, as a sequence or array of dimension at most 2.
prob : array_like, optional
List of quantiles to compute.
alphap : float, optional
Plotting positions parameter, default is 0.4.
betap : float, optional
Plotting positions parameter, default is 0.4.
axis : int, optional
Axis along which to perform the trimming.
If None (default), the input array is first flattened.
limit : tuple, optional
Tuple of (lower, upper) values.
Values of `a` outside this open interval are ignored.
Returns
-------
mquantiles : MaskedArray
An array containing the calculated quantiles.
Notes
-----
    This formulation is very similar to **R**, except for the calculation of
    ``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined
    separately for each type.
References
----------
.. [1] *R* statistical software: https://www.r-project.org/
.. [2] *R* ``quantile`` function:
http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html
Examples
--------
>>> import numpy as np
>>> from scipy.stats.mstats import mquantiles
>>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
>>> mquantiles(a)
array([ 19.2, 40. , 42.8])
Using a 2D array, specifying axis and limit.
>>> data = np.array([[ 6., 7., 1.],
... [ 47., 15., 2.],
... [ 49., 36., 3.],
... [ 15., 39., 4.],
... [ 42., 40., -999.],
... [ 41., 41., -999.],
... [ 7., -999., -999.],
... [ 39., -999., -999.],
... [ 43., -999., -999.],
... [ 40., -999., -999.],
... [ 36., -999., -999.]])
>>> print(mquantiles(data, axis=0, limit=(0, 50)))
[[19.2 14.6 1.45]
[40. 37.5 2.5 ]
[42.8 40.05 3.55]]
>>> data[:, 2] = -999.
>>> print(mquantiles(data, axis=0, limit=(0, 50)))
[[19.200000000000003 14.6 --]
[40.0 37.5 --]
[42.800000000000004 40.05 --]]
"""
def _quantiles1D(data,m,p):
x = np.sort(data.compressed())
n = len(x)
if n == 0:
return ma.array(np.empty(len(p), dtype=float), mask=True)
elif n == 1:
return ma.array(np.resize(x, p.shape), mask=nomask)
aleph = (n*p + m)
k = np.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
data = ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
if limit:
condition = (limit[0] < data) & (data < limit[1])
data[~condition.filled(True)] = masked
p = np.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
This function is a shortcut to mquantile
"""
if (per < 0) or (per > 100.):
raise ValueError("The percentile should be between 0. and 100. !"
" (got %s)" % per)
return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=0).squeeze()
def plotting_positions(data, alpha=0.4, beta=0.4):
"""
Returns plotting positions (or empirical percentile points) for the data.
Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
- i is the rank order statistics
- n is the number of unmasked values along the given axis
- `alpha` and `beta` are two parameters.
Typical values for `alpha` and `beta` are:
- (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
- (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function
(R, type 5)
- (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``, in this case,
``p(k) = mode[F(x[k])]``. That's R default (R type 7)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
``p(k) ~ median[F(x[k])]``.
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
- (.3175, .3175): used in scipy.stats.probplot
Parameters
----------
data : array_like
Input data, as a sequence or array of dimension at most 2.
alpha : float, optional
Plotting positions parameter. Default is 0.4.
beta : float, optional
Plotting positions parameter. Default is 0.4.
Returns
-------
positions : MaskedArray
The calculated plotting positions.
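    Examples
    --------
    A minimal sketch: with ``alpha=beta=0`` (Weibull positions), the
    position of the value of rank i among n values is ``i/(n+1)``, returned
    in the original data order.
    >>> import numpy as np
    >>> from scipy.stats.mstats import plotting_positions
    >>> np.allclose(plotting_positions([3., 1., 2.], alpha=0, beta=0),
    ...             [0.75, 0.25, 0.5])
    True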
"""
data = ma.array(data, copy=False).reshape(1,-1)
n = data.count()
plpos = np.empty(data.size, dtype=float)
plpos[n:] = 0
plpos[data.argsort(axis=None)[:n]] = ((np.arange(1, n+1) - alpha) /
(n + 1.0 - alpha - beta))
return ma.array(plpos, mask=data._mask)
meppf = plotting_positions
def obrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
array in ``*args`` is one level of a factor. If an `f_oneway()` run on
    the transformed data is found significant, the variances are unequal. From
Maxwell and Delaney, p.112.
Returns: transformed data for use in an ANOVA
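    Examples
    --------
    A minimal sketch illustrating the defining property of the transform:
    the mean of each transformed column equals the sample variance of the
    corresponding group.
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> g1 = [1., 2., 3., 4.]
    >>> g2 = [2., 4., 6., 8.]
    >>> t = mstats.obrientransform(g1, g2)
    >>> np.allclose(t.mean(axis=0), [np.var(g1, ddof=1), np.var(g2, ddof=1)])
    True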
"""
data = argstoarray(*args).T
v = data.var(axis=0,ddof=1)
m = data.mean(0)
n = data.count(0).astype(float)
# result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
data -= m
data **= 2
data *= (n-1.5)*n
data -= 0.5*v*(n-1)
data /= (n-1.)*(n-2.)
if not ma.allclose(v,data.mean(0)):
raise ValueError("Lack of convergence in obrientransform.")
return data
def sem(a, axis=0, ddof=1):
"""
Calculates the standard error of the mean of the input array.
Also sometimes called standard error of measurement.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
If axis is None, ravel `a` first. If axis is an integer, this will be
the axis over which to operate. Defaults to 0.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` changed in scipy 0.15.0 to be consistent with
`scipy.stats.sem` as well as with the most common definition used (like in
the R documentation).
Examples
--------
Find standard error along the first axis:
>>> import numpy as np
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> print(stats.mstats.sem(a))
[2.8284271247461903 2.8284271247461903 2.8284271247461903
2.8284271247461903]
Find standard error across the whole array, using n degrees of freedom:
>>> print(stats.mstats.sem(a, axis=None, ddof=0))
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n)
return s
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
one per treatment group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
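    Examples
    --------
    A minimal sketch with two small groups; the statistic is shown rounded,
    and ``res.pvalue`` holds the corresponding tail probability.
    >>> from scipy.stats.mstats import f_oneway
    >>> res = f_oneway([1, 2, 3, 4], [2, 3, 4, 5])
    >>> float(round(res.statistic, 6))
    1.2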
"""
# Construct a single array of arguments: each row is a group
data = argstoarray(*args)
ngroups = len(data)
ntot = data.count()
sstot = (data**2).sum() - (data.sum())**2/float(ntot)
ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
sswg = sstot-ssbg
dfbg = ngroups-1
dfwg = ntot - ngroups
msb = ssbg/float(dfbg)
msw = sswg/float(dfwg)
f = msb/msw
prob = special.fdtrc(dfbg, dfwg, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
This function calculates the Friedman Chi-square test for repeated measures
and returns the result, along with the associated probability value.
Each input is considered a given group. Ideally, the number of treatments
among each group should be equal. If this is not the case, only the first
n treatments are taken into account, where n is the number of treatments
of the smallest group.
If a group has some missing values, the corresponding treatments are masked
in the other groups.
The test statistic is corrected for ties.
Masked values in one group are propagated to the other groups.
Returns
-------
statistic : float
the test statistic.
pvalue : float
the associated p-value.
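    Examples
    --------
    A small sketch with three shifted groups; values are rounded for
    display.
    >>> from scipy.stats.mstats import friedmanchisquare
    >>> res = friedmanchisquare([1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6])
    >>> float(round(res.statistic, 4)), float(round(res.pvalue, 4))
    (8.0, 0.0183)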
"""
data = argstoarray(*args).astype(float)
k = len(data)
if k < 3:
raise ValueError("Less than 3 groups (%i): " % k +
"the Friedman test is NOT appropriate.")
ranked = ma.masked_values(rankdata(data, axis=0), 0)
if ranked._mask is not nomask:
ranked = ma.mask_cols(ranked)
ranked = ranked.compressed().reshape(k,-1).view(ndarray)
else:
ranked = ranked._data
(k,n) = ranked.shape
# Ties correction
repeats = [find_repeats(row) for row in ranked.T]
ties = np.array([y for x, y in repeats if x.size > 0])
tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))
ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
return FriedmanchisquareResult(chisq,
distributions.chi2.sf(chisq, k-1))
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult', ('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t"):
"""
Computes the Brunner-Munzel test on samples x and y
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
alternative : 'less', 'two-sided', or 'greater', optional
Whether to get the p-value for the one-sided hypothesis ('less'
or 'greater') or for the two-sided hypothesis ('two-sided').
        Default is 'two-sided'.
distribution : 't' or 'normal', optional
Whether to get the p-value by t-distribution or by standard normal
distribution.
        Default is 't'.
Returns
-------
statistic : float
        The Brunner-Munzel W statistic.
pvalue : float
        p-value assuming a t distribution. One-sided or
two-sided, depending on the choice of `alternative` and `distribution`.
See Also
--------
mannwhitneyu : Mann-Whitney rank test on two samples.
Notes
-----
For more details on `brunnermunzel`, see `scipy.stats.brunnermunzel`.
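    Examples
    --------
    A minimal sketch; masked entries (here, the NaNs) are discarded before
    the statistic is computed.
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> from scipy.stats.mstats import brunnermunzel
    >>> x = ma.masked_invalid([1.0, 2.0, np.nan, 4.0])
    >>> y = ma.masked_invalid([3.0, 4.0, 5.0, np.nan])
    >>> res = brunnermunzel(x, y)  # inspect res.statistic, res.pvalue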
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
nx = len(x)
ny = len(y)
if nx == 0 or ny == 0:
return BrunnerMunzelResult(np.nan, np.nan)
rankc = rankdata(np.concatenate((x,y)))
rankcx = rankc[0:nx]
rankcy = rankc[nx:nx+ny]
rankcx_mean = np.mean(rankcx)
rankcy_mean = np.mean(rankcy)
rankx = rankdata(x)
ranky = rankdata(y)
rankx_mean = np.mean(rankx)
ranky_mean = np.mean(ranky)
Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
Sx /= nx - 1
Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
Sy /= ny - 1
wbfn = nx * ny * (rankcy_mean - rankcx_mean)
wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
if distribution == "t":
df_numer = np.power(nx * Sx + ny * Sy, 2.0)
df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
df = df_numer / df_denom
p = distributions.t.cdf(wbfn, df)
elif distribution == "normal":
p = distributions.norm.cdf(wbfn)
else:
raise ValueError(
"distribution should be 't' or 'normal'")
if alternative == "greater":
pass
elif alternative == "less":
p = 1 - p
elif alternative == "two-sided":
p = 2 * np.min([p, 1-p])
else:
raise ValueError(
"alternative should be 'less', 'greater' or 'two-sided'")
return BrunnerMunzelResult(wbfn, p)
| 117,894 | 32.44539 | 84 | py |
| scipy | scipy-main/scipy/stats/_multicomp.py |
from __future__ import annotations
import warnings
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
import numpy as np
from scipy import stats
from scipy.optimize import minimize_scalar
from scipy.stats._common import ConfidenceInterval
from scipy.stats._qmc import check_random_state
from scipy.stats._stats_py import _var
if TYPE_CHECKING:
import numpy.typing as npt
from scipy._lib._util import DecimalNumber, SeedType
from typing import Literal, Sequence # noqa: UP035
__all__ = [
'dunnett'
]
@dataclass
class DunnettResult:
"""Result object returned by `scipy.stats.dunnett`.
Attributes
----------
statistic : float ndarray
The computed statistic of the test for each comparison. The element
at index ``i`` is the statistic for the comparison between
groups ``i`` and the control.
pvalue : float ndarray
The computed p-value of the test for each comparison. The element
at index ``i`` is the p-value for the comparison between
group ``i`` and the control.
"""
statistic: np.ndarray
pvalue: np.ndarray
_alternative: Literal['two-sided', 'less', 'greater'] = field(repr=False)
_rho: np.ndarray = field(repr=False)
_df: int = field(repr=False)
_std: float = field(repr=False)
_mean_samples: np.ndarray = field(repr=False)
_mean_control: np.ndarray = field(repr=False)
_n_samples: np.ndarray = field(repr=False)
_n_control: int = field(repr=False)
_rng: SeedType = field(repr=False)
_ci: ConfidenceInterval | None = field(default=None, repr=False)
_ci_cl: DecimalNumber | None = field(default=None, repr=False)
def __str__(self):
# Note: `__str__` prints the confidence intervals from the most
# recent call to `confidence_interval`. If it has not been called,
# it will be called with the default CL of .95.
if self._ci is None:
self.confidence_interval(confidence_level=.95)
s = (
"Dunnett's test"
f" ({self._ci_cl*100:.1f}% Confidence Interval)\n"
"Comparison Statistic p-value Lower CI Upper CI\n"
)
for i in range(self.pvalue.size):
s += (f" (Sample {i} - Control) {self.statistic[i]:>10.3f}"
f"{self.pvalue[i]:>10.3f}"
f"{self._ci.low[i]:>10.3f}"
f"{self._ci.high[i]:>10.3f}\n")
return s
def _allowance(
self, confidence_level: DecimalNumber = 0.95, tol: DecimalNumber = 1e-3
) -> float:
"""Allowance.
It is the quantity to add/subtract from the observed difference
between the means of observed groups and the mean of the control
group. The result gives confidence limits.
Parameters
----------
confidence_level : float, optional
Confidence level for the computed confidence interval.
Default is .95.
tol : float, optional
A tolerance for numerical optimization: the allowance will produce
a confidence within ``10*tol*(1 - confidence_level)`` of the
specified level, or a warning will be emitted. Tight tolerances
may be impractical due to noisy evaluation of the objective.
Default is 1e-3.
Returns
-------
allowance : float
Allowance around the mean.
"""
alpha = 1 - confidence_level
def pvalue_from_stat(statistic):
statistic = np.array(statistic)
sf = _pvalue_dunnett(
rho=self._rho, df=self._df,
statistic=statistic, alternative=self._alternative,
rng=self._rng
)
return abs(sf - alpha)/alpha
# Evaluation of `pvalue_from_stat` is noisy due to the use of RQMC to
# evaluate `multivariate_t.cdf`. `minimize_scalar` is not designed
# to tolerate a noisy objective function and may fail to find the
# minimum accurately. We mitigate this possibility with the validation
# step below, but implementation of a noise-tolerant root finder or
# minimizer would be a welcome enhancement. See gh-18150.
res = minimize_scalar(pvalue_from_stat, method='brent', tol=tol)
critical_value = res.x
# validation
# tol*10 because tol=1e-3 means we tolerate a 1% change at most
if res.success is False or res.fun >= tol*10:
warnings.warn(
"Computation of the confidence interval did not converge to "
"the desired level. The confidence level corresponding with "
f"the returned interval is approximately {alpha*(1+res.fun)}.",
stacklevel=3
)
# From [1] p. 1101 between (1) and (3)
allowance = critical_value*self._std*np.sqrt(
1/self._n_samples + 1/self._n_control
)
return abs(allowance)
def confidence_interval(
self, confidence_level: DecimalNumber = 0.95
) -> ConfidenceInterval:
"""Compute the confidence interval for the specified confidence level.
Parameters
----------
confidence_level : float, optional
Confidence level for the computed confidence interval.
Default is .95.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence intervals for each
comparison. The high and low values are accessible for each
comparison at index ``i`` for each group ``i``.
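        Examples
        --------
        A sketch reusing the blood-count data from the `dunnett`
        docstring; the exact bounds may vary slightly between runs
        because the critical value is found by randomized integration.
        >>> import numpy as np
        >>> from scipy.stats import dunnett
        >>> control = np.array([7.40, 8.50, 7.20, 8.24, 9.84, 8.32])
        >>> drug_a = np.array([9.76, 8.80, 7.68, 9.36])
        >>> drug_b = np.array([12.80, 9.68, 12.16, 9.20, 10.55])
        >>> res = dunnett(drug_a, drug_b, control=control)
        >>> ci = res.confidence_interval(confidence_level=0.95)
        >>> ci.low, ci.high  # doctest: +SKIP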
"""
# check to see if the supplied confidence level matches that of the
# previously computed CI.
if (self._ci is not None) and (confidence_level == self._ci_cl):
return self._ci
if not (0 < confidence_level < 1):
raise ValueError("Confidence level must be between 0 and 1.")
allowance = self._allowance(confidence_level=confidence_level)
diff_means = self._mean_samples - self._mean_control
low = diff_means-allowance
high = diff_means+allowance
if self._alternative == 'greater':
high = [np.inf] * len(diff_means)
elif self._alternative == 'less':
low = [-np.inf] * len(diff_means)
self._ci_cl = confidence_level
self._ci = ConfidenceInterval(
low=low,
high=high
)
return self._ci
def dunnett(
*samples: npt.ArrayLike, # noqa: D417
control: npt.ArrayLike,
alternative: Literal['two-sided', 'less', 'greater'] = "two-sided",
random_state: SeedType = None
) -> DunnettResult:
"""Dunnett's test: multiple comparisons of means against a control group.
This is an implementation of Dunnett's original, single-step test as
described in [1]_.
Parameters
----------
sample1, sample2, ... : 1D array_like
The sample measurements for each experimental group.
control : 1D array_like
The sample measurements for the control group.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The null hypothesis is that the means of the distributions underlying
the samples and control are equal. The following alternative
hypotheses are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
and control are unequal.
* 'less': the means of the distributions underlying the samples
are less than the mean of the distribution underlying the control.
* 'greater': the means of the distributions underlying the
samples are greater than the mean of the distribution underlying
the control.
random_state : {None, int, `numpy.random.Generator`}, optional
If `random_state` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(random_state)``.
If `random_state` is already a ``Generator`` instance, then the
provided instance is used.
The random number generator is used to control the randomized
Quasi-Monte Carlo integration of the multivariate-t distribution.
Returns
-------
res : `~scipy.stats._result_classes.DunnettResult`
An object containing attributes:
statistic : float ndarray
The computed statistic of the test for each comparison. The element
at index ``i`` is the statistic for the comparison between
groups ``i`` and the control.
pvalue : float ndarray
The computed p-value of the test for each comparison. The element
at index ``i`` is the p-value for the comparison between
group ``i`` and the control.
And the following method:
confidence_interval(confidence_level=0.95) :
Compute the difference in means of the groups
with the control +- the allowance.
See Also
--------
tukey_hsd : performs pairwise comparison of means.
Notes
-----
Like the independent-sample t-test, Dunnett's test [1]_ is used to make
inferences about the means of distributions from which samples were drawn.
However, when multiple t-tests are performed at a fixed significance level,
the "family-wise error rate" - the probability of incorrectly rejecting the
null hypothesis in at least one test - will exceed the significance level.
Dunnett's test is designed to perform multiple comparisons while
controlling the family-wise error rate.
Dunnett's test compares the means of multiple experimental groups
against a single control group. Tukey's Honestly Significant Difference Test
is another multiple-comparison test that controls the family-wise error
rate, but `tukey_hsd` performs *all* pairwise comparisons between groups.
When pairwise comparisons between experimental groups are not needed,
Dunnett's test is preferable due to its higher power.
The use of this test relies on several assumptions.
1. The observations are independent within and among groups.
2. The observations within each group are normally distributed.
3. The distributions from which the samples are drawn have the same finite
variance.
References
----------
.. [1] Charles W. Dunnett. "A Multiple Comparison Procedure for Comparing
Several Treatments with a Control."
Journal of the American Statistical Association, 50:272, 1096-1121,
:doi:`10.1080/01621459.1955.10501294`, 1955.
Examples
--------
    In [1]_, the influence of drugs on blood count measurements on three
    groups of animals is investigated.
The following table summarizes the results of the experiment in which
two groups received different drugs, and one group acted as a control.
    Blood counts (in millions of cells per cubic millimeter) were recorded::
        Control     7.40  8.50  7.20  8.24  9.84  8.32
        Drug A      9.76  8.80  7.68  9.36
        Drug B     12.80  9.68 12.16  9.20 10.55
>>> import numpy as np
>>> control = np.array([7.40, 8.50, 7.20, 8.24, 9.84, 8.32])
>>> drug_a = np.array([9.76, 8.80, 7.68, 9.36])
>>> drug_b = np.array([12.80, 9.68, 12.16, 9.20, 10.55])
We would like to see if the means between any of the groups are
significantly different. First, visually examine a box and whisker plot.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.boxplot([control, drug_a, drug_b])
>>> ax.set_xticklabels(["Control", "Drug A", "Drug B"]) # doctest: +SKIP
>>> ax.set_ylabel("mean") # doctest: +SKIP
>>> plt.show()
Note the overlapping interquartile ranges of the drug A group and control
group and the apparent separation between the drug B group and control
group.
Next, we will use Dunnett's test to assess whether the difference
between group means is significant while controlling the family-wise error
rate: the probability of making any false discoveries.
Let the null hypothesis be that the experimental groups have the same
mean as the control and the alternative be that an experimental group does
not have the same mean as the control. We will consider a 5% family-wise
error rate to be acceptable, and therefore we choose 0.05 as the threshold
for significance.
>>> from scipy.stats import dunnett
>>> res = dunnett(drug_a, drug_b, control=control)
>>> res.pvalue
array([0.62004941, 0.0059035 ]) # may vary
The p-value corresponding with the comparison between group A and control
exceeds 0.05, so we do not reject the null hypothesis for that comparison.
However, the p-value corresponding with the comparison between group B
and control is less than 0.05, so we consider the experimental results
to be evidence against the null hypothesis in favor of the alternative:
group B has a different mean than the control group.
"""
samples_, control_, rng = _iv_dunnett(
samples=samples, control=control,
alternative=alternative, random_state=random_state
)
rho, df, n_group, n_samples, n_control = _params_dunnett(
samples=samples_, control=control_
)
statistic, std, mean_control, mean_samples = _statistic_dunnett(
samples_, control_, df, n_samples, n_control
)
pvalue = _pvalue_dunnett(
rho=rho, df=df, statistic=statistic, alternative=alternative, rng=rng
)
return DunnettResult(
statistic=statistic, pvalue=pvalue,
_alternative=alternative,
_rho=rho, _df=df, _std=std,
_mean_samples=mean_samples,
_mean_control=mean_control,
_n_samples=n_samples,
_n_control=n_control,
_rng=rng
)
def _iv_dunnett(
samples: Sequence[npt.ArrayLike],
control: npt.ArrayLike,
alternative: Literal['two-sided', 'less', 'greater'],
random_state: SeedType
) -> tuple[list[np.ndarray], np.ndarray, SeedType]:
"""Input validation for Dunnett's test."""
rng = check_random_state(random_state)
if alternative not in {'two-sided', 'less', 'greater'}:
raise ValueError(
"alternative must be 'less', 'greater' or 'two-sided'"
)
ndim_msg = "Control and samples groups must be 1D arrays"
n_obs_msg = "Control and samples groups must have at least 1 observation"
control = np.asarray(control)
samples_ = [np.asarray(sample) for sample in samples]
# samples checks
samples_control: list[np.ndarray] = samples_ + [control]
for sample in samples_control:
if sample.ndim > 1:
raise ValueError(ndim_msg)
if sample.size < 1:
raise ValueError(n_obs_msg)
return samples_, control, rng
def _params_dunnett(
samples: list[np.ndarray], control: np.ndarray
) -> tuple[np.ndarray, int, int, np.ndarray, int]:
"""Specific parameters for Dunnett's test.
Degree of freedom is the number of observations minus the number of groups
including the control.
"""
n_samples = np.array([sample.size for sample in samples])
# From [1] p. 1100 d.f. = (sum N)-(p+1)
n_sample = n_samples.sum()
n_control = control.size
n = n_sample + n_control
n_groups = len(samples)
df = n - n_groups - 1
# From [1] p. 1103 rho_ij = 1/sqrt((N0/Ni+1)(N0/Nj+1))
rho = n_control/n_samples + 1
rho = 1/np.sqrt(rho[:, None] * rho[None, :])
np.fill_diagonal(rho, 1)
return rho, df, n_groups, n_samples, n_control
def _statistic_dunnett(
samples: list[np.ndarray], control: np.ndarray, df: int,
n_samples: np.ndarray, n_control: int
) -> tuple[np.ndarray, float, np.ndarray, np.ndarray]:
"""Statistic of Dunnett's test.
Computation based on the original single-step test from [1].
"""
mean_control = np.mean(control)
mean_samples = np.array([np.mean(sample) for sample in samples])
all_samples = [control] + samples
all_means = np.concatenate([[mean_control], mean_samples])
# Variance estimate s^2 from [1] Eq. 1
s2 = np.sum([_var(sample, mean=mean)*sample.size
for sample, mean in zip(all_samples, all_means)]) / df
std = np.sqrt(s2)
# z score inferred from [1] unlabeled equation after Eq. 1
z = (mean_samples - mean_control) / np.sqrt(1/n_samples + 1/n_control)
return z / std, std, mean_control, mean_samples
def _pvalue_dunnett(
rho: np.ndarray, df: int, statistic: np.ndarray,
alternative: Literal['two-sided', 'less', 'greater'],
rng: SeedType = None
) -> np.ndarray:
"""pvalue from the multivariate t-distribution.
Critical values come from the multivariate student-t distribution.
"""
statistic = statistic.reshape(-1, 1)
mvt = stats.multivariate_t(shape=rho, df=df, seed=rng)
if alternative == "two-sided":
statistic = abs(statistic)
pvalue = 1 - mvt.cdf(statistic, lower_limit=-statistic)
elif alternative == "greater":
pvalue = 1 - mvt.cdf(statistic, lower_limit=-np.inf)
else:
pvalue = 1 - mvt.cdf(np.inf, lower_limit=statistic)
return np.atleast_1d(pvalue)
| 17,282 | 36.571739 | 80 | py |
| scipy | scipy-main/scipy/stats/mstats_extras.py |
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.
import warnings
from . import _mstats_extras
__all__ = [ # noqa: F822
'compare_medians_ms',
'hdquantiles', 'hdmedian', 'hdquantiles_sd',
'idealfourths',
'median_cihs','mjci','mquantiles_cimj',
'rsh',
'trimmed_mean_ci', 'float_', 'int_', 'ma', 'MaskedArray', 'mstats',
'norm', 'beta', 't', 'binom'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.stats.mstats_extras is deprecated and has no attribute "
f"{name}. Try looking in scipy.stats instead.")
warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
"the `scipy.stats.mstats_extras` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_mstats_extras, name)
| 1,001 | 27.628571 | 77 | py |
| scipy | scipy-main/scipy/stats/_relative_risk.py |
import operator
from dataclasses import dataclass
import numpy as np
from scipy.special import ndtri
from ._common import ConfidenceInterval
def _validate_int(n, bound, name):
msg = f'{name} must be an integer not less than {bound}, but got {n!r}'
try:
n = operator.index(n)
except TypeError:
raise TypeError(msg) from None
if n < bound:
raise ValueError(msg)
return n
@dataclass
class RelativeRiskResult:
"""
Result of `scipy.stats.contingency.relative_risk`.
Attributes
----------
relative_risk : float
This is::
(exposed_cases/exposed_total) / (control_cases/control_total)
exposed_cases : int
The number of "cases" (i.e. occurrence of disease or other event
of interest) among the sample of "exposed" individuals.
exposed_total : int
The total number of "exposed" individuals in the sample.
control_cases : int
The number of "cases" among the sample of "control" or non-exposed
individuals.
control_total : int
The total number of "control" individuals in the sample.
Methods
-------
confidence_interval :
Compute the confidence interval for the relative risk estimate.
"""
relative_risk: float
exposed_cases: int
exposed_total: int
control_cases: int
control_total: int
def confidence_interval(self, confidence_level=0.95):
"""
Compute the confidence interval for the relative risk.
The confidence interval is computed using the Katz method
(i.e. "Method C" of [1]_; see also [2]_, section 3.1.2).
Parameters
----------
confidence_level : float, optional
The confidence level to use for the confidence interval.
Default is 0.95.
Returns
-------
ci : ConfidenceInterval instance
The return value is an object with attributes ``low`` and
``high`` that hold the confidence interval.
References
----------
.. [1] D. Katz, J. Baptista, S. P. Azen and M. C. Pike, "Obtaining
confidence intervals for the risk ratio in cohort studies",
Biometrics, 34, 469-474 (1978).
.. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
CRC Press LLC, Boca Raton, FL, USA (1996).
Examples
--------
>>> from scipy.stats.contingency import relative_risk
>>> result = relative_risk(exposed_cases=10, exposed_total=75,
... control_cases=12, control_total=225)
>>> result.relative_risk
2.5
>>> result.confidence_interval()
ConfidenceInterval(low=1.1261564003469628, high=5.549850800541033)
"""
if not 0 <= confidence_level <= 1:
raise ValueError('confidence_level must be in the interval '
'[0, 1].')
# Handle edge cases where either exposed_cases or control_cases
# is zero. We follow the convention of the R function riskratio
# from the epitools library.
if self.exposed_cases == 0 and self.control_cases == 0:
# relative risk is nan.
return ConfidenceInterval(low=np.nan, high=np.nan)
elif self.exposed_cases == 0:
# relative risk is 0.
return ConfidenceInterval(low=0.0, high=np.nan)
elif self.control_cases == 0:
# relative risk is inf
return ConfidenceInterval(low=np.nan, high=np.inf)
alpha = 1 - confidence_level
z = ndtri(1 - alpha/2)
rr = self.relative_risk
# Estimate of the variance of log(rr) is
# var(log(rr)) = 1/exposed_cases - 1/exposed_total +
# 1/control_cases - 1/control_total
# and the standard error is the square root of that.
se = np.sqrt(1/self.exposed_cases - 1/self.exposed_total +
1/self.control_cases - 1/self.control_total)
delta = z*se
katz_lo = rr*np.exp(-delta)
katz_hi = rr*np.exp(delta)
return ConfidenceInterval(low=katz_lo, high=katz_hi)
def relative_risk(exposed_cases, exposed_total, control_cases, control_total):
"""
Compute the relative risk (also known as the risk ratio).
This function computes the relative risk associated with a 2x2
contingency table ([1]_, section 2.2.3; [2]_, section 3.1.2). Instead
of accepting a table as an argument, the individual numbers that are
used to compute the relative risk are given as separate parameters.
This is to avoid the ambiguity of which row or column of the contingency
table corresponds to the "exposed" cases and which corresponds to the
"control" cases. Unlike, say, the odds ratio, the relative risk is not
invariant under an interchange of the rows or columns.
Parameters
----------
exposed_cases : nonnegative int
The number of "cases" (i.e. occurrence of disease or other event
of interest) among the sample of "exposed" individuals.
exposed_total : positive int
The total number of "exposed" individuals in the sample.
control_cases : nonnegative int
The number of "cases" among the sample of "control" or non-exposed
individuals.
control_total : positive int
The total number of "control" individuals in the sample.
Returns
-------
result : instance of `~scipy.stats._result_classes.RelativeRiskResult`
The object has the float attribute ``relative_risk``, which is::
rr = (exposed_cases/exposed_total) / (control_cases/control_total)
The object also has the method ``confidence_interval`` to compute
the confidence interval of the relative risk for a given confidence
level.
See Also
--------
odds_ratio
Notes
-----
The R package epitools has the function `riskratio`, which accepts
a table with the following layout::
disease=0 disease=1
exposed=0 (ref) n00 n01
exposed=1 n10 n11
With a 2x2 table in the above format, the estimate of the CI is
computed by `riskratio` when the argument method="wald" is given,
or with the function `riskratio.wald`.
For example, in a test of the incidence of lung cancer among a
sample of smokers and nonsmokers, the "exposed" category would
correspond to "is a smoker" and the "disease" category would
correspond to "has or had lung cancer".
To pass the same data to ``relative_risk``, use::
relative_risk(n11, n10 + n11, n01, n00 + n01)
.. versionadded:: 1.7.0
References
----------
.. [1] Alan Agresti, An Introduction to Categorical Data Analysis
(second edition), Wiley, Hoboken, NJ, USA (2007).
.. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
CRC Press LLC, Boca Raton, FL, USA (1996).
Examples
--------
>>> from scipy.stats.contingency import relative_risk
This example is from Example 3.1 of [2]_. The results of a heart
disease study are summarized in the following table::
High CAT Low CAT Total
-------- ------- -----
CHD 27 44 71
No CHD 95 443 538
Total 122 487 609
CHD is coronary heart disease, and CAT refers to the level of
circulating catecholamine. CAT is the "exposure" variable, and
high CAT is the "exposed" category. So the data from the table
to be passed to ``relative_risk`` is::
exposed_cases = 27
exposed_total = 122
control_cases = 44
control_total = 487
>>> result = relative_risk(27, 122, 44, 487)
>>> result.relative_risk
2.4495156482861398
Find the confidence interval for the relative risk.
>>> result.confidence_interval(confidence_level=0.95)
ConfidenceInterval(low=1.5836990926700116, high=3.7886786315466354)
The interval does not contain 1, so the data supports the statement
that high CAT is associated with greater risk of CHD.
"""
# Relative risk is a trivial calculation. The nontrivial part is in the
# `confidence_interval` method of the RelativeRiskResult class.
exposed_cases = _validate_int(exposed_cases, 0, "exposed_cases")
exposed_total = _validate_int(exposed_total, 1, "exposed_total")
control_cases = _validate_int(control_cases, 0, "control_cases")
control_total = _validate_int(control_total, 1, "control_total")
if exposed_cases > exposed_total:
raise ValueError('exposed_cases must not exceed exposed_total.')
if control_cases > control_total:
raise ValueError('control_cases must not exceed control_total.')
if exposed_cases == 0 and control_cases == 0:
# relative risk is 0/0.
rr = np.nan
elif exposed_cases == 0:
# relative risk is 0/nonzero
rr = 0.0
elif control_cases == 0:
# relative risk is nonzero/0.
rr = np.inf
else:
p1 = exposed_cases / exposed_total
p2 = control_cases / control_total
rr = p1 / p2
return RelativeRiskResult(relative_risk=rr,
exposed_cases=exposed_cases,
exposed_total=exposed_total,
control_cases=control_cases,
control_total=control_total)
| 9,571 | 35.257576 | 78 | py |
| scipy | scipy-main/scipy/stats/sampling.py |
"""
======================================================
Random Number Generators (:mod:`scipy.stats.sampling`)
======================================================
.. currentmodule:: scipy.stats.sampling
This module contains a collection of random number generators to sample
from univariate continuous and discrete distributions. It uses the
implementation of a C library called "UNU.RAN". The only exception is
RatioUniforms, which is a pure Python implementation of the
Ratio-of-Uniforms method.
Generators Wrapped
==================
For continuous distributions
----------------------------
.. autosummary::
:toctree: generated/
NumericalInverseHermite
NumericalInversePolynomial
TransformedDensityRejection
SimpleRatioUniforms
RatioUniforms
For discrete distributions
--------------------------
.. autosummary::
:toctree: generated/
DiscreteAliasUrn
DiscreteGuideTable
Warnings / Errors used in :mod:`scipy.stats.sampling`
-----------------------------------------------------
.. autosummary::
:toctree: generated/
UNURANError
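Examples
--------
A minimal sketch: sample from a custom continuous distribution by passing
an object with a ``pdf`` method (here, an unnormalized standard normal)
to `NumericalInversePolynomial`.
>>> import numpy as np
>>> from scipy.stats.sampling import NumericalInversePolynomial
>>> class StandardNormal:
...     def pdf(self, x):
...         return np.exp(-0.5 * x**2)
>>> rng = NumericalInversePolynomial(StandardNormal())
>>> samples = rng.rvs(5)  # five draws from the standard normal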
"""
from ._sampling import RatioUniforms # noqa: F401
from ._unuran.unuran_wrapper import ( # noqa: F401
TransformedDensityRejection,
DiscreteAliasUrn,
DiscreteGuideTable,
NumericalInversePolynomial,
NumericalInverseHermite,
SimpleRatioUniforms,
UNURANError
)
| 1,372 | 23.517857 | 71 | py |
| scipy | scipy-main/scipy/stats/_crosstab.py |
import numpy as np
from scipy.sparse import coo_matrix
from scipy._lib._bunch import _make_tuple_bunch
CrosstabResult = _make_tuple_bunch(
"CrosstabResult", ["elements", "count"]
)
def crosstab(*args, levels=None, sparse=False):
"""
Return table of counts for each possible unique combination in ``*args``.
When ``len(args) > 1``, the array computed by this function is
often referred to as a *contingency table* [1]_.
The arguments must be sequences with the same length. The second return
value, `count`, is an integer array with ``len(args)`` dimensions. If
`levels` is None, the shape of `count` is ``(n0, n1, ...)``, where ``nk``
is the number of unique elements in ``args[k]``.
Parameters
----------
*args : sequences
A sequence of sequences whose unique aligned elements are to be
counted. The sequences in args must all be the same length.
levels : sequence, optional
If `levels` is given, it must be a sequence that is the same length as
`args`. Each element in `levels` is either a sequence or None. If it
is a sequence, it gives the values in the corresponding sequence in
`args` that are to be counted. If any value in the sequences in `args`
does not occur in the corresponding sequence in `levels`, that value
is ignored and not counted in the returned array `count`. The default
value of `levels` for ``args[i]`` is ``np.unique(args[i])``
sparse : bool, optional
If True, return a sparse matrix. The matrix will be an instance of
the `scipy.sparse.coo_matrix` class. Because SciPy's sparse matrices
must be 2-d, only two input sequences are allowed when `sparse` is
True. Default is False.
Returns
-------
res : CrosstabResult
An object containing the following attributes:
elements : tuple of numpy.ndarrays.
Tuple of length ``len(args)`` containing the arrays of elements
that are counted in `count`. These can be interpreted as the
labels of the corresponding dimensions of `count`. If `levels` was
given, then if ``levels[i]`` is not None, ``elements[i]`` will
hold the values given in ``levels[i]``.
count : numpy.ndarray or scipy.sparse.coo_matrix
Counts of the unique elements in ``zip(*args)``, stored in an
array. Also known as a *contingency table* when ``len(args) > 1``.
See Also
--------
numpy.unique
Notes
-----
.. versionadded:: 1.7.0
References
----------
.. [1] "Contingency table", http://en.wikipedia.org/wiki/Contingency_table
Examples
--------
>>> from scipy.stats.contingency import crosstab
Given the lists `a` and `x`, create a contingency table that counts the
frequencies of the corresponding pairs.
>>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
>>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
>>> res = crosstab(a, x)
>>> avals, xvals = res.elements
>>> avals
array(['A', 'B'], dtype='<U1')
>>> xvals
array(['X', 'Y', 'Z'], dtype='<U1')
>>> res.count
array([[2, 3, 0],
[1, 0, 4]])
So `('A', 'X')` occurs twice, `('A', 'Y')` occurs three times, etc.
Higher dimensional contingency tables can be created.
>>> p = [0, 0, 0, 0, 1, 1, 1, 0, 0, 1]
>>> res = crosstab(a, x, p)
>>> res.count
array([[[2, 0],
[2, 1],
[0, 0]],
[[1, 0],
[0, 0],
[1, 3]]])
>>> res.count.shape
(2, 3, 2)
The values to be counted can be set by using the `levels` argument.
It allows the elements of interest in each input sequence to be
    given explicitly instead of finding the unique elements of the sequence.
For example, suppose one of the arguments is an array containing the
answers to a survey question, with integer values 1 to 4. Even if the
value 1 does not occur in the data, we want an entry for it in the table.
>>> q1 = [2, 3, 3, 2, 4, 4, 2, 3, 4, 4, 4, 3, 3, 3, 4] # 1 does not occur.
>>> q2 = [4, 4, 2, 2, 2, 4, 1, 1, 2, 2, 4, 2, 2, 2, 4] # 3 does not occur.
>>> options = [1, 2, 3, 4]
>>> res = crosstab(q1, q2, levels=(options, options))
>>> res.count
array([[0, 0, 0, 0],
[1, 1, 0, 1],
[1, 4, 0, 1],
[0, 3, 0, 3]])
If `levels` is given, but an element of `levels` is None, the unique values
of the corresponding argument are used. For example,
>>> res = crosstab(q1, q2, levels=(None, options))
>>> res.elements
[array([2, 3, 4]), [1, 2, 3, 4]]
>>> res.count
array([[1, 1, 0, 1],
[1, 4, 0, 1],
[0, 3, 0, 3]])
If we want to ignore the pairs where 4 occurs in ``q2``, we can
give just the values [1, 2] to `levels`, and the 4 will be ignored:
>>> res = crosstab(q1, q2, levels=(None, [1, 2]))
>>> res.elements
[array([2, 3, 4]), [1, 2]]
>>> res.count
array([[1, 1],
[1, 4],
[0, 3]])
Finally, let's repeat the first example, but return a sparse matrix:
>>> res = crosstab(a, x, sparse=True)
>>> res.count
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 4 stored elements in COOrdinate format>
>>> res.count.A
array([[2, 3, 0],
[1, 0, 4]])
"""
nargs = len(args)
if nargs == 0:
raise TypeError("At least one input sequence is required.")
len0 = len(args[0])
if not all(len(a) == len0 for a in args[1:]):
raise ValueError("All input sequences must have the same length.")
if sparse and nargs != 2:
raise ValueError("When `sparse` is True, only two input sequences "
"are allowed.")
if levels is None:
# Call np.unique with return_inverse=True on each argument.
actual_levels, indices = zip(*[np.unique(a, return_inverse=True)
for a in args])
else:
# `levels` is not None...
if len(levels) != nargs:
raise ValueError('len(levels) must equal the number of input '
'sequences')
args = [np.asarray(arg) for arg in args]
mask = np.zeros((nargs, len0), dtype=np.bool_)
inv = np.zeros((nargs, len0), dtype=np.intp)
actual_levels = []
for k, (levels_list, arg) in enumerate(zip(levels, args)):
if levels_list is None:
levels_list, inv[k, :] = np.unique(arg, return_inverse=True)
mask[k, :] = True
else:
q = arg == np.asarray(levels_list).reshape(-1, 1)
mask[k, :] = np.any(q, axis=0)
qnz = q.T.nonzero()
inv[k, qnz[0]] = qnz[1]
actual_levels.append(levels_list)
mask_all = mask.all(axis=0)
indices = tuple(inv[:, mask_all])
if sparse:
count = coo_matrix((np.ones(len(indices[0]), dtype=int),
(indices[0], indices[1])))
count.sum_duplicates()
else:
shape = [len(u) for u in actual_levels]
count = np.zeros(shape, dtype=int)
np.add.at(count, indices, 1)
return CrosstabResult(actual_levels, count)
| 7,355 | 34.882927 | 79 | py |
| scipy | scipy-main/scipy/stats/mstats.py |
"""
===================================================================
Statistical functions for masked arrays (:mod:`scipy.stats.mstats`)
===================================================================
.. currentmodule:: scipy.stats.mstats
This module contains a large number of statistical functions that can
be used with masked arrays.
Most of these functions are similar to those in `scipy.stats` but might
have small differences in the API or in the algorithm used. Since this
is a relatively new package, some API changes are still possible.
Summary statistics
==================
.. autosummary::
:toctree: generated/
describe
gmean
hmean
kurtosis
mode
mquantiles
hdmedian
hdquantiles
hdquantiles_sd
idealfourths
plotting_positions
meppf
moment
skew
tmean
tvar
tmin
tmax
tsem
variation
find_repeats
sem
trimmed_mean
trimmed_mean_ci
trimmed_std
trimmed_var
Frequency statistics
====================
.. autosummary::
:toctree: generated/
scoreatpercentile
Correlation functions
=====================
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
kendalltau_seasonal
linregress
siegelslopes
theilslopes
sen_seasonal_slopes
Statistical tests
=================
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_onesamp
ttest_ind
ttest_rel
chisquare
kstest
ks_2samp
ks_1samp
ks_twosamp
mannwhitneyu
rankdata
kruskal
kruskalwallis
friedmanchisquare
brunnermunzel
skewtest
kurtosistest
normaltest
Transformations
===============
.. autosummary::
:toctree: generated/
obrientransform
trim
trima
trimmed_stde
trimr
trimtail
trimboth
winsorize
zmap
zscore
Other
=====
.. autosummary::
:toctree: generated/
argstoarray
count_tied_groups
msign
compare_medians_ms
median_cihs
mjci
mquantiles_cimj
rsh
"""
from ._mstats_basic import *
from ._mstats_extras import *
# Functions that support masked array input in stats but need to be kept in the
# mstats namespace for backwards compatibility:
from scipy.stats import gmean, hmean, zmap, zscore, chisquare
| 2,262 | 15.639706 | 79 | py |
| scipy | scipy-main/scipy/stats/morestats.py |
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene',
'fligner', 'mood', 'wilcoxon', 'median_test',
'circmean', 'circvar', 'circstd', 'anderson_ksamp',
'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
'yeojohnson_normplot', 'annotations', 'namedtuple', 'isscalar', 'log',
'around', 'unique', 'arange', 'sort', 'amin', 'amax', 'atleast_1d',
'array', 'compress', 'exp', 'ravel', 'count_nonzero', 'arctan2',
'hypot', 'optimize', 'find_repeats',
'chi2_contingency', 'distributions', 'rv_generic', 'Mean',
'Variance', 'Std_dev', 'ShapiroResult', 'AndersonResult',
'Anderson_ksampResult', 'AnsariResult', 'BartlettResult',
'LeveneResult', 'FlignerResult', 'WilcoxonResult'
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="stats", module="morestats",
private_module="_morestats", all=__all__,
attribute=name)
| 1,388 | 38.685714 | 76 | py |
| scipy | scipy-main/scipy/stats/_result_classes.py |
# This module exists only to allow Sphinx to generate docs
# for the result objects returned by some functions in stats
# _without_ adding them to the main stats documentation page.
"""
Result classes
--------------
.. currentmodule:: scipy.stats._result_classes
.. autosummary::
:toctree: generated/
RelativeRiskResult
BinomTestResult
TukeyHSDResult
DunnettResult
PearsonRResult
FitResult
OddsRatioResult
TtestResult
ECDFResult
EmpiricalDistributionFunction
"""
__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
'PearsonRResult', 'FitResult', 'OddsRatioResult',
'TtestResult', 'DunnettResult', 'ECDFResult',
'EmpiricalDistributionFunction']
from ._binomtest import BinomTestResult
from ._odds_ratio import OddsRatioResult
from ._relative_risk import RelativeRiskResult
from ._hypotests import TukeyHSDResult
from ._multicomp import DunnettResult
from ._stats_py import PearsonRResult, TtestResult
from ._fit import FitResult
from ._survival import ECDFResult, EmpiricalDistributionFunction
| 1,085 | 25.487805 | 69 | py |
| scipy | scipy-main/scipy/stats/_sensitivity_analysis.py |
from __future__ import annotations
import inspect
from dataclasses import dataclass
from typing import (
Callable, Literal, Protocol, TYPE_CHECKING
)
import numpy as np
from scipy.stats._common import ConfidenceInterval
from scipy.stats._qmc import check_random_state
from scipy.stats._resampling import BootstrapResult
from scipy.stats import qmc, bootstrap
if TYPE_CHECKING:
import numpy.typing as npt
from scipy._lib._util import DecimalNumber, IntNumber, SeedType
__all__ = [
'sobol_indices'
]
def f_ishigami(x: npt.ArrayLike) -> np.ndarray:
r"""Ishigami function.
.. math::
Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1
with :math:`\mathbf{x} \in [-\pi, \pi]^3`.
Parameters
----------
x : array_like ([x1, x2, x3], n)
Returns
-------
f : array_like (n,)
Function evaluation.
References
----------
.. [1] Ishigami, T. and T. Homma. "An importance quantification technique
in uncertainty analysis for computer models." IEEE,
:doi:`10.1109/ISUMA.1990.151285`, 1990.
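    Examples
    --------
    A quick sanity check: at ``x = (0, pi/2, 1)`` only the middle term
    contributes.
    >>> import numpy as np
    >>> float(f_ishigami(np.array([[0.0], [np.pi / 2], [1.0]]))[0])
    7.0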
"""
x = np.atleast_2d(x)
f_eval = (
np.sin(x[0])
+ 7 * np.sin(x[1])**2
+ 0.1 * (x[2]**4) * np.sin(x[0])
)
return f_eval
def sample_A_B(
n: IntNumber,
dists: list[PPFDist],
random_state: SeedType = None
) -> np.ndarray:
"""Sample two matrices A and B.
    Uses a Sobol' sequence with ``2d`` columns to obtain 2 uncorrelated
    matrices. This is more efficient than using 2 random draws of Sobol'.
See sec. 5 from [1]_.
Output shape is (d, n).
References
----------
.. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola. "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
:doi:`10.1016/j.cpc.2009.09.018`, 2010.
"""
d = len(dists)
A_B = qmc.Sobol(d=2*d, seed=random_state, bits=64).random(n).T
A_B = A_B.reshape(2, d, -1)
try:
for d_, dist in enumerate(dists):
A_B[:, d_] = dist.ppf(A_B[:, d_])
except AttributeError as exc:
message = "Each distribution in `dists` must have method `ppf`."
raise ValueError(message) from exc
return A_B
def sample_AB(A: np.ndarray, B: np.ndarray) -> np.ndarray:
"""AB matrix.
AB: rows of B into A. Shape (d, d, n).
- Copy A into d "pages"
- In the first page, replace 1st rows of A with 1st row of B.
...
- In the dth page, replace dth row of A with dth row of B.
- return the stack of pages
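    Examples
    --------
    A toy illustration with ``d = 2`` and ``n = 2``; each page of the
    result is one copy of ``A`` with one row replaced by that of ``B``.
    >>> import numpy as np
    >>> A = np.array([[1, 2], [3, 4]])
    >>> B = np.array([[5, 6], [7, 8]])
    >>> AB = sample_AB(A, B)
    >>> AB[0]
    array([[5, 6],
           [3, 4]])
    >>> AB[1]
    array([[1, 2],
           [7, 8]])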
"""
d, n = A.shape
AB = np.tile(A, (d, 1, 1))
i = np.arange(d)
AB[i, i] = B[i]
return AB
def saltelli_2010(
f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray
) -> tuple[np.ndarray, np.ndarray]:
r"""Saltelli2010 formulation.
.. math::
S_i = \frac{1}{N} \sum_{j=1}^N
f(\mathbf{B})_j (f(\mathbf{AB}^{(i)})_j - f(\mathbf{A})_j)
.. math::
S_{T_i} = \frac{1}{N} \sum_{j=1}^N
(f(\mathbf{A})_j - f(\mathbf{AB}^{(i)})_j)^2
Parameters
----------
f_A, f_B : array_like (s, n)
Function values at A and B, respectively
f_AB : array_like (d, s, n)
Function values at each of the AB pages
Returns
-------
s, st : array_like (s, d)
First order and total order Sobol' indices.
References
----------
.. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola. "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
:doi:`10.1016/j.cpc.2009.09.018`, 2010.
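    Examples
    --------
    A shape-contract sketch with random placeholder values (``d = 3``
    parameters, ``s = 1`` output, ``n = 8`` samples):
    >>> import numpy as np
    >>> rng = np.random.default_rng(12345)
    >>> f_A = rng.random((1, 8))
    >>> f_B = rng.random((1, 8))
    >>> f_AB = rng.random((3, 1, 8))
    >>> s, st = saltelli_2010(f_A, f_B, f_AB)
    >>> s.shape, st.shape
    ((1, 3), (1, 3))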
"""
# Empirical variance calculated using output from A and B which are
# independent. Output of AB is not independent and cannot be used
var = np.var([f_A, f_B], axis=(0, -1))
# We divide by the variance to have a ratio of variance
# this leads to eq. 2
s = np.mean(f_B * (f_AB - f_A), axis=-1) / var # Table 2 (b)
st = 0.5 * np.mean((f_A - f_AB) ** 2, axis=-1) / var # Table 2 (f)
return s.T, st.T
@dataclass
class BootstrapSobolResult:
first_order: BootstrapResult
total_order: BootstrapResult
@dataclass
class SobolResult:
first_order: np.ndarray
total_order: np.ndarray
_indices_method: Callable
_f_A: np.ndarray
_f_B: np.ndarray
_f_AB: np.ndarray
_A: np.ndarray | None = None
_B: np.ndarray | None = None
_AB: np.ndarray | None = None
_bootstrap_result: BootstrapResult | None = None
def bootstrap(
self,
confidence_level: DecimalNumber = 0.95,
n_resamples: IntNumber = 999
) -> BootstrapSobolResult:
"""Bootstrap Sobol' indices to provide confidence intervals.
Parameters
----------
confidence_level : float, default: ``0.95``
The confidence level of the confidence intervals.
n_resamples : int, default: ``999``
The number of resamples performed to form the bootstrap
distribution of the indices.
Returns
-------
res : BootstrapSobolResult
Bootstrap result containing the confidence intervals and the
bootstrap distribution of the indices.
An object with attributes:
first_order : BootstrapResult
Bootstrap result of the first order indices.
total_order : BootstrapResult
Bootstrap result of the total order indices.
See `BootstrapResult` for more details.
"""
def statistic(idx):
f_A_ = self._f_A[:, idx]
f_B_ = self._f_B[:, idx]
f_AB_ = self._f_AB[..., idx]
return self._indices_method(f_A_, f_B_, f_AB_)
n = self._f_A.shape[1]
res = bootstrap(
[np.arange(n)], statistic=statistic, method="BCa",
n_resamples=n_resamples,
confidence_level=confidence_level,
bootstrap_result=self._bootstrap_result
)
self._bootstrap_result = res
first_order = BootstrapResult(
confidence_interval=ConfidenceInterval(
res.confidence_interval.low[0], res.confidence_interval.high[0]
),
bootstrap_distribution=res.bootstrap_distribution[0],
standard_error=res.standard_error[0],
)
total_order = BootstrapResult(
confidence_interval=ConfidenceInterval(
res.confidence_interval.low[1], res.confidence_interval.high[1]
),
bootstrap_distribution=res.bootstrap_distribution[1],
standard_error=res.standard_error[1],
)
return BootstrapSobolResult(
first_order=first_order, total_order=total_order
)
class PPFDist(Protocol):
@property
def ppf(self) -> Callable[..., float]:
...
def sobol_indices(
*,
func: Callable[[np.ndarray], npt.ArrayLike] |
dict[Literal['f_A', 'f_B', 'f_AB'], np.ndarray], # noqa
n: IntNumber,
dists: list[PPFDist] | None = None,
method: Callable | Literal['saltelli_2010'] = 'saltelli_2010',
random_state: SeedType = None
) -> SobolResult:
r"""Global sensitivity indices of Sobol'.
Parameters
----------
func : callable or dict(str, array_like)
If `func` is a callable, function to compute the Sobol' indices from.
Its signature must be::
func(x: ArrayLike) -> ArrayLike
with ``x`` of shape ``(d, n)`` and output of shape ``(s, n)`` where:
- ``d`` is the input dimensionality of `func`
(number of input variables),
- ``s`` is the output dimensionality of `func`
(number of output variables), and
- ``n`` is the number of samples (see `n` below).
Function evaluation values must be finite.
If `func` is a dictionary, contains the function evaluations from three
different arrays. Keys must be: ``f_A``, ``f_B`` and ``f_AB``.
``f_A`` and ``f_B`` should have a shape ``(s, n)`` and ``f_AB``
should have a shape ``(d, s, n)``.
This is an advanced feature and misuse can lead to wrong analysis.
n : int
Number of samples used to generate the matrices ``A`` and ``B``.
Must be a power of 2. The total number of points at which `func` is
evaluated will be ``n*(d+2)``.
dists : list(distributions), optional
List of each parameter's distribution. The distribution of parameters
depends on the application and should be carefully chosen.
Parameters are assumed to be independently distributed, meaning there
is no constraint nor relationship between their values.
Distributions must be an instance of a class with a ``ppf``
method.
Must be specified if `func` is a callable, and ignored otherwise.
method : Callable or str, default: 'saltelli_2010'
Method used to compute the first and total Sobol' indices.
If a callable, its signature must be::
func(f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray)
-> Tuple[np.ndarray, np.ndarray]
with ``f_A, f_B`` of shape ``(s, n)`` and ``f_AB`` of shape
``(d, s, n)``.
These arrays contain the function evaluations from three different sets
of samples.
The output is a tuple of the first and total indices with
shape ``(s, d)``.
This is an advanced feature and misuse can lead to wrong analysis.
random_state : {None, int, `numpy.random.Generator`}, optional
If `random_state` is an int or None, a new `numpy.random.Generator` is
created using ``np.random.default_rng(random_state)``.
If `random_state` is already a ``Generator`` instance, then the
provided instance is used.
Returns
-------
res : SobolResult
An object with attributes:
first_order : ndarray of shape (s, d)
First order Sobol' indices.
total_order : ndarray of shape (s, d)
Total order Sobol' indices.
And method:
bootstrap(confidence_level: float, n_resamples: int)
-> BootstrapSobolResult
A method providing confidence intervals on the indices.
See `scipy.stats.bootstrap` for more details.
The bootstrapping is done on both first and total order indices,
and they are available in `BootstrapSobolResult` as attributes
``first_order`` and ``total_order``.
Notes
-----
The Sobol' method [1]_, [2]_ is a variance-based Sensitivity Analysis which
obtains the contribution of each parameter to the variance of the
quantities of interest (QoIs; i.e., the outputs of `func`).
Respective contributions can be used to rank the parameters and
also gauge the complexity of the model by computing the
model's effective (or mean) dimension.
.. note::
Parameters are assumed to be independently distributed. Each
parameter can still follow any distribution. In fact, the distribution
is very important and should match the real distribution of the
parameters.
It uses a functional decomposition of the variance of the function to
explore
.. math::
\mathbb{V}(Y) = \sum_{i}^{d} \mathbb{V}_i (Y) + \sum_{i<j}^{d}
\mathbb{V}_{ij}(Y) + ... + \mathbb{V}_{1,2,...,d}(Y),
introducing conditional variances:
.. math::
\mathbb{V}_i(Y) = \mathbb{\mathbb{V}}[\mathbb{E}(Y|x_i)]
\qquad
\mathbb{V}_{ij}(Y) = \mathbb{\mathbb{V}}[\mathbb{E}(Y|x_i x_j)]
- \mathbb{V}_i(Y) - \mathbb{V}_j(Y),
Sobol' indices are expressed as
.. math::
S_i = \frac{\mathbb{V}_i(Y)}{\mathbb{V}[Y]}
\qquad
S_{ij} =\frac{\mathbb{V}_{ij}(Y)}{\mathbb{V}[Y]}.
:math:`S_{i}` corresponds to the first-order term which apprises the
contribution of the i-th parameter, while :math:`S_{ij}` corresponds to the
second-order term which informs about the contribution of interactions
between the i-th and the j-th parameters. These equations can be
generalized to compute higher order terms; however, they are expensive to
compute and their interpretation is complex.
This is why only first order indices are provided.
Total order indices represent the global contribution of the parameters
to the variance of the QoI and are defined as:
.. math::
S_{T_i} = S_i + \sum_j S_{ij} + \sum_{j,k} S_{ijk} + ...
= 1 - \frac{\mathbb{V}[\mathbb{E}(Y|x_{\sim i})]}{\mathbb{V}[Y]}.
First order indices sum to at most 1, while total order indices sum to at
least 1. If there are no interactions, then first and total order indices
are equal, and both first and total order indices sum to 1.
.. warning::
Negative Sobol' values are due to numerical errors. Increasing the
number of points `n` should help.
        The number of samples required for a good analysis increases with
        the dimensionality of the problem. For example, for a 3-dimensional
        problem, consider at minimum ``n >= 2**12``. The more complex the
        model is, the more samples will be needed.
        Even for a purely additive model, the indices may not sum to 1 due
        to numerical noise.
References
----------
.. [1] Sobol, I. M.. "Sensitivity analysis for nonlinear mathematical
models." Mathematical Modeling and Computational Experiment, 1:407-414,
1993.
.. [2] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
mathematical models and their Monte Carlo estimates." Mathematics
and Computers in Simulation, 55(1-3):271-280,
:doi:`10.1016/S0378-4754(00)00270-6`, 2001.
.. [3] Saltelli, A. "Making best use of model evaluations to
compute sensitivity indices." Computer Physics Communications,
145(2):280-297, :doi:`10.1016/S0010-4655(02)00280-1`, 2002.
.. [4] Saltelli, A., M. Ratto, T. Andres, F. Campolongo, J. Cariboni,
D. Gatelli, M. Saisana, and S. Tarantola. "Global Sensitivity Analysis.
The Primer." 2007.
.. [5] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola. "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
:doi:`10.1016/j.cpc.2009.09.018`, 2010.
.. [6] Ishigami, T. and T. Homma. "An importance quantification technique
in uncertainty analysis for computer models." IEEE,
:doi:`10.1109/ISUMA.1990.151285`, 1990.
Examples
--------
The following is an example with the Ishigami function [6]_
.. math::
Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1,
with :math:`\mathbf{x} \in [-\pi, \pi]^3`. This function exhibits strong
non-linearity and non-monotonicity.
Remember, Sobol' indices assumes that samples are independently
distributed. In this case we use a uniform distribution on each marginals.
>>> import numpy as np
>>> from scipy.stats import sobol_indices, uniform
>>> rng = np.random.default_rng()
>>> def f_ishigami(x):
... f_eval = (
... np.sin(x[0])
... + 7 * np.sin(x[1])**2
... + 0.1 * (x[2]**4) * np.sin(x[0])
... )
... return f_eval
>>> indices = sobol_indices(
... func=f_ishigami, n=1024,
... dists=[
... uniform(loc=-np.pi, scale=2*np.pi),
... uniform(loc=-np.pi, scale=2*np.pi),
... uniform(loc=-np.pi, scale=2*np.pi)
... ],
... random_state=rng
... )
>>> indices.first_order
array([0.31637954, 0.43781162, 0.00318825])
>>> indices.total_order
array([0.56122127, 0.44287857, 0.24229595])
Confidence interval can be obtained using bootstrapping.
>>> boot = indices.bootstrap()
Then, this information can be easily visualized.
>>> import matplotlib.pyplot as plt
>>> fig, axs = plt.subplots(1, 2, figsize=(9, 4))
>>> _ = axs[0].errorbar(
... [1, 2, 3], indices.first_order, fmt='o',
... yerr=[
... indices.first_order - boot.first_order.confidence_interval.low,
... boot.first_order.confidence_interval.high - indices.first_order
... ],
... )
>>> axs[0].set_ylabel("First order Sobol' indices")
>>> axs[0].set_xlabel('Input parameters')
>>> axs[0].set_xticks([1, 2, 3])
>>> _ = axs[1].errorbar(
... [1, 2, 3], indices.total_order, fmt='o',
... yerr=[
... indices.total_order - boot.total_order.confidence_interval.low,
... boot.total_order.confidence_interval.high - indices.total_order
... ],
... )
>>> axs[1].set_ylabel("Total order Sobol' indices")
>>> axs[1].set_xlabel('Input parameters')
>>> axs[1].set_xticks([1, 2, 3])
>>> plt.tight_layout()
>>> plt.show()
.. note::
By default, `scipy.stats.uniform` has support ``[0, 1]``.
Using the parameters ``loc`` and ``scale``, one obtains the uniform
distribution on ``[loc, loc + scale]``.
This result is particularly interesting because the first order index
:math:`S_{x_3} = 0` whereas its total order is :math:`S_{T_{x_3}} = 0.244`.
This means that higher order interactions with :math:`x_3` are responsible
    for the difference. Almost 25% of the observed variance
    of the QoI is due to interactions between :math:`x_3` and :math:`x_1`,
    although :math:`x_3` by itself has no impact on the QoI.
The following gives a visual explanation of Sobol' indices on this
function. Let's generate 1024 samples in :math:`[-\pi, \pi]^3` and
calculate the value of the output.
>>> from scipy.stats import qmc
>>> n_dim = 3
>>> p_labels = ['$x_1$', '$x_2$', '$x_3$']
>>> sample = qmc.Sobol(d=n_dim, seed=rng).random(1024)
>>> sample = qmc.scale(
... sample=sample,
... l_bounds=[-np.pi, -np.pi, -np.pi],
... u_bounds=[np.pi, np.pi, np.pi]
... )
>>> output = f_ishigami(sample.T)
Now we can do scatter plots of the output with respect to each parameter.
This gives a visual way to understand how each parameter impacts the
output of the function.
>>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4))
>>> for i in range(n_dim):
... xi = sample[:, i]
... ax[i].scatter(xi, output, marker='+')
... ax[i].set_xlabel(p_labels[i])
>>> ax[0].set_ylabel('Y')
>>> plt.tight_layout()
>>> plt.show()
Now Sobol' goes a step further:
by conditioning the output value by given values of the parameter
(black lines), the conditional output mean is computed. It corresponds to
the term :math:`\mathbb{E}(Y|x_i)`. Taking the variance of this term gives
the numerator of the Sobol' indices.
>>> mini = np.min(output)
>>> maxi = np.max(output)
>>> n_bins = 10
>>> bins = np.linspace(-np.pi, np.pi, num=n_bins, endpoint=False)
>>> dx = bins[1] - bins[0]
>>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4))
>>> for i in range(n_dim):
... xi = sample[:, i]
... ax[i].scatter(xi, output, marker='+')
... ax[i].set_xlabel(p_labels[i])
... for bin_ in bins:
... idx = np.where((bin_ <= xi) & (xi <= bin_ + dx))
... xi_ = xi[idx]
... y_ = output[idx]
... ave_y_ = np.mean(y_)
... ax[i].plot([bin_ + dx/2] * 2, [mini, maxi], c='k')
... ax[i].scatter(bin_ + dx/2, ave_y_, c='r')
>>> ax[0].set_ylabel('Y')
>>> plt.tight_layout()
>>> plt.show()
Looking at :math:`x_3`, the variance
of the mean is zero leading to :math:`S_{x_3} = 0`. But we can further
observe that the variance of the output is not constant along the parameter
values of :math:`x_3`. This heteroscedasticity is explained by higher order
    interactions. Moreover, heteroscedasticity is also noticeable on
    :math:`x_1`, pointing to an interaction between :math:`x_3` and
    :math:`x_1`. On :math:`x_2`, the variance seems to be constant, so no
    interaction involving this parameter can be assumed.
    This case is fairly simple to analyse visually, although the analysis is
    only qualitative. Nevertheless, when the number of input parameters
    increases, such analysis becomes unrealistic, as it is difficult to draw
    conclusions about high-order terms. Hence the benefit of Sobol' indices.
"""
random_state = check_random_state(random_state)
n_ = int(n)
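    # A power of two has exactly one set bit, so ``n & (n - 1) == 0``.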
if not (n_ & (n_ - 1) == 0) or n != n_:
raise ValueError(
"The balance properties of Sobol' points require 'n' "
"to be a power of 2."
)
n = n_
if not callable(method):
indices_methods: dict[str, Callable] = {
"saltelli_2010": saltelli_2010,
}
try:
method = method.lower() # type: ignore[assignment]
indices_method_ = indices_methods[method]
except KeyError as exc:
message = (
f"{method!r} is not a valid 'method'. It must be one of"
f" {set(indices_methods)!r} or a callable."
)
raise ValueError(message) from exc
else:
indices_method_ = method
sig = inspect.signature(indices_method_)
if set(sig.parameters) != {'f_A', 'f_B', 'f_AB'}:
message = (
"If 'method' is a callable, it must have the following"
f" signature: {inspect.signature(saltelli_2010)}"
)
raise ValueError(message)
def indices_method(f_A, f_B, f_AB):
"""Wrap indices method to ensure proper output dimension.
1D when single output, 2D otherwise.
"""
return np.squeeze(indices_method_(f_A=f_A, f_B=f_B, f_AB=f_AB))
if callable(func):
if dists is None:
raise ValueError(
"'dists' must be defined when 'func' is a callable."
)
def wrapped_func(x):
return np.atleast_2d(func(x))
A, B = sample_A_B(n=n, dists=dists, random_state=random_state)
AB = sample_AB(A=A, B=B)
f_A = wrapped_func(A)
if f_A.shape[1] != n:
raise ValueError(
"'func' output should have a shape ``(s, -1)`` with ``s`` "
"the number of output."
)
def funcAB(AB):
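            # AB has shape (d, d, n): AB[i] is A with the i-th parameter row
            # taken from B. Flatten to one (d, n*d) batch so ``func`` is
            # evaluated once, then restore the leading dimension on outputs.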
            d, _, n = AB.shape
AB = np.moveaxis(AB, 0, -1).reshape(d, n*d)
f_AB = wrapped_func(AB)
return np.moveaxis(f_AB.reshape((-1, n, d)), -1, 0)
f_B = wrapped_func(B)
f_AB = funcAB(AB)
else:
message = (
"When 'func' is a dictionary, it must contain the following "
"keys: 'f_A', 'f_B' and 'f_AB'."
"'f_A' and 'f_B' should have a shape ``(s, n)`` and 'f_AB' "
"should have a shape ``(d, s, n)``."
)
try:
f_A, f_B, f_AB = np.atleast_2d(
func['f_A'], func['f_B'], func['f_AB']
)
except KeyError as exc:
raise ValueError(message) from exc
if f_A.shape[1] != n or f_A.shape != f_B.shape or \
f_AB.shape == f_A.shape or f_AB.shape[-1] % n != 0:
raise ValueError(message)
# Normalization by mean
# Sobol', I. and Levitan, Y. L. (1999). On the use of variance reducing
# multipliers in monte carlo computations of a global sensitivity index.
# Computer Physics Communications, 117(1) :52-61.
mean = np.mean([f_A, f_B], axis=(0, -1)).reshape(-1, 1)
f_A -= mean
f_B -= mean
f_AB -= mean
# Compute indices
# Filter warnings for constant output as var = 0
with np.errstate(divide='ignore', invalid='ignore'):
first_order, total_order = indices_method(f_A=f_A, f_B=f_B, f_AB=f_AB)
# null variance means null indices
first_order[~np.isfinite(first_order)] = 0
total_order[~np.isfinite(total_order)] = 0
res = dict(
first_order=first_order,
total_order=total_order,
_indices_method=indices_method,
_f_A=f_A,
_f_B=f_B,
_f_AB=f_AB
)
if callable(func):
res.update(
dict(
_A=A,
_B=B,
_AB=AB,
)
)
return SobolResult(**res)
| 24,753 | 33.718093 | 79 | py |
scipy | scipy-main/scipy/stats/qmc.py |
r"""
====================================================
Quasi-Monte Carlo submodule (:mod:`scipy.stats.qmc`)
====================================================
.. currentmodule:: scipy.stats.qmc
This module provides Quasi-Monte Carlo generators and associated helper
functions.
Quasi-Monte Carlo
=================
Engines
-------
.. autosummary::
:toctree: generated/
QMCEngine
Sobol
Halton
LatinHypercube
PoissonDisk
MultinomialQMC
MultivariateNormalQMC
Helpers
-------
.. autosummary::
:toctree: generated/
discrepancy
update_discrepancy
scale
Introduction to Quasi-Monte Carlo
=================================
Quasi-Monte Carlo (QMC) methods [1]_, [2]_, [3]_ provide an
:math:`n \times d` array of numbers in :math:`[0,1]`. They can be used in
place of :math:`n` points from the :math:`U[0,1]^{d}` distribution. Compared to
random points, QMC points are designed to have fewer gaps and clumps. This is
quantified by discrepancy measures [4]_. From the Koksma-Hlawka
inequality [5]_ we know that low discrepancy reduces a bound on
integration error. Averaging a function :math:`f` over :math:`n` QMC points
can achieve an integration error close to :math:`O(n^{-1})` for well
behaved functions [2]_.
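For a concrete sketch, a small scrambled Sobol' sample can be drawn as
follows (see the `Sobol` class below for full usage):

>>> from scipy.stats import qmc
>>> sampler = qmc.Sobol(d=2, scramble=True, seed=12345)
>>> sample = sampler.random_base2(m=3)  # 2**3 = 8 points in [0, 1)^2
>>> sample.shape
(8, 2)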
Most QMC constructions are designed for special values of :math:`n`
such as powers of 2 or large primes. Changing the sample
size by even one can degrade their performance, even their
rate of convergence [6]_. For instance :math:`n=100` points may give less
accuracy than :math:`n=64` if the method was designed for :math:`n=2^m`.
Some QMC constructions are extensible in :math:`n`: we can find
another special sample size :math:`n' > n` and often an infinite
sequence of increasing special sample sizes. Some QMC
constructions are extensible in :math:`d`: we can increase the dimension,
possibly to some upper bound, and typically without requiring
special values of :math:`d`. Some QMC methods are extensible in
both :math:`n` and :math:`d`.
QMC points are deterministic. That makes it hard to estimate the accuracy of
integrals estimated by averages over QMC points. Randomized QMC (RQMC) [7]_
points are constructed so that each point is individually :math:`U[0,1]^{d}`
while collectively the :math:`n` points retain their low discrepancy.
One can make :math:`R` independent replications of RQMC points to
see how stable a computation is. From :math:`R` independent values,
a t-test (or bootstrap t-test [8]_) then gives approximate confidence
intervals on the mean value. Some RQMC methods produce a
root mean squared error that is actually :math:`o(1/n)` and smaller than
the rate seen in unrandomized QMC. An intuitive explanation is
that the error is a sum of many small ones and random errors
cancel in a way that deterministic ones do not. RQMC also
has advantages on integrands that are singular or, for other
reasons, fail to be Riemann integrable.
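As an illustrative sketch of the replication idea (toy integrand and small
sample sizes chosen only for brevity):

>>> import numpy as np
>>> from scipy.stats import qmc
>>> def f(x):
...     return np.sum(x, axis=1)  # toy integrand; its mean over [0, 1]^2 is 1
>>> estimates = [f(qmc.Sobol(d=2, seed=i).random_base2(m=6)).mean()
...              for i in range(8)]  # 8 independent scrambled replicates
>>> len(estimates)
8

The spread of ``estimates`` then indicates how stable the computation is.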
(R)QMC cannot beat Bakhvalov's curse of dimension (see [9]_). For
any random or deterministic method, there are worst case functions
that will give it poor performance in high dimensions. A worst
case function for QMC might be 0 at all n points but very
large elsewhere. Worst case analyses get very pessimistic
in high dimensions. (R)QMC can bring a great improvement over
MC when the functions on which it is used are not worst case.
For instance (R)QMC can be especially effective on integrands
that are well approximated by sums of functions of
some small number of their input variables at a time [10]_, [11]_.
That property is often a surprising finding about those functions.
Also, to see an improvement over IID MC, (R)QMC requires a bit of smoothness of
the integrand: roughly, the mixed first order derivative in each direction,
:math:`\partial^d f/\partial x_1 \cdots \partial x_d`, must be integrable.
For instance, a function that is 1 inside the hypersphere and 0 outside of it
has infinite variation in the sense of Hardy and Krause for any dimension
:math:`d \geq 2`.
Scrambled nets are a kind of RQMC that have some valuable robustness
properties [12]_. If the integrand is square integrable, they give variance
:math:`var_{SNET} = o(1/n)`. There is a finite upper bound on
:math:`var_{SNET} / var_{MC}` that holds simultaneously for every square
integrable integrand. Scrambled nets satisfy a strong law of large numbers
for :math:`f` in :math:`L^p` when :math:`p>1`. In some
special cases there is a central limit theorem [13]_. For smooth enough
integrands they can achieve RMSE nearly :math:`O(n^{-3})`. See [12]_
for references about these properties.
The main kinds of QMC methods are lattice rules [14]_ and digital
nets and sequences [2]_, [15]_. The theories meet up in polynomial
lattice rules [16]_ which can produce digital nets. Lattice rules
require some form of search for good constructions. For digital
nets there are widely used default constructions.
The most widely used QMC methods are Sobol' sequences [17]_.
These are digital nets. They are extensible in both :math:`n` and :math:`d`.
They can be scrambled. The special sample sizes are powers
of 2. Halton sequences [18]_ are another popular method.
The constructions resemble those of digital nets. The earlier
dimensions have much better equidistribution properties than
later ones. There are essentially no special sample sizes.
They are not thought to be as accurate as Sobol' sequences.
They can be scrambled. The nets of Faure [19]_ are also widely
used. All dimensions are equally good, but the special sample
sizes grow rapidly with dimension :math:`d`. They can be scrambled.
The nets of Niederreiter and Xing [20]_ have the best asymptotic
properties but have not shown good empirical performance [21]_.
Higher order digital nets are formed by a digit interleaving process
in the digits of the constructed points. They can achieve higher
levels of asymptotic accuracy given higher smoothness conditions on :math:`f`
and they can be scrambled [22]_. There is little or no empirical work
showing the improved rate to be attained.
Using QMC is like using the entire period of a small random
number generator. The constructions are similar and so
therefore are the computational costs [23]_.
(R)QMC is sometimes improved by passing the points through
a baker's transformation (tent function) prior to using them.
That function has the form :math:`1-2|x-1/2|`. As :math:`x` goes from 0 to
1, this function goes from 0 to 1 and then back. It is very
useful to produce a periodic function for lattice rules [14]_,
and sometimes it improves the convergence rate [24]_.
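As a minimal sketch, the tent function applied elementwise with plain NumPy
(this module does not apply it automatically):

>>> import numpy as np
>>> x = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
>>> 1 - 2 * np.abs(x - 0.5)
array([0. , 0.5, 1. , 0.5, 0. ])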
It is not straightforward to apply QMC methods to Markov
chain Monte Carlo (MCMC). We can think of MCMC as using
:math:`n=1` point in :math:`[0,1]^{d}` for very large :math:`d`, with
ergodic results corresponding to :math:`d \to \infty`. One proposal is
in [25]_ and under strong conditions an improved rate of convergence
has been shown [26]_.
Returning to Sobol' points: there are many versions depending
on what are called direction numbers. Those are the result of
searches and are tabulated. A very widely used set of direction
numbers come from [27]_. It is extensible in dimension up to
:math:`d=21201`.
References
----------
.. [1] Owen, Art B. "Monte Carlo Book: the Quasi-Monte Carlo parts." 2019.
.. [2] Niederreiter, Harald. "Random number generation and quasi-Monte Carlo
methods." Society for Industrial and Applied Mathematics, 1992.
.. [3] Dick, Josef, Frances Y. Kuo, and Ian H. Sloan. "High-dimensional
integration: the quasi-Monte Carlo way." Acta Numerica no. 22: 133, 2013.
.. [4] Aho, A. V., C. Aistleitner, T. Anderson, K. Appel, V. Arnol'd, N.
Aronszajn, D. Asotsky et al. "W. Chen et al.(eds.), "A Panorama of
Discrepancy Theory", Sringer International Publishing,
Switzerland: 679, 2014.
.. [5] Hickernell, Fred J. "Koksma-Hlawka Inequality." Wiley StatsRef:
Statistics Reference Online, 2014.
.. [6] Owen, Art B. "On dropping the first Sobol' point." :arxiv:`2008.08051`,
2020.
.. [7] L'Ecuyer, Pierre, and Christiane Lemieux. "Recent advances in randomized
quasi-Monte Carlo methods." In Modeling uncertainty, pp. 419-474. Springer,
New York, NY, 2002.
.. [8] DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence
intervals." Statistical science: 189-212, 1996.
.. [9] Dimov, Ivan T. "Monte Carlo methods for applied scientists." World
Scientific, 2008.
.. [10] Caflisch, Russel E., William J. Morokoff, and Art B. Owen. "Valuation
of mortgage backed securities using Brownian bridges to reduce effective
dimension." Journal of Computational Finance: no. 1 27-46, 1997.
.. [11] Sloan, Ian H., and Henryk Wozniakowski. "When are quasi-Monte Carlo
algorithms efficient for high dimensional integrals?." Journal of Complexity
14, no. 1 (1998): 1-33.
.. [12] Owen, Art B., and Daniel Rudolf, "A strong law of large numbers for
scrambled net integration." SIAM Review, to appear.
.. [13] Loh, Wei-Liem. "On the asymptotic distribution of scrambled net
quadrature." The Annals of Statistics 31, no. 4: 1282-1324, 2003.
.. [14] Sloan, Ian H. and S. Joe. "Lattice methods for multiple integration."
Oxford University Press, 1994.
.. [15] Dick, Josef, and Friedrich Pillichshammer. "Digital nets and sequences:
discrepancy theory and quasi-Monte Carlo integration." Cambridge University
Press, 2010.
.. [16] Dick, Josef, F. Kuo, Friedrich Pillichshammer, and I. Sloan.
"Construction algorithms for polynomial lattice rules for multivariate
integration." Mathematics of computation 74, no. 252: 1895-1921, 2005.
.. [17] Sobol', Il'ya Meerovich. "On the distribution of points in a cube and
the approximate evaluation of integrals." Zhurnal Vychislitel'noi Matematiki
i Matematicheskoi Fiziki 7, no. 4: 784-802, 1967.
.. [18] Halton, John H. "On the efficiency of certain quasi-random sequences of
points in evaluating multi-dimensional integrals." Numerische Mathematik 2,
no. 1: 84-90, 1960.
.. [19] Faure, Henri. "Discrepance de suites associees a un systeme de
numeration (en dimension s)." Acta arithmetica 41, no. 4: 337-351, 1982.
.. [20] Niederreiter, Harold, and Chaoping Xing. "Low-discrepancy sequences and
global function fields with many rational places." Finite Fields and their
applications 2, no. 3: 241-273, 1996.
.. [21] Hong, Hee Sun, and Fred J. Hickernell. "Algorithm 823: Implementing
scrambled digital sequences." ACM Transactions on Mathematical Software
(TOMS) 29, no. 2: 95-109, 2003.
.. [22] Dick, Josef. "Higher order scrambled digital nets achieve the optimal
rate of the root mean square error for smooth integrands." The Annals of
Statistics 39, no. 3: 1372-1398, 2011.
.. [23] Niederreiter, Harald. "Multidimensional numerical integration using
pseudorandom numbers." In Stochastic Programming 84 Part I, pp. 17-38.
Springer, Berlin, Heidelberg, 1986.
.. [24] Hickernell, Fred J. "Obtaining O (N-2+e) Convergence for Lattice
Quadrature Rules." In Monte Carlo and Quasi-Monte Carlo Methods 2000,
pp. 274-289. Springer, Berlin, Heidelberg, 2002.
.. [25] Owen, Art B., and Seth D. Tribble. "A quasi-Monte Carlo Metropolis
algorithm." Proceedings of the National Academy of Sciences 102,
no. 25: 8844-8849, 2005.
.. [26] Chen, Su. "Consistency and convergence rate of Markov chain quasi Monte
Carlo with examples." PhD diss., Stanford University, 2011.
.. [27] Joe, Stephen, and Frances Y. Kuo. "Constructing Sobol sequences with
better two-dimensional projections." SIAM Journal on Scientific Computing
30, no. 5: 2635-2654, 2008.
"""
from ._qmc import *
| 11,624 | 48.468085 | 79 | py |
scipy | scipy-main/scipy/stats/biasedurn.py |
# This file is not meant for public use and will be removed in SciPy v2.0.0.
import warnings
from . import _biasedurn
__all__ = [ # noqa: F822
'_PyFishersNCHypergeometric',
'_PyWalleniusNCHypergeometric',
'_PyStochasticLib3'
]
def __dir__():
return __all__
def __getattr__(name):
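    # PEP 562 module-level ``__getattr__``: warn on access to a public name,
    # then forward the lookup to the private ``_biasedurn`` extension module.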
if name not in __all__:
raise AttributeError(
"scipy.stats.biasedurn is deprecated and has no attribute "
f"{name}.")
warnings.warn("the `scipy.stats.biasedurn` namespace is deprecated and "
"will be removed in SciPy v2.0.0.",
category=DeprecationWarning, stacklevel=2)
return getattr(_biasedurn, name)
| 690 | 22.033333 | 76 | py |
scipy | scipy-main/scipy/stats/_entropy.py |
"""
Created on Fri Apr 2 09:06:05 2021
@author: matth
"""
from __future__ import annotations
import math
import numpy as np
from scipy import special
__all__ = ['entropy', 'differential_entropy']
def entropy(pk: np.typing.ArrayLike,
qk: np.typing.ArrayLike | None = None,
base: float | None = None,
axis: int = 0
) -> np.number | np.ndarray:
"""
Calculate the Shannon entropy/relative entropy of given distribution(s).
If only probabilities `pk` are given, the Shannon entropy is calculated as
``H = -sum(pk * log(pk))``.
If `qk` is not None, then compute the relative entropy
``D = sum(pk * log(pk / qk))``. This quantity is also known
as the Kullback-Leibler divergence.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : array_like
Defines the (discrete) distribution. Along each axis-slice of ``pk``,
element ``i`` is the (possibly unnormalized) probability of event
``i``.
qk : array_like, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
axis : int, optional
The axis along which the entropy is calculated. Default is 0.
Returns
-------
S : {float, array_like}
The calculated entropy.
Notes
-----
Informally, the Shannon entropy quantifies the expected uncertainty
inherent in the possible outcomes of a discrete random variable.
For example,
if messages consisting of sequences of symbols from a set are to be
encoded and transmitted over a noiseless channel, then the Shannon entropy
``H(pk)`` gives a tight lower bound for the average number of units of
information needed per symbol if the symbols occur with frequencies
governed by the discrete distribution `pk` [1]_. The choice of base
determines the choice of units; e.g., ``e`` for nats, ``2`` for bits, etc.
The relative entropy, ``D(pk|qk)``, quantifies the increase in the average
number of units of information needed per symbol if the encoding is
optimized for the probability distribution `qk` instead of the true
distribution `pk`. Informally, the relative entropy quantifies the expected
excess in surprise experienced if one believes the true distribution is
`qk` when it is actually `pk`.
A related quantity, the cross entropy ``CE(pk, qk)``, satisfies the
equation ``CE(pk, qk) = H(pk) + D(pk|qk)`` and can also be calculated with
the formula ``CE = -sum(pk * log(qk))``. It gives the average
number of units of information needed per symbol if an encoding is
optimized for the probability distribution `qk` when the true distribution
is `pk`. It is not computed directly by `entropy`, but it can be computed
using two calls to the function (see Examples).
See [2]_ for more information.
References
----------
.. [1] Shannon, C.E. (1948), A Mathematical Theory of Communication.
Bell System Technical Journal, 27: 379-423.
https://doi.org/10.1002/j.1538-7305.1948.tb01338.x
.. [2] Thomas M. Cover and Joy A. Thomas. 2006. Elements of Information
Theory (Wiley Series in Telecommunications and Signal Processing).
Wiley-Interscience, USA.
Examples
--------
The outcome of a fair coin is the most uncertain:
>>> import numpy as np
>>> from scipy.stats import entropy
>>> base = 2 # work in units of bits
>>> pk = np.array([1/2, 1/2]) # fair coin
>>> H = entropy(pk, base=base)
>>> H
1.0
>>> H == -np.sum(pk * np.log(pk)) / np.log(base)
True
The outcome of a biased coin is less uncertain:
>>> qk = np.array([9/10, 1/10]) # biased coin
>>> entropy(qk, base=base)
0.46899559358928117
The relative entropy between the fair coin and biased coin is calculated
as:
>>> D = entropy(pk, qk, base=base)
>>> D
0.7369655941662062
>>> D == np.sum(pk * np.log(pk/qk)) / np.log(base)
True
    The cross entropy can be calculated as the sum of the entropy and
    relative entropy:
>>> CE = entropy(pk, base=base) + entropy(pk, qk, base=base)
>>> CE
1.736965594166206
>>> CE == -np.sum(pk * np.log(qk)) / np.log(base)
True
"""
if base is not None and base <= 0:
raise ValueError("`base` must be a positive number or `None`.")
pk = np.asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)
if qk is None:
vec = special.entr(pk)
else:
qk = np.asarray(qk)
pk, qk = np.broadcast_arrays(pk, qk)
qk = 1.0*qk / np.sum(qk, axis=axis, keepdims=True)
vec = special.rel_entr(pk, qk)
S = np.sum(vec, axis=axis)
if base is not None:
S /= np.log(base)
return S
def differential_entropy(
values: np.typing.ArrayLike,
*,
window_length: int | None = None,
base: float | None = None,
axis: int = 0,
method: str = "auto",
) -> np.number | np.ndarray:
r"""Given a sample of a distribution, estimate the differential entropy.
    Several estimation methods are available using the `method` parameter. By
    default, a method is selected based on the size of the sample.
Parameters
----------
values : sequence
Sample from a continuous distribution.
window_length : int, optional
Window length for computing Vasicek estimate. Must be an integer
between 1 and half of the sample size. If ``None`` (the default), it
uses the heuristic value
.. math::
\left \lfloor \sqrt{n} + 0.5 \right \rfloor
where :math:`n` is the sample size. This heuristic was originally
proposed in [2]_ and has become common in the literature.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
axis : int, optional
The axis along which the differential entropy is calculated.
Default is 0.
method : {'vasicek', 'van es', 'ebrahimi', 'correa', 'auto'}, optional
The method used to estimate the differential entropy from the sample.
Default is ``'auto'``. See Notes for more information.
Returns
-------
entropy : float
The calculated differential entropy.
Notes
-----
This function will converge to the true differential entropy in the limit
.. math::
n \to \infty, \quad m \to \infty, \quad \frac{m}{n} \to 0
The optimal choice of ``window_length`` for a given sample size depends on
the (unknown) distribution. Typically, the smoother the density of the
distribution, the larger the optimal value of ``window_length`` [1]_.
The following options are available for the `method` parameter.
* ``'vasicek'`` uses the estimator presented in [1]_. This is
one of the first and most influential estimators of differential entropy.
* ``'van es'`` uses the bias-corrected estimator presented in [3]_, which
is not only consistent but, under some conditions, asymptotically normal.
* ``'ebrahimi'`` uses an estimator presented in [4]_, which was shown
in simulation to have smaller bias and mean squared error than
the Vasicek estimator.
    * ``'correa'`` uses the estimator presented in [5]_ based on local linear
      regression. In a simulation study, it had consistently smaller mean
      square error than the Vasicek estimator, but it is more expensive to
      compute.
    * ``'auto'`` selects the method automatically (default). Currently,
      this selects ``'van es'`` for very small samples (<=10), ``'ebrahimi'``
      for moderate sample sizes (11-1000), and ``'vasicek'`` for larger
      samples, but this behavior is subject to change in future versions.
All estimators are implemented as described in [6]_.
References
----------
.. [1] Vasicek, O. (1976). A test for normality based on sample entropy.
Journal of the Royal Statistical Society:
Series B (Methodological), 38(1), 54-59.
    .. [2] Grzegorzewski, P., & Wieczorkowski, R. (1999). Entropy-based
           goodness-of-fit test for exponentiality. Communications in
           Statistics-Theory and Methods, 28(5), 1183-1202.
.. [3] Van Es, B. (1992). Estimating functionals related to a density by a
class of statistics based on spacings. Scandinavian Journal of
Statistics, 61-72.
.. [4] Ebrahimi, N., Pflughoeft, K., & Soofi, E. S. (1994). Two measures
of sample entropy. Statistics & Probability Letters, 20(3), 225-234.
.. [5] Correa, J. C. (1995). A new estimator of entropy. Communications
in Statistics-Theory and Methods, 24(10), 2439-2449.
.. [6] Noughabi, H. A. (2015). Entropy Estimation Using Numerical Methods.
Annals of Data Science, 2(2), 231-241.
https://link.springer.com/article/10.1007/s40745-015-0045-9
Examples
--------
>>> import numpy as np
>>> from scipy.stats import differential_entropy, norm
Entropy of a standard normal distribution:
>>> rng = np.random.default_rng()
>>> values = rng.standard_normal(100)
>>> differential_entropy(values)
1.3407817436640392
Compare with the true entropy:
>>> float(norm.entropy())
1.4189385332046727
For several sample sizes between 5 and 1000, compare the accuracy of
the ``'vasicek'``, ``'van es'``, and ``'ebrahimi'`` methods. Specifically,
compare the root mean squared error (over 1000 trials) between the estimate
and the true differential entropy of the distribution.
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>>
>>>
>>> def rmse(res, expected):
... '''Root mean squared error'''
... return np.sqrt(np.mean((res - expected)**2))
>>>
>>>
>>> a, b = np.log10(5), np.log10(1000)
>>> ns = np.round(np.logspace(a, b, 10)).astype(int)
>>> reps = 1000 # number of repetitions for each sample size
>>> expected = stats.expon.entropy()
>>>
>>> method_errors = {'vasicek': [], 'van es': [], 'ebrahimi': []}
>>> for method in method_errors:
... for n in ns:
... rvs = stats.expon.rvs(size=(reps, n), random_state=rng)
... res = stats.differential_entropy(rvs, method=method, axis=-1)
... error = rmse(res, expected)
... method_errors[method].append(error)
>>>
>>> for method, errors in method_errors.items():
... plt.loglog(ns, errors, label=method)
>>>
>>> plt.legend()
>>> plt.xlabel('sample size')
>>> plt.ylabel('RMSE (1000 trials)')
>>> plt.title('Entropy Estimator Error (Exponential Distribution)')
"""
values = np.asarray(values)
values = np.moveaxis(values, axis, -1)
n = values.shape[-1] # number of observations
if window_length is None:
window_length = math.floor(math.sqrt(n) + 0.5)
if not 2 <= 2 * window_length < n:
raise ValueError(
f"Window length ({window_length}) must be positive and less "
f"than half the sample size ({n}).",
)
if base is not None and base <= 0:
raise ValueError("`base` must be a positive number or `None`.")
sorted_data = np.sort(values, axis=-1)
methods = {"vasicek": _vasicek_entropy,
"van es": _van_es_entropy,
"correa": _correa_entropy,
"ebrahimi": _ebrahimi_entropy,
"auto": _vasicek_entropy}
method = method.lower()
if method not in methods:
message = f"`method` must be one of {set(methods)}"
raise ValueError(message)
if method == "auto":
if n <= 10:
method = 'van es'
elif n <= 1000:
method = 'ebrahimi'
else:
method = 'vasicek'
res = methods[method](sorted_data, window_length)
if base is not None:
res /= np.log(base)
return res
def _pad_along_last_axis(X, m):
"""Pad the data for computing the rolling window difference."""
# scales a bit better than method in _vasicek_like_entropy
shape = np.array(X.shape)
shape[-1] = m
Xl = np.broadcast_to(X[..., [0]], shape) # [0] vs 0 to maintain shape
Xr = np.broadcast_to(X[..., [-1]], shape)
return np.concatenate((Xl, X, Xr), axis=-1)
def _vasicek_entropy(X, m):
"""Compute the Vasicek estimator as described in [6] Eq. 1.3."""
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
differences = X[..., 2 * m:] - X[..., : -2 * m:]
logs = np.log(n/(2*m) * differences)
return np.mean(logs, axis=-1)
def _van_es_entropy(X, m):
"""Compute the van Es estimator as described in [6]."""
# No equation number, but referred to as HVE_mn.
# Typo: there should be a log within the summation.
n = X.shape[-1]
difference = X[..., m:] - X[..., :-m]
term1 = 1/(n-m) * np.sum(np.log((n+1)/m * difference), axis=-1)
k = np.arange(m, n+1)
return term1 + np.sum(1/k) + np.log(m) - np.log(n+1)
def _ebrahimi_entropy(X, m):
"""Compute the Ebrahimi estimator as described in [6]."""
# No equation number, but referred to as HE_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
differences = X[..., 2 * m:] - X[..., : -2 * m:]
i = np.arange(1, n+1).astype(float)
ci = np.ones_like(i)*2
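    # Boundary-corrected weights: interior points keep c_i = 2, while the
    # first and last m order statistics get reduced weights (see [6]).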
ci[i <= m] = 1 + (i[i <= m] - 1)/m
ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m
logs = np.log(n * differences / (ci * m))
return np.mean(logs, axis=-1)
def _correa_entropy(X, m):
"""Compute the Correa estimator as described in [6]."""
# No equation number, but referred to as HC_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
i = np.arange(1, n+1)
dj = np.arange(-m, m+1)[:, None]
j = i + dj
j0 = j + m - 1 # 0-indexed version of j
Xibar = np.mean(X[..., j0], axis=-2, keepdims=True)
difference = X[..., j0] - Xibar
    num = np.sum(difference*dj, axis=-2)  # dj is j - i
den = n*np.sum(difference**2, axis=-2)
return -np.mean(np.log(num/den), axis=-1)
| 14,279 | 34.879397 | 79 | py |
scipy | scipy-main/scipy/stats/_bws_test.py |
import numpy as np
from functools import partial
from scipy import stats
def _bws_input_validation(x, y, alternative, method):
''' Input validation and standardization for bws test'''
x, y = np.atleast_1d(x, y)
if x.ndim > 1 or y.ndim > 1:
raise ValueError('`x` and `y` must be exactly one-dimensional.')
if np.isnan(x).any() or np.isnan(y).any():
raise ValueError('`x` and `y` must not contain NaNs.')
if np.size(x) == 0 or np.size(y) == 0:
raise ValueError('`x` and `y` must be of nonzero size.')
z = stats.rankdata(np.concatenate((x, y)))
x, y = z[:len(x)], z[len(x):]
alternatives = {'two-sided', 'less', 'greater'}
alternative = alternative.lower()
if alternative not in alternatives:
raise ValueError(f'`alternative` must be one of {alternatives}.')
method = stats.PermutationMethod() if method is None else method
if not isinstance(method, stats.PermutationMethod):
raise ValueError('`method` must be an instance of '
'`scipy.stats.PermutationMethod`')
return x, y, alternative, method
def _bws_statistic(x, y, alternative, axis):
'''Compute the BWS test statistic for two independent samples'''
# Public function currently does not accept `axis`, but `permutation_test`
# uses `axis` to make vectorized call.
Ri, Hj = np.sort(x, axis=axis), np.sort(y, axis=axis)
n, m = Ri.shape[axis], Hj.shape[axis]
i, j = np.arange(1, n+1), np.arange(1, m+1)
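    # Numerators: deviation of each rank from its expected value under H0;
    # denominators: proportional to the variance of that order statistic.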
Bx_num = Ri - (m + n)/n * i
By_num = Hj - (m + n)/m * j
if alternative == 'two-sided':
Bx_num *= Bx_num
By_num *= By_num
else:
Bx_num *= np.abs(Bx_num)
By_num *= np.abs(By_num)
Bx_den = i/(n+1) * (1 - i/(n+1)) * m*(m+n)/n
By_den = j/(m+1) * (1 - j/(m+1)) * n*(m+n)/m
Bx = 1/n * np.sum(Bx_num/Bx_den, axis=axis)
By = 1/m * np.sum(By_num/By_den, axis=axis)
B = (Bx + By) / 2 if alternative == 'two-sided' else (Bx - By) / 2
return B
def bws_test(x, y, *, alternative="two-sided", method=None):
r'''Perform the Baumgartner-Weiss-Schindler test on two independent samples.
The Baumgartner-Weiss-Schindler (BWS) test is a nonparametric test of
the null hypothesis that the distribution underlying sample `x`
is the same as the distribution underlying sample `y`. Unlike
the Kolmogorov-Smirnov, Wilcoxon, and Cramer-Von Mises tests,
the BWS test weights the integral by the variance of the difference
in cumulative distribution functions (CDFs), emphasizing the tails of the
distributions, which increases the power of the test in many applications.
Parameters
----------
x, y : array-like
1-d arrays of samples.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
Let *F(u)* and *G(u)* be the cumulative distribution functions of the
distributions underlying `x` and `y`, respectively. Then the following
alternative hypotheses are available:
* 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)* for
at least one *u*.
* 'less': the distribution underlying `x` is stochastically less than
the distribution underlying `y`, i.e. *F(u) >= G(u)* for all *u*.
* 'greater': the distribution underlying `x` is stochastically greater
than the distribution underlying `y`, i.e. *F(u) <= G(u)* for all
*u*.
Under a more restrictive set of assumptions, the alternative hypotheses
can be expressed in terms of the locations of the distributions;
        see [2]_ section 5.1.
method : PermutationMethod, optional
Configures the method used to compute the p-value. The default is
the default `PermutationMethod` object.
Returns
-------
res : PermutationTestResult
An object with attributes:
statistic : float
The observed test statistic of the data.
pvalue : float
The p-value for the given alternative.
null_distribution : ndarray
The values of the test statistic generated under the null hypothesis.
See also
--------
scipy.stats.wilcoxon, scipy.stats.mannwhitneyu, scipy.stats.ttest_ind
Notes
-----
When ``alternative=='two-sided'``, the statistic is defined by the
equations given in [1]_ Section 2. This statistic is not appropriate for
one-sided alternatives; in that case, the statistic is the *negative* of
that given by the equations in [1]_ Section 2. Consequently, when the
distribution of the first sample is stochastically greater than that of the
second sample, the statistic will tend to be positive.
References
----------
.. [1] Neuhäuser, M. (2005). Exact Tests Based on the
Baumgartner-Weiss-Schindler Statistic: A Survey. Statistical Papers,
46(1), 1-29.
.. [2] Fay, M. P., & Proschan, M. A. (2010). Wilcoxon-Mann-Whitney or t-test?
On assumptions for hypothesis tests and multiple interpretations of
decision rules. Statistics surveys, 4, 1.
Examples
--------
    We follow the example of table 3 in [1]_: Fourteen children were divided
    randomly into two groups. Their ranks at performing a specific test are
    as follows.
>>> import numpy as np
>>> x = [1, 2, 3, 4, 6, 7, 8]
>>> y = [5, 9, 10, 11, 12, 13, 14]
We use the BWS test to assess whether there is a statistically significant
difference between the two groups.
The null hypothesis is that there is no difference in the distributions of
performance between the two groups. We decide that a significance level of
1% is required to reject the null hypothesis in favor of the alternative
that the distributions are different.
Since the number of samples is very small, we can compare the observed test
statistic against the *exact* distribution of the test statistic under the
null hypothesis.
>>> from scipy.stats import bws_test
>>> res = bws_test(x, y)
>>> print(res.statistic)
5.132167152575315
This agrees with :math:`B = 5.132` reported in [1]_. The *p*-value produced
by `bws_test` also agrees with :math:`p = 0.0029` reported in [1]_.
>>> print(res.pvalue)
0.002913752913752914
Because the p-value is below our threshold of 1%, we take this as evidence
against the null hypothesis in favor of the alternative that there is a
difference in performance between the two groups.
'''
x, y, alternative, method = _bws_input_validation(x, y, alternative,
method)
bws_statistic = partial(_bws_statistic, alternative=alternative)
permutation_alternative = 'less' if alternative == 'less' else 'greater'
res = stats.permutation_test((x, y), bws_statistic,
alternative=permutation_alternative,
**method._asdict())
return res
| 7,059 | 38.662921 | 81 | py |
scipy | scipy-main/scipy/stats/_axis_nan_policy.py |
# Many scipy.stats functions support `axis` and `nan_policy` parameters.
# When the two are combined, it can be tricky to get all the behavior just
# right. This file contains utility functions useful for scipy.stats functions
# that support `axis` and `nan_policy`, including a decorator that
# automatically adds `axis` and `nan_policy` arguments to a function.
import numpy as np
from functools import wraps
from scipy._lib._docscrape import FunctionDoc, Parameter
from scipy._lib._util import _contains_nan
import inspect
def _broadcast_arrays(arrays, axis=None):
"""
Broadcast shapes of arrays, ignoring incompatibility of specified axes
"""
new_shapes = _broadcast_array_shapes(arrays, axis=axis)
if axis is None:
new_shapes = [new_shapes]*len(arrays)
return [np.broadcast_to(array, new_shape)
for array, new_shape in zip(arrays, new_shapes)]
def _broadcast_array_shapes(arrays, axis=None):
"""
Broadcast shapes of arrays, ignoring incompatibility of specified axes
"""
shapes = [np.asarray(arr).shape for arr in arrays]
return _broadcast_shapes(shapes, axis)
def _broadcast_shapes(shapes, axis=None):
"""
Broadcast shapes, ignoring incompatibility of specified axes
"""
if not shapes:
return shapes
# input validation
if axis is not None:
axis = np.atleast_1d(axis)
axis_int = axis.astype(int)
if not np.array_equal(axis_int, axis):
raise np.AxisError('`axis` must be an integer, a '
'tuple of integers, or `None`.')
axis = axis_int
# First, ensure all shapes have same number of dimensions by prepending 1s.
n_dims = max([len(shape) for shape in shapes])
new_shapes = np.ones((len(shapes), n_dims), dtype=int)
for row, shape in zip(new_shapes, shapes):
row[len(row)-len(shape):] = shape # can't use negative indices (-0:)
# Remove the shape elements of the axes to be ignored, but remember them.
if axis is not None:
axis[axis < 0] = n_dims + axis[axis < 0]
axis = np.sort(axis)
if axis[-1] >= n_dims or axis[0] < 0:
message = (f"`axis` is out of bounds "
f"for array of dimension {n_dims}")
raise np.AxisError(message)
if len(np.unique(axis)) != len(axis):
raise np.AxisError("`axis` must contain only distinct elements")
removed_shapes = new_shapes[:, axis]
new_shapes = np.delete(new_shapes, axis, axis=1)
# If arrays are broadcastable, shape elements that are 1 may be replaced
# with a corresponding non-1 shape element. Assuming arrays are
# broadcastable, that final shape element can be found with:
new_shape = np.max(new_shapes, axis=0)
# except in case of an empty array:
new_shape *= new_shapes.all(axis=0)
# Among all arrays, there can only be one unique non-1 shape element.
# Therefore, if any non-1 shape element does not match what we found
# above, the arrays must not be broadcastable after all.
if np.any(~((new_shapes == 1) | (new_shapes == new_shape))):
raise ValueError("Array shapes are incompatible for broadcasting.")
if axis is not None:
# Add back the shape elements that were ignored
new_axis = axis - np.arange(len(axis))
new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape))
for removed_shape in removed_shapes]
return new_shapes
else:
return tuple(new_shape)
def _broadcast_array_shapes_remove_axis(arrays, axis=None):
"""
Broadcast shapes of arrays, dropping specified axes
Given a sequence of arrays `arrays` and an integer or tuple `axis`, find
the shape of the broadcast result after consuming/dropping `axis`.
In other words, return output shape of a typical hypothesis test on
`arrays` vectorized along `axis`.
Examples
--------
>>> import numpy as np
>>> a = np.zeros((5, 2, 1))
>>> b = np.zeros((9, 3))
    >>> _broadcast_array_shapes_remove_axis((a, b), 1)
(5, 3)
"""
# Note that here, `axis=None` means do not consume/drop any axes - _not_
# ravel arrays before broadcasting.
shapes = [arr.shape for arr in arrays]
return _broadcast_shapes_remove_axis(shapes, axis)
def _broadcast_shapes_remove_axis(shapes, axis=None):
"""
Broadcast shapes, dropping specified axes
Same as _broadcast_array_shapes, but given a sequence
of array shapes `shapes` instead of the arrays themselves.
"""
shapes = _broadcast_shapes(shapes, axis)
shape = shapes[0]
if axis is not None:
shape = np.delete(shape, axis)
return tuple(shape)
def _broadcast_concatenate(arrays, axis):
"""Concatenate arrays along an axis with broadcasting."""
arrays = _broadcast_arrays(arrays, axis)
res = np.concatenate(arrays, axis=axis)
return res
# TODO: add support for `axis` tuples
def _remove_nans(samples, paired):
"Remove nans from paired or unpaired 1D samples"
# potential optimization: don't copy arrays that don't contain nans
if not paired:
return [sample[~np.isnan(sample)] for sample in samples]
# for paired samples, we need to remove the whole pair when any part
# has a nan
nans = np.isnan(samples[0])
for sample in samples[1:]:
nans = nans | np.isnan(sample)
not_nans = ~nans
return [sample[not_nans] for sample in samples]
def _remove_sentinel(samples, paired, sentinel):
"Remove sentinel values from paired or unpaired 1D samples"
# could consolidate with `_remove_nans`, but it's not quite as simple as
# passing `sentinel=np.nan` because `(np.nan == np.nan) is False`
# potential optimization: don't copy arrays that don't contain sentinel
if not paired:
return [sample[sample != sentinel] for sample in samples]
    # for paired samples, we need to remove the whole pair when any part
    # has a sentinel value
sentinels = (samples[0] == sentinel)
for sample in samples[1:]:
sentinels = sentinels | (sample == sentinel)
not_sentinels = ~sentinels
return [sample[not_sentinels] for sample in samples]
def _masked_arrays_2_sentinel_arrays(samples):
# masked arrays in `samples` are converted to regular arrays, and values
# corresponding with masked elements are replaced with a sentinel value
# return without modifying arrays if none have a mask
has_mask = False
for sample in samples:
mask = getattr(sample, 'mask', False)
has_mask = has_mask or np.any(mask)
if not has_mask:
return samples, None # None means there is no sentinel value
# Choose a sentinel value. We can't use `np.nan`, because sentinel (masked)
# values are always omitted, but there are different nan policies.
dtype = np.result_type(*samples)
dtype = dtype if np.issubdtype(dtype, np.number) else np.float64
for i in range(len(samples)):
# Things get more complicated if the arrays are of different types.
# We could have different sentinel values for each array, but
# the purpose of this code is convenience, not efficiency.
samples[i] = samples[i].astype(dtype, copy=False)
inexact = np.issubdtype(dtype, np.inexact)
info = np.finfo if inexact else np.iinfo
max_possible, min_possible = info(dtype).max, info(dtype).min
nextafter = np.nextafter if inexact else (lambda x, _: x - 1)
sentinel = max_possible
# For simplicity, min_possible/np.infs are not candidate sentinel values
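    # Walk downward from the largest representable value until a value absent
    # from every sample is found; the inner for/else breaks the while loop
    # only when no sample contained the candidate sentinel.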
while sentinel > min_possible:
for sample in samples:
if np.any(sample == sentinel): # choose a new sentinel value
sentinel = nextafter(sentinel, -np.inf)
break
else: # when sentinel value is OK, break the while loop
break
else:
message = ("This function replaces masked elements with sentinel "
"values, but the data contains all distinct values of this "
"data type. Consider promoting the dtype to `np.float64`.")
raise ValueError(message)
# replace masked elements with sentinel value
out_samples = []
for sample in samples:
mask = getattr(sample, 'mask', None)
if mask is not None: # turn all masked arrays into sentinel arrays
mask = np.broadcast_to(mask, sample.shape)
sample = sample.data.copy() if np.any(mask) else sample.data
sample = np.asarray(sample) # `sample.data` could be a memoryview?
sample[mask] = sentinel
out_samples.append(sample)
return out_samples, sentinel
def _check_empty_inputs(samples, axis):
"""
Check for empty sample; return appropriate output for a vectorized hypotest
"""
# if none of the samples are empty, we need to perform the test
if not any(sample.size == 0 for sample in samples):
return None
# otherwise, the statistic and p-value will be either empty arrays or
# arrays with NaNs. Produce the appropriate array and return it.
output_shape = _broadcast_array_shapes_remove_axis(samples, axis)
output = np.ones(output_shape) * np.nan
return output
def _add_reduced_axes(res, reduced_axes, keepdims):
"""
Add reduced axes back to all the arrays in the result object
if keepdims = True.
"""
return ([np.expand_dims(output, reduced_axes) for output in res]
if keepdims else res)
# Standard docstring / signature entries for `axis`, `nan_policy`, `keepdims`
_name = 'axis'
_desc = (
"""If an int, the axis of the input along which to compute the statistic.
The statistic of each axis-slice (e.g. row) of the input will appear in a
corresponding element of the output.
If ``None``, the input will be raveled before computing the statistic."""
.split('\n'))
def _get_axis_params(default_axis=0, _name=_name, _desc=_desc): # bind NOW
_type = f"int or None, default: {default_axis}"
_axis_parameter_doc = Parameter(_name, _type, _desc)
_axis_parameter = inspect.Parameter(_name,
inspect.Parameter.KEYWORD_ONLY,
default=default_axis)
return _axis_parameter_doc, _axis_parameter
_name = 'nan_policy'
_type = "{'propagate', 'omit', 'raise'}"
_desc = (
"""Defines how to handle input NaNs.
- ``propagate``: if a NaN is present in the axis slice (e.g. row) along
which the statistic is computed, the corresponding entry of the output
will be NaN.
- ``omit``: NaNs will be omitted when performing the calculation.
If insufficient data remains in the axis slice along which the
statistic is computed, the corresponding entry of the output will be
NaN.
- ``raise``: if a NaN is present, a ``ValueError`` will be raised."""
.split('\n'))
_nan_policy_parameter_doc = Parameter(_name, _type, _desc)
_nan_policy_parameter = inspect.Parameter(_name,
inspect.Parameter.KEYWORD_ONLY,
default='propagate')
_name = 'keepdims'
_type = "bool, default: False"
_desc = (
"""If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array."""
.split('\n'))
_keepdims_parameter_doc = Parameter(_name, _type, _desc)
_keepdims_parameter = inspect.Parameter(_name,
inspect.Parameter.KEYWORD_ONLY,
default=False)
_standard_note_addition = (
"""\nBeginning in SciPy 1.9, ``np.matrix`` inputs (not recommended for new
code) are converted to ``np.ndarray`` before the calculation is performed. In
this case, the output will be a scalar or ``np.ndarray`` of appropriate shape
rather than a 2D ``np.matrix``. Similarly, while masked elements of masked
arrays are ignored, the output will be a scalar or ``np.ndarray`` rather than a
masked array with ``mask=False``.""").split('\n')
def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
n_samples=1, paired=False,
result_to_tuple=None, too_small=0,
n_outputs=2, kwd_samples=[], override=None):
"""Factory for a wrapper that adds axis/nan_policy params to a function.
Parameters
----------
tuple_to_result : callable
Callable that returns an object of the type returned by the function
being wrapped (e.g. the namedtuple or dataclass returned by a
statistical test) provided the separate components (e.g. statistic,
pvalue).
default_axis : int, default: 0
The default value of the axis argument. Standard is 0 except when
backwards compatibility demands otherwise (e.g. `None`).
n_samples : int or callable, default: 1
The number of data samples accepted by the function
(e.g. `mannwhitneyu`), a callable that accepts a dictionary of
parameters passed into the function and returns the number of data
samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number
of samples (e.g. `kruskal`).
paired : {False, True}
Whether the function being wrapped treats the samples as paired (i.e.
corresponding elements of each sample should be considered as different
components of the same sample.)
result_to_tuple : callable, optional
Function that unpacks the results of the function being wrapped into
a tuple. This is essentially the inverse of `tuple_to_result`. Default
is `None`, which is appropriate for statistical tests that return a
        statistic, pvalue tuple (rather than, e.g., a non-iterable dataclass).
too_small : int, default: 0
        The largest unacceptably small sample for the function being wrapped.
For example, some functions require samples of size two or more or they
raise an error. This argument prevents the error from being raised when
input is not 1D and instead places a NaN in the corresponding element
of the result.
n_outputs : int or callable, default: 2
The number of outputs produced by the function given 1d sample(s). For
example, hypothesis tests that return a namedtuple or result object
with attributes ``statistic`` and ``pvalue`` use the default
``n_outputs=2``; summary statistics with scalar output use
``n_outputs=1``. Alternatively, may be a callable that accepts a
dictionary of arguments passed into the wrapped function and returns
the number of outputs corresponding with those arguments.
kwd_samples : sequence, default: []
The names of keyword parameters that should be treated as samples. For
example, `gmean` accepts as its first argument a sample `a` but
also `weights` as a fourth, optional keyword argument. In this case, we
use `n_samples=1` and kwd_samples=['weights'].
override : dict, default: {'vectorization': False, 'nan_propagation': True}
Pass a dictionary with ``'vectorization': True`` to ensure that the
        decorator overrides the function's behavior for multidimensional input.
Use ``'nan_propagation': False`` to ensure that the decorator does not
override the function's behavior for ``nan_policy='propagate'``.
(See `scipy.stats.mode`, for example.)
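    Examples
    --------
    A hypothetical sketch (the test and result type are illustrative, not a
    public API); the decorated function gains `axis`, `nan_policy`, and
    `keepdims` keyword arguments::

        from collections import namedtuple
        MyResult = namedtuple('MyResult', ('statistic', 'pvalue'))

        @_axis_nan_policy_factory(MyResult, n_samples=2)
        def my_test(x, y):
            ...  # operates on 1-D samples, returns MyResult(stat, p)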
"""
# Specify which existing behaviors the decorator must override
temp = override or {}
override = {'vectorization': False,
'nan_propagation': True}
override.update(temp)
if result_to_tuple is None:
def result_to_tuple(res):
return res
def is_too_small(samples):
for sample in samples:
if len(sample) <= too_small:
return True
return False
def axis_nan_policy_decorator(hypotest_fun_in):
@wraps(hypotest_fun_in)
def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds):
if _no_deco: # for testing, decorator does nothing
return hypotest_fun_in(*args, **kwds)
# We need to be flexible about whether position or keyword
# arguments are used, but we need to make sure users don't pass
# both for the same parameter. To complicate matters, some
# functions accept samples with *args, and some functions already
# accept `axis` and `nan_policy` as positional arguments.
# The strategy is to make sure that there is no duplication
            # between `args` and `kwds`, combine the two into `kwds`, then
            # extract the samples, `nan_policy`, and `axis` from `kwds`, as
            # they are
# dealt with separately.
# Check for intersection between positional and keyword args
params = list(inspect.signature(hypotest_fun_in).parameters)
if n_samples is None:
# Give unique names to each positional sample argument
# Note that *args can't be provided as a keyword argument
params = [f"arg{i}" for i in range(len(args))] + params[1:]
# raise if there are too many positional args
maxarg = (np.inf if inspect.getfullargspec(hypotest_fun_in).varargs
else len(inspect.getfullargspec(hypotest_fun_in).args))
if len(args) > maxarg: # let the function raise the right error
hypotest_fun_in(*args, **kwds)
# raise if multiple values passed for same parameter
d_args = dict(zip(params, args))
intersection = set(d_args) & set(kwds)
if intersection: # let the function raise the right error
hypotest_fun_in(*args, **kwds)
# Consolidate other positional and keyword args into `kwds`
kwds.update(d_args)
# rename avoids UnboundLocalError
if callable(n_samples):
# Future refactoring idea: no need for callable n_samples.
# Just replace `n_samples` and `kwd_samples` with a single
# list of the names of all samples, and treat all of them
# as `kwd_samples` are treated below.
n_samp = n_samples(kwds)
else:
n_samp = n_samples or len(args)
# get the number of outputs
n_out = n_outputs # rename to avoid UnboundLocalError
if callable(n_out):
n_out = n_out(kwds)
# If necessary, rearrange function signature: accept other samples
# as positional args right after the first n_samp args
kwd_samp = [name for name in kwd_samples
if kwds.get(name, None) is not None]
n_kwd_samp = len(kwd_samp)
if not kwd_samp:
hypotest_fun_out = hypotest_fun_in
else:
def hypotest_fun_out(*samples, **kwds):
new_kwds = dict(zip(kwd_samp, samples[n_samp:]))
kwds.update(new_kwds)
return hypotest_fun_in(*samples[:n_samp], **kwds)
# Extract the things we need here
try: # if something is missing
samples = [np.atleast_1d(kwds.pop(param))
for param in (params[:n_samp] + kwd_samp)]
except KeyError: # let the function raise the right error
# might need to revisit this if required arg is not a "sample"
hypotest_fun_in(*args, **kwds)
vectorized = True if 'axis' in params else False
vectorized = vectorized and not override['vectorization']
axis = kwds.pop('axis', default_axis)
nan_policy = kwds.pop('nan_policy', 'propagate')
keepdims = kwds.pop("keepdims", False)
del args # avoid the possibility of passing both `args` and `kwds`
# convert masked arrays to regular arrays with sentinel values
samples, sentinel = _masked_arrays_2_sentinel_arrays(samples)
# standardize to always work along last axis
reduced_axes = axis
if axis is None:
if samples:
# when axis=None, take the maximum of all dimensions since
# all the dimensions are reduced.
n_dims = np.max([sample.ndim for sample in samples])
reduced_axes = tuple(range(n_dims))
samples = [np.asarray(sample.ravel()) for sample in samples]
else:
samples = _broadcast_arrays(samples, axis=axis)
axis = np.atleast_1d(axis)
n_axes = len(axis)
# move all axes in `axis` to the end to be raveled
samples = [np.moveaxis(sample, axis, range(-len(axis), 0))
for sample in samples]
shapes = [sample.shape for sample in samples]
# New shape is unchanged for all axes _not_ in `axis`
# At the end, we append the product of the shapes of the axes
# in `axis`. Appending -1 doesn't work for zero-size arrays!
new_shapes = [shape[:-n_axes] + (np.prod(shape[-n_axes:]),)
for shape in shapes]
samples = [sample.reshape(new_shape)
for sample, new_shape in zip(samples, new_shapes)]
axis = -1 # work over the last axis
# if axis is not needed, just handle nan_policy and return
ndims = np.array([sample.ndim for sample in samples])
if np.all(ndims <= 1):
# Addresses nan_policy == "raise"
if nan_policy != 'propagate' or override['nan_propagation']:
contains_nan = [_contains_nan(sample, nan_policy)[0]
for sample in samples]
else:
# Behave as though there are no NaNs (even if there are)
contains_nan = [False]*len(samples)
# Addresses nan_policy == "propagate"
if any(contains_nan) and (nan_policy == 'propagate'
and override['nan_propagation']):
res = np.full(n_out, np.nan)
res = _add_reduced_axes(res, reduced_axes, keepdims)
return tuple_to_result(*res)
# Addresses nan_policy == "omit"
if any(contains_nan) and nan_policy == 'omit':
# consider passing in contains_nan
samples = _remove_nans(samples, paired)
# ideally, this is what the behavior would be:
# if is_too_small(samples):
# return tuple_to_result(np.nan, np.nan)
# but some existing functions raise exceptions, and changing
# behavior of those would break backward compatibility.
if sentinel:
samples = _remove_sentinel(samples, paired, sentinel)
res = hypotest_fun_out(*samples, **kwds)
res = result_to_tuple(res)
res = _add_reduced_axes(res, reduced_axes, keepdims)
return tuple_to_result(*res)
# check for empty input
# ideally, move this to the top, but some existing functions raise
# exceptions for empty input, so overriding it would break
# backward compatibility.
empty_output = _check_empty_inputs(samples, axis)
if empty_output is not None:
res = [empty_output.copy() for i in range(n_out)]
res = _add_reduced_axes(res, reduced_axes, keepdims)
return tuple_to_result(*res)
# otherwise, concatenate all samples along axis, remembering where
# each separate sample begins
lengths = np.array([sample.shape[axis] for sample in samples])
split_indices = np.cumsum(lengths)
x = _broadcast_concatenate(samples, axis)
# Addresses nan_policy == "raise"
if nan_policy != 'propagate' or override['nan_propagation']:
contains_nan, _ = _contains_nan(x, nan_policy)
else:
contains_nan = False # behave like there are no NaNs
if vectorized and not contains_nan and not sentinel:
res = hypotest_fun_out(*samples, axis=axis, **kwds)
res = result_to_tuple(res)
res = _add_reduced_axes(res, reduced_axes, keepdims)
return tuple_to_result(*res)
# Addresses nan_policy == "omit"
if contains_nan and nan_policy == 'omit':
def hypotest_fun(x):
samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
samples = _remove_nans(samples, paired)
if sentinel:
samples = _remove_sentinel(samples, paired, sentinel)
if is_too_small(samples):
return np.full(n_out, np.nan)
return result_to_tuple(hypotest_fun_out(*samples, **kwds))
# Addresses nan_policy == "propagate"
elif (contains_nan and nan_policy == 'propagate'
and override['nan_propagation']):
def hypotest_fun(x):
if np.isnan(x).any():
return np.full(n_out, np.nan)
samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
if sentinel:
samples = _remove_sentinel(samples, paired, sentinel)
if is_too_small(samples):
return np.full(n_out, np.nan)
return result_to_tuple(hypotest_fun_out(*samples, **kwds))
else:
def hypotest_fun(x):
samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
if sentinel:
samples = _remove_sentinel(samples, paired, sentinel)
if is_too_small(samples):
return np.full(n_out, np.nan)
return result_to_tuple(hypotest_fun_out(*samples, **kwds))
x = np.moveaxis(x, axis, 0)
res = np.apply_along_axis(hypotest_fun, axis=0, arr=x)
res = _add_reduced_axes(res, reduced_axes, keepdims)
return tuple_to_result(*res)
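        # The wrapper above collapses all reduced axes into a single last
        # axis before dispatching. A standalone sketch of that moveaxis +
        # reshape idiom (the array and axes here are illustrative only):
        #
        #     >>> a = np.arange(24).reshape(2, 3, 4)
        #     >>> b = np.moveaxis(a, (0, 2), (-2, -1)).reshape(3, 8)
        #     >>> b.shape        # axes 0 and 2 merged into one last axis
        #     (3, 8)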
_axis_parameter_doc, _axis_parameter = _get_axis_params(default_axis)
doc = FunctionDoc(axis_nan_policy_wrapper)
parameter_names = [param.name for param in doc['Parameters']]
if 'axis' in parameter_names:
doc['Parameters'][parameter_names.index('axis')] = (
_axis_parameter_doc)
else:
doc['Parameters'].append(_axis_parameter_doc)
if 'nan_policy' in parameter_names:
doc['Parameters'][parameter_names.index('nan_policy')] = (
_nan_policy_parameter_doc)
else:
doc['Parameters'].append(_nan_policy_parameter_doc)
if 'keepdims' in parameter_names:
doc['Parameters'][parameter_names.index('keepdims')] = (
_keepdims_parameter_doc)
else:
doc['Parameters'].append(_keepdims_parameter_doc)
doc['Notes'] += _standard_note_addition
doc = str(doc).split("\n", 1)[1] # remove signature
axis_nan_policy_wrapper.__doc__ = str(doc)
sig = inspect.signature(axis_nan_policy_wrapper)
parameters = sig.parameters
parameter_list = list(parameters.values())
if 'axis' not in parameters:
parameter_list.append(_axis_parameter)
if 'nan_policy' not in parameters:
parameter_list.append(_nan_policy_parameter)
if 'keepdims' not in parameters:
parameter_list.append(_keepdims_parameter)
sig = sig.replace(parameters=parameter_list)
axis_nan_policy_wrapper.__signature__ = sig
return axis_nan_policy_wrapper
return axis_nan_policy_decorator
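# A minimal standalone sketch of the signature-patching idiom used above
# (`f` and the appended parameter are hypothetical, not part of this module):
#
#     >>> import inspect
#     >>> def f(x): ...
#     >>> sig = inspect.signature(f)
#     >>> p = inspect.Parameter('axis', inspect.Parameter.KEYWORD_ONLY,
#     ...                       default=0)
#     >>> f.__signature__ = sig.replace(
#     ...     parameters=[*sig.parameters.values(), p])
#     >>> inspect.signature(f)
#     <Signature (x, *, axis=0)>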
| 28384
| 43.984152
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_multivariate.py
|
#
# Author: Joris Vankerschaver 2013
#
import math
import numpy as np
from numpy import asarray_chkfinite, asarray
from numpy.lib import NumpyVersion
import scipy.linalg
from scipy._lib import doccer
from scipy.special import (gammaln, psi, multigammaln, xlogy, entr, betaln,
ive, loggamma)
from scipy._lib._util import check_random_state, _lazywhere
from scipy.linalg.blas import drot
from scipy.linalg._misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
from ._continuous_distns import norm
from ._discrete_distns import binom
from . import _mvn, _covariance, _rcont
from ._qmvnt import _qmvt
from ._morestats import directional_stats
from scipy.optimize import root_scalar
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'dirichlet_multinomial',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation',
'unitary_group',
'multivariate_t',
'multivariate_hypergeom',
'random_table',
'uniform_direction',
'vonmises_fisher']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
seed : {None, int, np.random.RandomState, np.random.Generator}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
    singular and what its numerical matrix rank is.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
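# Worked example of the default cutoff: for a float64 spectrum the factor
# is 1e6, so eps = 1e6 * machine epsilon * max|eigenvalue|:
#
#     >>> s = np.array([4.0, 1.0, -1e-12])
#     >>> float(_eigvalsh_to_eps(s) / np.finfo(np.float64).eps)
#     4000000.0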
def _pinv_1d(v, eps=1e-5):
"""A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
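# For example, entries below `eps` in magnitude are zeroed rather than
# inverted, which is what makes the pseudoinverse well defined:
#
#     >>> _pinv_1d([4.0, -2.0, 1e-12])
#     array([ 0.25, -0.5 ,  0.  ])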
class _PSD:
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
    This class addresses two issues. Firstly, it allows the pseudoinverse,
    the logarithm of the pseudo-determinant, and the rank of the matrix
    to be computed using one call to eigh instead of three.
    Secondly, it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
self._M = np.asarray(M)
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
msg = "The input matrix must be symmetric positive semidefinite."
raise ValueError(msg)
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
msg = ("When `allow_singular is False`, the input matrix must be "
"symmetric positive definite.")
raise np.linalg.LinAlgError(msg)
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Save the eigenvector basis, and tolerance for testing support
self.eps = 1e3*eps
self.V = u[:, s <= eps]
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize attributes to be lazily computed.
self._pinv = None
def _support_mask(self, x):
"""
Check whether x lies in the support of the distribution.
"""
residual = np.linalg.norm(x @ self.V, axis=-1)
in_support = residual < self.eps
return in_support
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
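# A small demonstration of `_PSD` on a rank-deficient (singular) matrix;
# the rank, log pseudo-determinant and pseudoinverse are all derived from
# one eigendecomposition with a shared eigenvalue cutoff:
#
#     >>> psd = _PSD(np.diag([2.0, 0.0]))
#     >>> psd.rank
#     1
#     >>> float(psd.log_pdet)        # log pseudo-determinant = log(2)
#     0.6931471805599453
#     >>> psd.pinv
#     array([[0.5, 0. ],
#            [0. , 0. ]])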
class multi_rv_generic:
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super().__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the Generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen:
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, default: ``[0]``
Mean of the distribution.
cov : array_like or `Covariance`, default: ``[1]``
Symmetric positive (semi)definite covariance matrix of the distribution.
allow_singular : bool, default: ``False``
Whether to allow a singular covariance matrix. This is ignored if `cov` is
a `Covariance` object.
"""
_mvn_doc_callparams_note = """\
Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, a two-dimensional array_like,
or a `Covariance` object.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
pdf(x, mean=None, cov=1, allow_singular=False)
Probability density function.
logpdf(x, mean=None, cov=1, allow_singular=False)
Log of the probability density function.
cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5, lower_limit=None) # noqa
Cumulative distribution function.
logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)
Log of the cumulative distribution function.
rvs(mean=None, cov=1, size=1, random_state=None)
Draw random samples from a multivariate normal distribution.
entropy()
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` may be an instance of a subclass of
`Covariance`, e.g. `scipy.stats.CovViaPrecision`. If so, `allow_singular`
is ignored.
Otherwise, `cov` must be a symmetric positive semidefinite
matrix when `allow_singular` is True; it must be (strictly) positive
definite when `allow_singular` is False.
Symmetry is not checked; only the lower triangular portion is used.
The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
:math:`k` the rank of :math:`\Sigma`. In case of singular :math:`\Sigma`,
SciPy extends this definition according to [1]_.
.. versionadded:: 0.14.0
References
----------
.. [1] Multivariate Normal Distribution - Degenerate Case, Wikipedia,
https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Degenerate_case
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
>>> plt.show()
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
>>> rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
>>> # Frozen object with the same methods but holding the given
>>> # mean and covariance fixed.
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, mean, cov, allow_singular=True):
"""
        Infer dimensionality from mean or covariance matrix, and ensure
        that mean and covariance are a full vector and matrix, respectively.
"""
if isinstance(cov, _covariance.Covariance):
return self._process_parameters_Covariance(mean, cov)
else:
# Before `Covariance` classes were introduced,
# `multivariate_normal` accepted plain arrays as `cov` and used the
# following input validation. To avoid disturbing the behavior of
# `multivariate_normal` when plain arrays are used, we use the
# original input validation here.
dim, mean, cov = self._process_parameters_psd(None, mean, cov)
# After input validation, some methods then processed the arrays
# with a `_PSD` object and used that to perform computation.
# To avoid branching statements in each method depending on whether
# `cov` is an array or `Covariance` object, we always process the
# array with `_PSD`, and then use wrapper that satisfies the
# `Covariance` interface, `CovViaPSD`.
psd = _PSD(cov, allow_singular=allow_singular)
cov_object = _covariance.CovViaPSD(psd)
return dim, mean, cov_object
def _process_parameters_Covariance(self, mean, cov):
dim = cov.shape[-1]
mean = np.array([0.]) if mean is None else mean
message = (f"`cov` represents a covariance matrix in {dim} dimensions,"
f"and so `mean` must be broadcastable to shape {(dim,)}")
try:
mean = np.broadcast_to(mean, dim)
except ValueError as e:
raise ValueError(message) from e
return dim, mean, cov
def _process_parameters_psd(self, dim, mean, cov):
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be "
"a scalar.")
# Check input sizes and return full arrays for mean and cov if
# necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean = mean.reshape(1)
cov = cov.reshape(1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." %
dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
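    # Sketch of the plain-array forms accepted for `cov`; a scalar and a
    # 1-D vector are both normalized to a full (dim, dim) matrix:
    #
    #     >>> mvn = multivariate_normal_gen()
    #     >>> mvn._process_parameters_psd(None, [0., 0.], 3.0)[2]
    #     array([[3., 0.],
    #            [0., 3.]])
    #     >>> mvn._process_parameters_psd(None, [0., 0.], [1., 4.])[2]
    #     array([[1., 0.],
    #            [0., 4.]])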
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, cov_object):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
cov_object : Covariance
An object representing the Covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_cov, rank = cov_object.log_pdet, cov_object.rank
dev = x - mean
if dev.ndim > 1:
log_det_cov = log_det_cov[..., np.newaxis]
rank = rank[..., np.newaxis]
maha = np.sum(np.square(cov_object.whiten(dev)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
params = self._process_parameters(mean, cov, allow_singular)
dim, mean, cov_object = params
x = self._process_quantiles(x, dim)
out = self._logpdf(x, mean, cov_object)
if np.any(cov_object.rank < dim):
out_of_bounds = ~cov_object._support_mask(x-mean)
out[out_of_bounds] = -np.inf
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
params = self._process_parameters(mean, cov, allow_singular)
dim, mean, cov_object = params
x = self._process_quantiles(x, dim)
out = np.exp(self._logpdf(x, mean, cov_object))
if np.any(cov_object.rank < dim):
out_of_bounds = ~cov_object._support_mask(x-mean)
out[out_of_bounds] = 0.0
return _squeeze_output(out)
def _cdf(self, x, mean, cov, maxpts, abseps, releps, lower_limit):
"""Multivariate normal cumulative distribution function.
Parameters
----------
x : ndarray
Points at which to evaluate the cumulative distribution function.
mean : ndarray
Mean of the distribution
cov : array_like
Covariance matrix of the distribution
maxpts : integer
The maximum number of points to use for integration
abseps : float
Absolute error tolerance
releps : float
Relative error tolerance
lower_limit : array_like, optional
Lower limit of integration of the cumulative distribution function.
Default is negative infinity. Must be broadcastable with `x`.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'cdf' instead.
.. versionadded:: 1.0.0
"""
lower = (np.full(mean.shape, -np.inf)
if lower_limit is None else lower_limit)
# In 2d, _mvn.mvnun accepts input in which `lower` bound elements
# are greater than `x`. Not so in other dimensions. Fix this by
# ensuring that lower bounds are indeed lower when passed, then
# set signs of resulting CDF manually.
b, a = np.broadcast_arrays(x, lower)
i_swap = b < a
signs = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative
a, b = a.copy(), b.copy()
a[i_swap], b[i_swap] = b[i_swap], a[i_swap]
n = x.shape[-1]
limits = np.concatenate((a, b), axis=-1)
# mvnun expects 1-d arguments, so process points sequentially
def func1d(limits):
return _mvn.mvnun(limits[:n], limits[n:], mean, cov,
maxpts, abseps, releps)[0]
out = np.apply_along_axis(func1d, -1, limits) * signs
return _squeeze_output(out)
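    # Hedged numeric sketch of the sign convention above: each swapped
    # (lower, upper) pair flips the sign of the rectangle probability. In
    # 1-D, P(0 < X < 1) for a standard normal is about 0.3413, so:
    #
    #     >>> multivariate_normal.cdf(1.0, lower_limit=0)
    #     0.3413447...
    #     >>> multivariate_normal.cdf(0.0, lower_limit=1)  # limits swapped
    #     -0.3413447...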
def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5, *, lower_limit=None):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
lower_limit : array_like, optional
Lower limit of integration of the cumulative distribution function.
Default is negative infinity. Must be broadcastable with `x`.
Returns
-------
cdf : ndarray or scalar
Log of the cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
params = self._process_parameters(mean, cov, allow_singular)
dim, mean, cov_object = params
cov = cov_object.covariance
x = self._process_quantiles(x, dim)
if not maxpts:
maxpts = 1000000 * dim
cdf = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit)
# the log of a negative real is complex, and cdf can be negative
# if lower limit is greater than upper limit
cdf = cdf + 0j if np.any(cdf < 0) else cdf
out = np.log(cdf)
return out
def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5, *, lower_limit=None):
"""Multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
lower_limit : array_like, optional
Lower limit of integration of the cumulative distribution function.
Default is negative infinity. Must be broadcastable with `x`.
Returns
-------
cdf : ndarray or scalar
Cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
params = self._process_parameters(mean, cov, allow_singular)
dim, mean, cov_object = params
cov = cov_object.covariance
x = self._process_quantiles(x, dim)
if not maxpts:
maxpts = 1000000 * dim
out = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit)
return out
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov_object = self._process_parameters(mean, cov)
random_state = self._get_random_state(random_state)
if isinstance(cov_object, _covariance.CovViaPSD):
cov = cov_object.covariance
out = random_state.multivariate_normal(mean, cov, size)
out = _squeeze_output(out)
else:
size = size or tuple()
if not np.iterable(size):
size = (size,)
shape = tuple(size) + (cov_object.shape[-1],)
x = random_state.normal(size=shape)
out = mean + cov_object.colorize(x)
return out
def entropy(self, mean=None, cov=1):
"""Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov_object = self._process_parameters(mean, cov)
return 0.5 * (cov_object.rank * (_LOG_2PI + 1) + cov_object.log_pdet)
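    # Sanity check of this closed form against the familiar univariate
    # result h = 0.5 * log(2 * pi * e * sigma**2):
    #
    #     >>> h = multivariate_normal.entropy(cov=4.0)
    #     >>> bool(np.isclose(h, 0.5 * np.log(2 * np.pi * np.e * 4.0)))
    #     True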
def fit(self, x):
"""Fit a multivariate normal distribution to data.
Parameters
----------
x : ndarray (m, n)
Data the distribution is fitted to. Must have two axes.
The first axis of length `m` represents the number of vectors
the distribution is fitted to. The second axis of length `n`
determines the dimensionality of the fitted distribution.
Returns
-------
mean : ndarray (n, )
Maximum likelihood estimate of the mean vector
cov : ndarray (n, n)
Maximum likelihood estimate of the covariance matrix
"""
# input validation
x = np.asarray(x)
if x.ndim != 2:
raise ValueError("`x` must be two-dimensional.")
n_vectors, dim = x.shape
# parameter estimation
# reference: https://home.ttic.edu/~shubhendu/Slides/Estimation.pdf
mean = x.mean(axis=0)
centered_data = x - mean
cov = centered_data.T @ centered_data / n_vectors
return mean, cov
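    # Note that the covariance returned by `fit` is the maximum likelihood
    # estimate, which divides by m rather than m - 1 (``np.cov``'s default):
    #
    #     >>> mean, cov = multivariate_normal.fit(np.array([[0., 0.],
    ...     ...                                             [2., 2.]]))
    #     >>> mean
    #     array([1., 1.])
    #     >>> cov        # np.cov would give twice this matrix
    #     array([[1., 1.],
    #            [1., 1.]])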
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
maxpts=None, abseps=1e-5, releps=1e-5):
"""Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, default: ``[0]``
Mean of the distribution.
cov : array_like, default: ``[1]``
Symmetric positive (semi)definite covariance matrix of the
distribution.
allow_singular : bool, default: ``False``
Whether to allow a singular covariance matrix.
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
maxpts : integer, optional
The maximum number of points to use for integration of the
cumulative distribution function (default `1000000*dim`)
abseps : float, optional
Absolute error tolerance for the cumulative distribution function
(default 1e-5)
releps : float, optional
Relative error tolerance for the cumulative distribution function
(default 1e-5)
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov_object = (
self._dist._process_parameters(mean, cov, allow_singular))
self.allow_singular = allow_singular or self.cov_object._allow_singular
if not maxpts:
maxpts = 1000000 * self.dim
self.maxpts = maxpts
self.abseps = abseps
self.releps = releps
@property
def cov(self):
return self.cov_object.covariance
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_object)
if np.any(self.cov_object.rank < self.dim):
out_of_bounds = ~self.cov_object._support_mask(x-self.mean)
out[out_of_bounds] = -np.inf
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def logcdf(self, x, *, lower_limit=None):
cdf = self.cdf(x, lower_limit=lower_limit)
# the log of a negative real is complex, and cdf can be negative
# if lower limit is greater than upper limit
cdf = cdf + 0j if np.any(cdf < 0) else cdf
out = np.log(cdf)
return out
def cdf(self, x, *, lower_limit=None):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._cdf(x, self.mean, self.cov_object.covariance,
self.maxpts, self.abseps, self.releps,
lower_limit)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov_object, size, random_state)
def entropy(self):
"""Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_object.log_pdet
rank = self.cov_object.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = """\
If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
    among-row covariance matrix. The `colcov` keyword specifies the
among-column covariance matrix.
Methods
-------
pdf(X, mean=None, rowcov=1, colcov=1)
Probability density function.
logpdf(X, mean=None, rowcov=1, colcov=1)
Log of the probability density function.
rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)
Draw random samples.
    entropy(rowcov=1, colcov=1)
Differential entropy.
Parameters
----------
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> import numpy as np
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
>>> rv = matrix_normal(mean=None, rowcov=1, colcov=1)
>>> # Frozen object with the same methods but holding the given
>>> # mean and covariance fixed.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
            if np.any(np.asarray(meanshape) == 0):
                raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the "
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the "
"same number of columns.")
else:
mean = np.zeros((numrows, numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
        Adjust quantiles array so that the last two axes label the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""Log of the matrix normal probability density function.
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.moveaxis(X-mean, -1, 0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
# We aren't generating standard normal variates with size=(size,
# dims[0], dims[1]) directly to ensure random variates remain backwards
# compatible. See https://github.com/scipy/scipy/pull/12312 for more
# details.
std_norm = random_state.standard_normal(
size=(dims[1], size, dims[0])
).transpose(1, 2, 0)
out = mean + np.einsum('jp,ipq,kq->ijk',
rowchol, std_norm, colchol,
optimize=True)
if size == 1:
out = out.reshape(mean.shape)
return out
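    # The einsum above applies, per sample, the standard colouring step
    # X = M + L_U @ Z @ L_V.T with L_U = chol(rowcov) and L_V = chol(colcov).
    # A direct (slower) equivalent for a single draw Z of shape (m, n):
    #
    #     >>> U, V = np.diag([1.0, 2.0]), np.diag([3.0, 4.0, 5.0])
    #     >>> LU = scipy.linalg.cholesky(U, lower=True)
    #     >>> LV = scipy.linalg.cholesky(V, lower=True)
    #     >>> Z = np.random.default_rng(0).standard_normal((2, 3))
    #     >>> X = LU @ Z @ LV.T      # cov(vec(X)) = kron(V, U)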
def entropy(self, rowcov=1, colcov=1):
"""Log of the matrix normal probability density function.
Parameters
----------
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
Returns
-------
entropy : float
Entropy of the distribution
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dummy_mean = np.zeros((rowcov.shape[0], colcov.shape[0]))
dims, _, rowcov, colcov = self._process_parameters(dummy_mean,
rowcov,
colcov)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
return self._entropy(dims, rowpsd.log_pdet, colpsd.log_pdet)
def _entropy(self, dims, row_cov_logdet, col_cov_logdet):
n, p = dims
return (0.5 * n * p * (1 + _LOG_2PI) + 0.5 * p * row_cov_logdet +
0.5 * n * col_cov_logdet)
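    # Consistency check with the Kronecker-product form noted in the class
    # docstring: the matrix normal entropy equals that of the equivalent
    # multivariate normal with covariance ``np.kron(V, U)``:
    #
    #     >>> U, V = np.diag([1.0, 2.0]), np.diag([3.0, 4.0])
    #     >>> h1 = matrix_normal.entropy(rowcov=U, colcov=V)
    #     >>> h2 = multivariate_normal.entropy(cov=np.kron(V, U))
    #     >>> bool(np.isclose(h1, h2))
    #     True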
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
"""
Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
def entropy(self):
return self._dist._entropy(self.dims, self.rowpsd.log_pdet,
self.colpsd.log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'entropy']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = {}.".format(alpha.shape))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = {} "
"and x.shape = {}.".format(alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) < 0:
raise ValueError("Each entry in 'x' must be greater than or equal "
"to zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
# Check x_i > 0 or alpha_i > 1
xeq0 = (x == 0)
alphalt1 = (alpha < 1)
if x.shape != alpha.shape:
alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)
chk = np.logical_and(xeq0, alphalt1)
if np.sum(chk):
raise ValueError("Each entry in 'x' must be greater than zero if its "
"alpha is less than one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
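# For example, a length-(K-1) input is completed with the implicit last
# component 1 - sum(x) before the remaining checks run:
#
#     >>> alpha = np.array([1.0, 2.0, 3.0])
#     >>> _dirichlet_check_input(alpha, np.array([0.2, 0.3]))
#     array([0.2, 0.3, 0.5])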
def _lnB(alpha):
r"""Internal helper function to compute the log of the useful quotient.
.. math::
       B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}
{\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
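# Worked example: for alpha = (1, 1, 1), B(alpha) = Gamma(1)**3 / Gamma(3)
# = 1/2, so the helper returns -log(2):
#
#     >>> float(_lnB(np.array([1.0, 1.0, 1.0])))
#     -0.6931471805599453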
class dirichlet_gen(multi_rv_generic):
r"""A Dirichlet random variable.
The ``alpha`` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
pdf(x, alpha)
Probability density function.
logpdf(x, alpha)
Log of the probability density function.
rvs(alpha, size=1, random_state=None)
Draw random samples from a Dirichlet distribution.
mean(alpha)
The mean of the Dirichlet distribution
var(alpha)
The variance of the Dirichlet distribution
cov(alpha)
The covariance of the Dirichlet distribution
entropy(alpha)
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
    Each :math:`\alpha` entry must be positive. The distribution has support
    only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i = 1
where :math:`0 < x_i < 1`.
If the quantiles don't lie within the simplex, a ValueError is raised.
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)` is the vector of
    concentration parameters, and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the `dirichlet` interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import dirichlet
Generate a dirichlet random variable
>>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles
>>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters
>>> dirichlet.pdf(quantiles, alpha)
0.2843831684937255
The same PDF but following a log scale
>>> dirichlet.logpdf(quantiles, alpha)
-1.2574327653159187
Once we specify the dirichlet distribution
we can then calculate quantities of interest
>>> dirichlet.mean(alpha) # get the mean of the distribution
array([0.01960784, 0.24509804, 0.73529412])
>>> dirichlet.var(alpha) # get variance
array([0.00089829, 0.00864603, 0.00909517])
>>> dirichlet.entropy(alpha) # calculate the differential entropy
-4.3280162474082715
We can also return random samples from the distribution
>>> dirichlet.rvs(alpha, size=1, random_state=1)
array([[0.00766178, 0.24670518, 0.74563305]])
>>> dirichlet.rvs(alpha, size=2, random_state=2)
array([[0.01639427, 0.1292273 , 0.85437844],
[0.00156917, 0.19033695, 0.80809388]])
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
>>> rv = dirichlet(alpha)
>>> # Frozen object with the same methods but holding the given
>>> # concentration parameters fixed.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0)
def logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""Mean of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray or scalar
Mean of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""Variance of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray or scalar
Variance of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return _squeeze_output(out)
def cov(self, alpha):
"""Covariance matrix of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
a = alpha / alpha0
cov = (np.diag(a) - np.outer(a, a)) / (alpha0 + 1)
return _squeeze_output(cov)
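    # The diagonal of this matrix agrees with `var`: cov[i, i] reduces to
    # alpha_i * (alpha_0 - alpha_i) / (alpha_0**2 * (alpha_0 + 1)):
    #
    #     >>> a = np.array([1.0, 2.0, 3.0])
    #     >>> bool(np.allclose(np.diag(dirichlet.cov(a)), dirichlet.var(a)))
    #     True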
def entropy(self, alpha):
"""
Differential entropy of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def cov(self):
return self._dist.cov(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'cov', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix). These arguments must satisfy the relationship
``df > scale.ndim - 1``, but see notes on using the `rvs` method with
``df < scale.ndim``.
Methods
-------
pdf(x, df, scale)
Probability density function.
logpdf(x, df, scale)
Log of the probability density function.
rvs(df, scale, size=1, random_state=None)
Draw random samples from a Wishart distribution.
entropy()
Compute the differential entropy of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Raises
------
scipy.linalg.LinAlgError
If the scale matrix `scale` is not positive definite.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported. Symmetry is not checked; only the lower triangular
portion is used.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
The algorithm [2]_ implemented by the `rvs` method may
produce numerically singular matrices with :math:`p - 1 < \nu < p`; the
user may wish to check for this condition and generate replacement samples
as necessary.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
>>> plt.show()
The input quantiles can be any shape of array, as long as the last
axis labels the components.
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
>>> rv = wishart(df=1, scale=1)
>>> # Frozen object with the same methods but holding the given
>>> # degrees of freedom and scale fixed.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis, np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df <= dim - 1:
raise ValueError("Degrees of freedom must be greater than the "
"dimension of scale matrix minus 1.")
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be {}, got {}.'.format((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
                             ' Got size = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""Log of the Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
        # Log determinant of x
        # Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
        # gives us a 1-dim vector of determinants.
        # Also retrieve tr(scale^{-1} x) for each component.
log_det_x = np.empty(x.shape[-1])
scale_inv_x = np.empty(x.shape)
tr_scale_inv_x = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
            Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""Mode of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""Mode of the Wishart distribution
        Only valid if the degrees of freedom are at least the dimension of the
        scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
            If `random_state` is None (or `np.random`), the
            `numpy.random.RandomState` singleton is used.
            If `random_state` is an int, a new ``RandomState`` instance is
            used, seeded with `random_state`.
            If `random_state` is already a ``Generator`` or ``RandomState``
            instance then that instance is used.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) +
shape[::-1]).T)
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None, None, None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
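        # Editor's sketch (illustrative, for dim == 2 and scalar shape): each
        # A built above has the Bartlett form
        #     [[sqrt(chi2(df)),       0          ],
        #      [   N(0, 1),    sqrt(chi2(df-1))  ]]
        # so that A @ A.T ~ W(df, I); the caller then maps it to W(df, scale).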
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from a Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
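        As an illustrative check (added here; it follows from the identity,
        noted in the class docstring, that the one-dimensional Wishart with
        unit scale is the chi-squared distribution):

        >>> import numpy as np
        >>> from scipy.stats import wishart, chi2
        >>> np.isclose(wishart.entropy(3, 1), chi2.entropy(3))
        True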
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
        This computation of ``logdet`` is equivalent to
        ``np.linalg.slogdet(scale)``, but roughly twice as fast.
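        A minimal sketch of the identity (added for illustration):

        >>> import numpy as np
        >>> a = np.array([[2., 1.], [1., 2.]])
        >>> logdet = 2 * np.sum(np.log(np.linalg.cholesky(a).diagonal()))
        >>> np.isclose(logdet, np.linalg.slogdet(a)[1])
        True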
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
"""Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See Also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
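    Examples
    --------
    A minimal sketch (added for illustration; assumes the inputs are
    symmetric positive definite and that this private helper is in scope):

    >>> import numpy as np
    >>> a = np.array([[[2., 1.], [1., 2.]], [[4., 0.], [0., 9.]]])
    >>> inv = _cho_inv_batch(a.copy())
    >>> np.allclose(inv, np.linalg.inv(a))
    True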
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,))
triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows]
return a1
class invwishart_gen(wishart_gen):
r"""An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
pdf(x, df, scale)
Probability density function.
logpdf(x, df, scale)
Log of the probability density function.
rvs(df, scale, size=1, random_state=None)
Draw random samples from an inverse Wishart distribution.
entropy(df, scale)
Differential entropy of the distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Raises
------
scipy.linalg.LinAlgError
If the scale matrix `scale` is not positive definite.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported. Symmetry is not checked; only the lower triangular
portion is used.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
    The probability density function for `invwishart` has support over
    positive definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Psi)`,
    then its PDF is given by:
    .. math::
        f(S) = \frac{|\Psi|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
               |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
               \exp\left( -tr(\Psi S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications
in Statistics - Simulation and Computation, vol. 14.2, pp.511-514,
1985.
.. [3] Gupta, M. and Srivastava, S. "Parametric Bayesian Estimation of
Differential Entropy and Relative Entropy". Entropy 12, 818 - 843.
2010.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
>>> plt.show()
The input quantiles can be any shape of array, as long as the last
axis labels the components.
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
>>> rv = invwishart(df=1, scale=1)
>>> # Frozen object with the same methods but holding the given
>>> # degrees of freedom and scale fixed.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.empty(x.shape[-1])
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
            Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""Mean of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""Variance of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""Variance of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super()._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
# reference: eq. (17) from ref. 3
psi_eval_points = [0.5 * (df - dim + i) for i in range(1, dim + 1)]
psi_eval_points = np.asarray(psi_eval_points)
return multigammaln(0.5 * df, dim) + 0.5 * dim * df + \
0.5 * (dim + 1) * (log_det_scale - _LOG_2) - \
0.5 * (df + dim + 1) * \
psi(psi_eval_points, out=psi_eval_points).sum()
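    # Editor's note (illustrative): for dim == 1 and scale == 1 the inverse
    # Wishart reduces to invgamma(df/2, scale=1/2), as stated in the class
    # docstring, so e.g. invwishart.entropy(4, 1) matches
    # scipy.stats.invgamma.entropy(2, scale=0.5).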
    def entropy(self, df, scale):
        """Compute the differential entropy of the inverse Wishart.
        Parameters and return value are as for `wishart.entropy`.
        """
        dim, df, scale = self._process_parameters(df, scale)
        _, log_det_scale = self._cholesky_logdet(scale)
        return self._entropy(dim, df, log_det_scale)
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = """\
`n` should be a nonnegative integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""A multinomial random variable.
Methods
-------
pmf(x, n, p)
Probability mass function.
logpmf(x, n, p)
Log of the probability mass function.
rvs(n, p, size=1, random_state=None)
Draw random samples from a multinomial distribution.
entropy(n, p)
Compute the entropy of the multinomial distribution.
cov(n, p)
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
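    If the elements of `p` do not sum to one, the last element is recomputed
    from the leftover probability, as described in the notes above (an
    illustrative check added here):

    >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.4])  # p[-1] replaced by 0.6
    0.29030399999999973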
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
>>> rv = multinomial(n=7, p=[.3, .7])
>>> # Frozen object with the same methods but holding the given
>>> # degrees of freedom and scale fixed.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.Generator.multinomial : Sampling from the multinomial distribution.
scipy.stats.multivariate_hypergeom :
The multivariate hypergeometric distribution.
""" # noqa: E501
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p, eps=1e-15):
"""Returns: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p_adjusted = 1. - p[..., :-1].sum(axis=-1)
i_adjusted = np.abs(p_adjusted) > eps
p[i_adjusted, -1] = p_adjusted[i_adjusted]
# true for bad p
pcond = np.any(p < 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=np.int_, copy=True)
# true for bad n
ncond = n < 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""Returns: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=np.int_)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." %
(xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, -np.inf)
# replace values bad for n or p; broadcast npcond to the right shape
npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
return self._checkresult(result, npcond_, np.nan)
def pmf(self, x, n, p):
"""Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
            Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""Mean of the Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
return self._checkresult(result, npcond, np.nan)
def cov(self, n, p):
"""Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[..., i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
        h = - \log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
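        For :math:`k=2` the multinomial entropy coincides with the binomial
        entropy; a quick illustrative check (added, not part of the original
        notes):

        >>> import numpy as np
        >>> from scipy.stats import multinomial, binom
        >>> h2 = multinomial.entropy(5, [0.4, 0.6])
        >>> np.isclose(h2, binom.entropy(5, 0.4))
        True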
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
            Number of samples to draw. Default is ``None``, in which case a
            single variate is returned.
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
class multinomial_frozen(multi_rv_frozen):
r"""Create a frozen Multinomial distribution.
Parameters
----------
    n : int
        Number of trials
    p : array_like
        Probability of a trial falling into each category; should sum to 1
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
r"""A Special Orthogonal matrix (SO(N)) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(N)) with a determinant of +1.
The `dim` keyword specifies the dimension N.
Methods
-------
rvs(dim=None, size=1, random_state=None)
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, np.random.RandomState, np.random.Generator}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Notes
-----
    This class wraps the random_rot code from the MDP Toolkit,
    https://github.com/mdp-toolkit/mdp-toolkit
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`. For a random rotation in three
dimensions, see `scipy.spatial.transform.Rotation.random`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
Alternatively, the object may be called (as a function) to fix the `dim`
parameter, returning a "frozen" special_ortho_group random variable:
>>> rv = special_ortho_group(5)
>>> # Frozen object with the same methods but holding the
>>> # dimension parameter fixed.
See Also
--------
ortho_group, scipy.spatial.transform.Rotation.random
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
        rvs : ndarray
            Random rotation matrices; array of shape (`size`, `dim`, `dim`),
            or (`dim`, `dim`) if `size` == 1.
"""
random_state = self._get_random_state(random_state)
size = int(size)
size = (size,) if size > 1 else ()
dim = self._process_parameters(dim)
# H represents a (dim, dim) matrix, while D represents the diagonal of
# a (dim, dim) diagonal matrix. The algorithm that follows is
# broadcasted on the leading shape in `size` to vectorize along
# samples.
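        # Editor's sketch of one unvectorized step (size == (), H of shape
        # (dim, dim)) of Stewart's algorithm, for orientation:
        #     x = random_state.normal(size=dim - n)
        #     D[n] = np.sign(x[0]) if x[0] != 0 else 1
        #     x[0] += D[n] * np.sqrt(x @ x)       # shift along e_1
        #     x /= np.sqrt((x @ x) / 2.)          # Householder normalization
        #     H[:, n:] -= np.outer(H[:, n:] @ x, x)
        # The broadcasted code below performs the same updates along the
        # leading axes given by `size`.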
H = np.empty(size + (dim, dim))
H[..., :, :] = np.eye(dim)
D = np.empty(size + (dim,))
for n in range(dim-1):
# x is a vector with length dim-n, xrow and xcol are views of it as
# a row vector and column vector respectively. It's important they
# are views and not copies because we are going to modify x
# in-place.
x = random_state.normal(size=size + (dim-n,))
xrow = x[..., None, :]
xcol = x[..., :, None]
# This is the squared norm of x, without vectorization it would be
# dot(x, x), to have proper broadcasting we use matmul and squeeze
# out (convert to scalar) the resulting 1x1 matrix
norm2 = np.matmul(xrow, xcol).squeeze((-2, -1))
x0 = x[..., 0].copy()
D[..., n] = np.where(x0 != 0, np.sign(x0), 1)
x[..., 0] += D[..., n]*np.sqrt(norm2)
# In renormalizing x we have to append an additional axis with
# [..., None] to broadcast the scalar against the vector x
x /= np.sqrt((norm2 - x0**2 + x[..., 0]**2) / 2.)[..., None]
# Householder transformation, without vectorization the RHS can be
# written as outer(H @ x, x) (apart from the slicing)
H[..., :, n:] -= np.matmul(H[..., :, n:], xcol) * xrow
D[..., -1] = (-1)**(dim-1)*D[..., :-1].prod(axis=-1)
# Without vectorization this could be written as H = diag(D) @ H,
# left-multiplication by a diagonal matrix amounts to multiplying each
# row of H by an element of the diagonal, so we add a dummy axis for
# the column index
H *= D[..., :, None]
return H
special_ortho_group = special_ortho_group_gen()
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""An Orthogonal matrix (O(N)) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
rvs(dim=None, size=1, random_state=None)
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, np.random.RandomState, np.random.Generator}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Notes
-----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
Alternatively, the object may be called (as a function) to fix the `dim`
parameter, returning a "frozen" ortho_group random variable:
>>> rv = ortho_group(5)
>>> # Frozen object with the same methods but holding the
>>> # dimension parameter fixed.
See Also
--------
special_ortho_group
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen O(N) distribution.
See `ortho_group_frozen` for more information.
"""
return ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
        rvs : ndarray
            Random orthogonal matrices; array of shape (`size`, `dim`, `dim`),
            or (`dim`, `dim`) if `size` == 1.
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1 and NumpyVersion(np.__version__) < '1.22.0':
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
size = (size,) if size > 1 else ()
z = random_state.normal(size=size + (dim, dim))
q, r = np.linalg.qr(z)
# The last two dimensions are the rows and columns of R matrices.
# Extract the diagonals. Note that this eliminates a dimension.
d = r.diagonal(offset=0, axis1=-2, axis2=-1)
# Add back a dimension for proper broadcasting: we're dividing
# each row of each R matrix by the diagonal of the R matrix.
q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly
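        # Editor's note: without this rescaling, ``np.linalg.qr`` alone does
        # not sample the Haar measure, because the signs of R's diagonal are
        # arbitrary; multiplying Q's columns by d/|d| fixes R's diagonal to be
        # positive, making the factorization unique and Q Haar-distributed
        # (Mezzadri [1]_).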
return q
ortho_group = ortho_group_gen()
class ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""Create a frozen O(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import ortho_group
>>> g = ortho_group(5)
>>> x = g.rvs()
"""
self._dist = ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class random_correlation_gen(multi_rv_generic):
r"""A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
rvs(eigs=None, random_state=None)
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
    rvs : ndarray
        Random correlation matrix of shape (`dim`, `dim`), where `dim` is
        the length of `eigs`; its eigenvalues are `eigs`.
Notes
-----
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
           No. 4, pp. 640-651
Examples
--------
>>> import numpy as np
>>> from scipy.stats import random_correlation
>>> rng = np.random.default_rng()
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng)
>>> x
array([[ 1. , -0.02423399, 0.03130519, 0.4946965 ],
[-0.02423399, 1. , 0.20334736, 0.04039817],
[ 0.03130519, 0.20334736, 1. , 0.02694275],
[ 0.4946965 , 0.04039817, 0.02694275, 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7):
"""Create a frozen random correlation matrix.
See `random_correlation_frozen` for more information.
"""
return random_correlation_frozen(eigs, seed=seed, tol=tol,
diag_tol=diag_tol)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length "
"greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
The output matrix g is a 2x2 anti-symmetric matrix of the form
[ c s ; -s c ]; the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
        # t is chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
        Given a psd matrix m, rotate to put ones on the diagonal, turning it
        into a correlation matrix. This also requires that the trace of `m`
        equal the dimensionality. Note: modifies input matrix
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and
m.shape[0] == m.shape[1]):
            raise ValueError("Matrix must be a square, C-contiguous "
                             "float64 array.")
d = m.shape[0]
for i in range(d-1):
if m[i, i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""Draw random correlation matrices.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
        rvs : ndarray
            Random correlation matrix of shape (`dim`, `dim`), where `dim` is
            the length of `eigs`; its eigenvalues are `eigs`.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
class random_correlation_frozen(multi_rv_frozen):
def __init__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7):
"""Create a frozen random correlation matrix distribution.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
        rvs : ndarray
            Random correlation matrix of shape (`dim`, `dim`), where `dim` is
            the length of `eigs`; its eigenvalues are `eigs`.
"""
self._dist = random_correlation_gen(seed)
self.tol = tol
self.diag_tol = diag_tol
_, self.eigs = self._dist._process_parameters(eigs, tol=self.tol)
def rvs(self, random_state=None):
return self._dist.rvs(self.eigs, random_state=random_state,
tol=self.tol, diag_tol=self.diag_tol)
class unitary_group_gen(multi_rv_generic):
r"""A matrix-valued U(N) random variable.
Return a random unitary matrix.
The `dim` keyword specifies the dimension N.
Methods
-------
rvs(dim=None, size=1, random_state=None)
Draw random samples from U(N).
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, np.random.RandomState, np.random.Generator}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Notes
-----
This class is similar to `ortho_group`.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import unitary_group
>>> x = unitary_group.rvs(3)
>>> np.dot(x, x.conj().T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
This generates one random matrix from U(3). The dot product confirms that
it is unitary up to machine precision.
Alternatively, the object may be called (as a function) to fix the `dim`
parameter, return a "frozen" unitary_group random variable:
>>> rv = unitary_group(5)
See Also
--------
ortho_group
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen (U(N)) n-dimensional unitary matrix distribution.
See `unitary_group_frozen` for more information.
"""
return unitary_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from U(N).
Parameters
----------
dim : integer
Dimension of space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
        rvs : ndarray
            Random unitary matrices. If `size` is 1, a single (dim, dim)
            matrix is returned; otherwise the shape is (size, dim, dim).
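        Examples
        --------
        A minimal, seed-independent sanity check:
        >>> import numpy as np
        >>> from scipy.stats import unitary_group
        >>> x = unitary_group.rvs(3)
        >>> np.allclose(x @ x.conj().T, np.eye(3))
        True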
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1 and NumpyVersion(np.__version__) < '1.22.0':
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
size = (size,) if size > 1 else ()
z = 1/math.sqrt(2)*(random_state.normal(size=size + (dim, dim)) +
1j*random_state.normal(size=size + (dim, dim)))
q, r = np.linalg.qr(z)
# The last two dimensions are the rows and columns of R matrices.
# Extract the diagonals. Note that this eliminates a dimension.
d = r.diagonal(offset=0, axis1=-2, axis2=-1)
# Add back a dimension for proper broadcasting: we're dividing
# each row of each R matrix by the diagonal of the R matrix.
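        # Rescaling the columns of Q by the phases d/|d| removes the
        # ambiguity of the QR decomposition (it amounts to choosing the R
        # factor with a real, positive diagonal); this is what makes Q
        # Haar-distributed, see Mezzadri [1].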
q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly
return q
unitary_group = unitary_group_gen()
class unitary_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""Create a frozen (U(N)) n-dimensional unitary matrix distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import unitary_group
>>> x = unitary_group(3)
>>> x.rvs()
"""
self._dist = unitary_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
_mvt_doc_default_callparams = """\
loc : array_like, optional
Location of the distribution. (default ``0``)
shape : array_like, optional
Positive semidefinite matrix of the distribution. (default ``1``)
df : float, optional
Degrees of freedom of the distribution; must be greater than zero.
If ``np.inf`` then results are multivariate normal. The default is ``1``.
allow_singular : bool, optional
Whether to allow a singular matrix. (default ``False``)
"""
_mvt_doc_callparams_note = """\
Setting the parameter `loc` to ``None`` is equivalent to having `loc`
be the zero-vector. The parameter `shape` can be a scalar, in which case
the shape matrix is the identity times that value, a vector of
diagonal entries for the shape matrix, or a two-dimensional array_like.
"""
_mvt_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
mvt_docdict_params = {
'_mvt_doc_default_callparams': _mvt_doc_default_callparams,
'_mvt_doc_callparams_note': _mvt_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvt_docdict_noparams = {
'_mvt_doc_default_callparams': "",
'_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_t_gen(multi_rv_generic):
r"""A multivariate t-distributed random variable.
The `loc` parameter specifies the location. The `shape` parameter specifies
the positive semidefinite shape matrix. The `df` parameter specifies the
degrees of freedom.
In addition to calling the methods below, the object itself may be called
as a function to fix the location, shape matrix, and degrees of freedom
    parameters, returning a "frozen" multivariate t random variable.
Methods
-------
pdf(x, loc=None, shape=1, df=1, allow_singular=False)
Probability density function.
    logpdf(x, loc=None, shape=1, df=1)
Log of the probability density function.
cdf(x, loc=None, shape=1, df=1, allow_singular=False, *,
maxpts=None, lower_limit=None, random_state=None)
Cumulative distribution function.
rvs(loc=None, shape=1, df=1, size=1, random_state=None)
Draw random samples from a multivariate t-distribution.
entropy(loc=None, shape=1, df=1)
Differential entropy of a multivariate t-distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_mvt_doc_callparams_note)s
The matrix `shape` must be a (symmetric) positive semidefinite matrix. The
determinant and inverse of `shape` are computed as the pseudo-determinant
and pseudo-inverse, respectively, so that `shape` does not need to have
full rank.
The probability density function for `multivariate_t` is
.. math::
f(x) = \frac{\Gamma((\nu + p)/2)}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}}
\left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top}
\boldsymbol{\Sigma}^{-1}
(\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2},
where :math:`p` is the dimension of :math:`\mathbf{x}`,
:math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location,
:math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape
matrix, and :math:`\nu` is the degrees of freedom.
.. versionadded:: 1.6.0
References
----------
    .. [1] Arellano-Valle et al. "Shannon Entropy and Mutual Information for
Multivariate Skew-Elliptical Distributions". Scandinavian Journal
of Statistics. Vol. 40, issue 1.
Examples
--------
The object may be called (as a function) to fix the `loc`, `shape`,
`df`, and `allow_singular` parameters, returning a "frozen"
multivariate_t random variable:
>>> import numpy as np
>>> from scipy.stats import multivariate_t
>>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
>>> # Frozen object with the same methods but holding the given location,
>>> # scale, and degrees of freedom fixed.
Create a contour plot of the PDF.
>>> import matplotlib.pyplot as plt
>>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]
>>> pos = np.dstack((x, y))
>>> fig, ax = plt.subplots(1, 1)
>>> ax.set_aspect('equal')
>>> plt.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
"""Initialize a multivariate t-distributed random variable.
Parameters
----------
        seed : {None, int, np.random.RandomState, np.random.Generator}, optional
            Seed or random state used for drawing random variates.
"""
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
self._random_state = check_random_state(seed)
def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t-distribution.
See `multivariate_t_frozen` for parameters.
"""
if df == np.inf:
return multivariate_normal_frozen(mean=loc, cov=shape,
allow_singular=allow_singular,
seed=seed)
return multivariate_t_frozen(loc=loc, shape=shape, df=df,
allow_singular=allow_singular, seed=seed)
def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
"""Multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the probability density function.
%(_mvt_doc_default_callparams)s
Returns
-------
pdf : Probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.pdf(x, loc, shape, df)
0.00075713
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape, allow_singular=allow_singular)
logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
dim, shape_info.rank)
return np.exp(logpdf)
def logpdf(self, x, loc=None, shape=1, df=1):
"""Log of the multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the log of the probability density
function.
%(_mvt_doc_default_callparams)s
Returns
-------
logpdf : Log of the probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.logpdf(x, loc, shape, df)
-7.1859802
See Also
--------
pdf : Probability density function.
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape)
return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
shape_info.rank)
def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
"""Utility method `pdf`, `logpdf` for parameters.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability density
function.
loc : ndarray
Location of the distribution.
prec_U : ndarray
A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
of the shape matrix.
log_pdet : float
Logarithm of the determinant of the shape matrix.
df : float
Degrees of freedom of the distribution.
dim : int
Dimension of the quantiles x.
rank : int
Rank of the shape matrix.
Notes
-----
As this function does no argument checking, it should not be called
directly; use 'logpdf' instead.
"""
if df == np.inf:
return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)
dev = x - loc
maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)
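        # Assemble the log-density term by term, matching the formula in
        # the class docstring:
        #   A = log Gamma((df + dim)/2)
        #   B = log Gamma(df/2)
        #   C = (dim/2) * log(df * pi)
        #   D = (1/2) * log|Sigma|  (pseudo-determinant)
        #   E = -((df + dim)/2) * log(1 + maha/df)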
t = 0.5 * (df + dim)
A = gammaln(t)
B = gammaln(0.5 * df)
C = dim/2. * np.log(df * np.pi)
D = 0.5 * log_pdet
E = -t * np.log(1 + (1./df) * maha)
return _squeeze_output(A - B - C - D + E)
def _cdf(self, x, loc, shape, df, dim, maxpts=None, lower_limit=None,
random_state=None):
        # Random state validation, the `maxpts` default, `apply_along_axis`,
        # etc. all need to live in this private method; otherwise the frozen
        # distribution's `cdf` method would have to duplicate them or call
        # the public `cdf`, which would re-process the parameters.
if random_state is not None:
rng = check_random_state(random_state)
else:
rng = self._random_state
if not maxpts:
maxpts = 1000 * dim
x = self._process_quantiles(x, dim)
lower_limit = (np.full(loc.shape, -np.inf)
if lower_limit is None else lower_limit)
# remove the mean
x, lower_limit = x - loc, lower_limit - loc
b, a = np.broadcast_arrays(x, lower_limit)
i_swap = b < a
signs = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative
a, b = a.copy(), b.copy()
a[i_swap], b[i_swap] = b[i_swap], a[i_swap]
n = x.shape[-1]
limits = np.concatenate((a, b), axis=-1)
def func1d(limits):
a, b = limits[:n], limits[n:]
return _qmvt(maxpts, df, shape, a, b, rng)[0]
res = np.apply_along_axis(func1d, -1, limits) * signs
# Fixing the output shape for existing distributions is a separate
# issue. For now, let's keep this consistent with pdf.
return _squeeze_output(res)
def cdf(self, x, loc=None, shape=1, df=1, allow_singular=False, *,
maxpts=None, lower_limit=None, random_state=None):
"""Multivariate t-distribution cumulative distribution function.
Parameters
----------
x : array_like
Points at which to evaluate the cumulative distribution function.
%(_mvt_doc_default_callparams)s
maxpts : int, optional
Maximum number of points to use for integration. The default is
1000 times the number of dimensions.
lower_limit : array_like, optional
Lower limit of integration of the cumulative distribution function.
Default is negative infinity. Must be broadcastable with `x`.
%(_doc_random_state)s
Returns
-------
cdf : ndarray or scalar
Cumulative distribution function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.cdf(x, loc, shape, df)
0.64798491
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
shape = _PSD(shape, allow_singular=allow_singular)._M
return self._cdf(x, loc, shape, df, dim, maxpts,
lower_limit, random_state)
def _entropy(self, dim, df=1, shape=1):
if df == np.inf:
return multivariate_normal(None, cov=shape).entropy()
shape_info = _PSD(shape)
shape_term = 0.5 * shape_info.log_pdet
def regular(dim, df):
halfsum = 0.5 * (dim + df)
half_df = 0.5 * df
return (
-gammaln(halfsum) + gammaln(half_df)
+ 0.5 * dim * np.log(df * np.pi) + halfsum
* (psi(halfsum) - psi(half_df))
+ shape_term
)
def asymptotic(dim, df):
# Formula from Wolfram Alpha:
# "asymptotic expansion -gammaln((m+d)/2) + gammaln(d/2) + (m*log(d*pi))/2
# + ((m+d)/2) * (digamma((m+d)/2) - digamma(d/2))"
return (
dim * norm._entropy() + dim / df
- dim * (dim - 2) * df**-2.0 / 4
+ dim**2 * (dim - 2) * df**-3.0 / 6
+ dim * (-3 * dim**3 + 8 * dim**2 - 8) * df**-4.0 / 24
+ dim**2 * (3 * dim**3 - 10 * dim**2 + 16) * df**-5.0 / 30
+ shape_term
)[()]
# preserves ~12 digits accuracy up to at least `dim=1e5`. See gh-18465.
threshold = dim * 100 * 4 / (np.log(dim) + 1)
return _lazywhere(df >= threshold, (dim, df), f=asymptotic, f2=regular)
def entropy(self, loc=None, shape=1, df=1):
"""Calculate the differential entropy of a multivariate
t-distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
Returns
-------
h : float
Differential entropy
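        Examples
        --------
        An illustrative consistency check: with ``df=np.inf`` the entropy
        matches that of the corresponding multivariate normal.
        >>> import numpy as np
        >>> from scipy.stats import multivariate_t, multivariate_normal
        >>> h_t = multivariate_t.entropy(shape=np.eye(2), df=np.inf)
        >>> np.allclose(h_t, multivariate_normal.entropy(cov=np.eye(2)))
        True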
"""
dim, loc, shape, df = self._process_parameters(None, shape, df)
return self._entropy(dim, df, shape)
def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
"""Draw random samples from a multivariate t-distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `P`), where `P` is the
dimension of the random variable.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.rvs(loc, shape, df)
array([[0.93477495, 3.00408716]])
"""
# For implementation details, see equation (3):
#
# Hofert, "On Sampling from the Multivariatet Distribution", 2013
# http://rjournal.github.io/archive/2013-2/hofert.pdf
#
dim, loc, shape, df = self._process_parameters(loc, shape, df)
if random_state is not None:
rng = check_random_state(random_state)
else:
rng = self._random_state
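        # Scale-mixture representation (Hofert, eq. 3): with
        # W ~ chi2(df)/df and Z ~ N(0, shape), loc + Z/sqrt(W) is
        # multivariate t. For df = inf the mixture degenerates (W := 1)
        # and the draw is multivariate normal.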
if np.isinf(df):
x = np.ones(size)
else:
x = rng.chisquare(df, size=size) / df
z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
samples = loc + z / np.sqrt(x)[..., None]
return _squeeze_output(samples)
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _process_parameters(self, loc, shape, df):
"""
Infer dimensionality from location array and shape matrix, handle
defaults, and ensure compatible dimensions.
"""
if loc is None and shape is None:
loc = np.asarray(0, dtype=float)
shape = np.asarray(1, dtype=float)
dim = 1
elif loc is None:
shape = np.asarray(shape, dtype=float)
if shape.ndim < 2:
dim = 1
else:
dim = shape.shape[0]
loc = np.zeros(dim)
elif shape is None:
loc = np.asarray(loc, dtype=float)
dim = loc.size
shape = np.eye(dim)
else:
shape = np.asarray(shape, dtype=float)
loc = np.asarray(loc, dtype=float)
dim = loc.size
if dim == 1:
loc = loc.reshape(1)
shape = shape.reshape(1, 1)
if loc.ndim != 1 or loc.shape[0] != dim:
raise ValueError("Array 'loc' must be a vector of length %d." %
dim)
if shape.ndim == 0:
shape = shape * np.eye(dim)
elif shape.ndim == 1:
shape = np.diag(shape)
elif shape.ndim == 2 and shape.shape != (dim, dim):
rows, cols = shape.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(shape.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'loc' is a vector of length %d.")
msg = msg % (str(shape.shape), len(loc))
raise ValueError(msg)
elif shape.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % shape.ndim)
# Process degrees of freedom.
if df is None:
df = 1
elif df <= 0:
raise ValueError("'df' must be greater than zero.")
elif np.isnan(df):
raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")
return dim, loc, shape, df
class multivariate_t_frozen(multi_rv_frozen):
def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
Examples
--------
>>> import numpy as np
>>> loc = np.zeros(3)
>>> shape = np.eye(3)
>>> df = 10
>>> dist = multivariate_t(loc, shape, df)
>>> dist.rvs()
array([[ 0.81412036, -1.53612361, 0.42199647]])
>>> dist.pdf([1, 1, 1])
array([0.01237803])
"""
self._dist = multivariate_t_gen(seed)
dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
self.shape_info = _PSD(shape, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
U = self.shape_info.U
log_pdet = self.shape_info.log_pdet
return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim,
self.shape_info.rank)
def cdf(self, x, *, maxpts=None, lower_limit=None, random_state=None):
x = self._dist._process_quantiles(x, self.dim)
return self._dist._cdf(x, self.loc, self.shape, self.df, self.dim,
maxpts, lower_limit, random_state)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(loc=self.loc,
shape=self.shape,
df=self.df,
size=size,
random_state=random_state)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.shape)
multivariate_t = multivariate_t_gen()
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_t_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'cdf', 'entropy']:
method = multivariate_t_gen.__dict__[name]
method_frozen = multivariate_t_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvt_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
_mhg_doc_default_callparams = """\
m : array_like
The number of each type of object in the population.
That is, :math:`m[i]` is the number of objects of
type :math:`i`.
n : array_like
The number of samples taken from the population.
"""
_mhg_doc_callparams_note = """\
`m` must be an array of positive integers. If the quantile
:math:`i` contains values out of the range :math:`[0, m_i]`
where :math:`m_i` is the number of objects of type :math:`i`
in the population or if the parameters are inconsistent with one
another (e.g. ``x.sum() != n``), methods return the appropriate
value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative
values, the result will contain ``nan`` there.
"""
_mhg_doc_frozen_callparams = ""
_mhg_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
mhg_docdict_params = {
'_doc_default_callparams': _mhg_doc_default_callparams,
'_doc_callparams_note': _mhg_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mhg_docdict_noparams = {
'_doc_default_callparams': _mhg_doc_frozen_callparams,
'_doc_callparams_note': _mhg_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_hypergeom_gen(multi_rv_generic):
r"""A multivariate hypergeometric random variable.
Methods
-------
pmf(x, m, n)
Probability mass function.
logpmf(x, m, n)
Log of the probability mass function.
rvs(m, n, size=1, random_state=None)
Draw random samples from a multivariate hypergeometric
distribution.
mean(m, n)
Mean of the multivariate hypergeometric distribution.
var(m, n)
Variance of the multivariate hypergeometric distribution.
cov(m, n)
Compute the covariance matrix of the multivariate
hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
The probability mass function for `multivariate_hypergeom` is
.. math::
P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1}
\binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad
(x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with }
\sum_{i=1}^k x_i = n
where :math:`m_i` are the number of objects of type :math:`i`, :math:`M`
is the total number of objects in the population (sum of all the
:math:`m_i`), and :math:`n` is the size of the sample to be taken
from the population.
.. versionadded:: 1.6.0
Examples
--------
To evaluate the probability mass function of the multivariate
hypergeometric distribution, with a dichotomous population of size
:math:`10` and :math:`20`, at a sample of size :math:`12` with
:math:`8` objects of the first type and :math:`4` objects of the
second type, use:
>>> from scipy.stats import multivariate_hypergeom
>>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12)
0.0025207176631464523
The `multivariate_hypergeom` distribution is identical to the
corresponding `hypergeom` distribution (tiny numerical differences
notwithstanding) when only two types (good and bad) of objects
are present in the population as in the example above. Consider
another example for a comparison with the hypergeometric distribution:
>>> from scipy.stats import hypergeom
>>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
0.4395604395604395
>>> hypergeom.pmf(k=3, M=15, n=4, N=10)
0.43956043956044005
The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs``
support broadcasting, under the convention that the vector parameters
(``x``, ``m``, and ``n``) are interpreted as if each row along the last
axis is a single object. For instance, we can combine the previous two
calls to `multivariate_hypergeom` as
>>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]],
... n=[12, 4])
array([0.00252072, 0.43956044])
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``m.shape[-1]``. For example:
>>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
array([[[ 1.05, -1.05],
[-1.05, 1.05]],
[[ 1.56, -1.56],
[-1.56, 1.56]]])
That is, ``result[0]`` is equal to
``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal
to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``.
Alternatively, the object may be called (as a function) to fix the `m`
and `n` parameters, returning a "frozen" multivariate hypergeometric
random variable.
>>> rv = multivariate_hypergeom(m=[10, 20], n=12)
>>> rv.pmf(x=[8, 4])
0.0025207176631464523
See Also
--------
scipy.stats.hypergeom : The hypergeometric distribution.
scipy.stats.multinomial : The multinomial distribution.
References
----------
.. [1] The Multivariate Hypergeometric Distribution,
http://www.randomservices.org/random/urn/MultiHypergeometric.html
.. [2] Thomas J. Sargent and John Stachurski, 2020,
Multivariate Hypergeometric Distribution
https://python.quantecon.org/_downloads/pdf/multi_hyper.pdf
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params)
def __call__(self, m, n, seed=None):
"""Create a frozen multivariate_hypergeom distribution.
See `multivariate_hypergeom_frozen` for more information.
"""
return multivariate_hypergeom_frozen(m, n, seed=seed)
def _process_parameters(self, m, n):
m = np.asarray(m)
n = np.asarray(n)
if m.size == 0:
m = m.astype(int)
if n.size == 0:
n = n.astype(int)
if not np.issubdtype(m.dtype, np.integer):
raise TypeError("'m' must an array of integers.")
if not np.issubdtype(n.dtype, np.integer):
raise TypeError("'n' must an array of integers.")
if m.ndim == 0:
raise ValueError("'m' must be an array with"
" at least one dimension.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
m, n = np.broadcast_arrays(m, n)
# check for empty arrays
if m.size != 0:
n = n[..., 0]
mcond = m < 0
M = m.sum(axis=-1)
ncond = (n < 0) | (n > M)
return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond
def _process_quantiles(self, x, M, m, n):
x = np.asarray(x)
if not np.issubdtype(x.dtype, np.integer):
raise TypeError("'x' must an array of integers.")
if x.ndim == 0:
raise ValueError("'x' must be an array with"
" at least one dimension.")
if not x.shape[-1] == m.shape[-1]:
raise ValueError(f"Size of each quantile must be size of 'm': "
f"received {x.shape[-1]}, "
f"but expected {m.shape[-1]}.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
M = M[..., np.newaxis]
x, m, n, M = np.broadcast_arrays(x, m, n, M)
# check for empty arrays
if m.size != 0:
n, M = n[..., 0], M[..., 0]
xcond = (x < 0) | (x > m)
return (x, M, m, n, xcond,
np.any(xcond, axis=-1) | (x.sum(axis=-1) != n))
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
return bad_value
if result.ndim == 0:
return result[()]
return result
def _logpmf(self, x, M, m, n, mxcond, ncond):
        # This form of the pmf uses the identity
        # C(n, r) = beta(n+1, 1) / beta(r+1, n-r+1)
        num = np.zeros_like(m, dtype=np.float64)
        den = np.zeros_like(n, dtype=np.float64)
m, x = m[~mxcond], x[~mxcond]
M, n = M[~ncond], n[~ncond]
num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1))
den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1))
num[mxcond] = np.nan
den[ncond] = np.nan
num = num.sum(axis=-1)
return num - den
def logpmf(self, x, m, n):
"""Log of the multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)
(x, M, m, n, xcond,
xcond_reduced) = self._process_quantiles(x, M, m, n)
mxcond = mcond | xcond
ncond = ncond | np.zeros(n.shape, dtype=np.bool_)
result = self._logpmf(x, M, m, n, mxcond, ncond)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, -np.inf)
# replace values bad for n or m; broadcast
# mncond to the right shape
mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)
return self._checkresult(result, mncond_, np.nan)
def pmf(self, x, m, n):
"""Multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
            Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
out = np.exp(self.logpmf(x, m, n))
return out
def mean(self, m, n):
"""Mean of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : array_like or scalar
The mean of the distribution
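        Examples
        --------
        A small sketch: with ``m=[7, 9, 8]`` (total ``M=24``) and ``n=12``,
        the mean ``n*m/M`` is half of each count.
        >>> from scipy.stats import multivariate_hypergeom
        >>> multivariate_hypergeom.mean(m=[7, 9, 8], n=12)
        array([3.5, 4.5, 4. ])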
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0)
M = np.ma.masked_array(M, mask=cond)
mu = n*(m/M)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(mu.shape, dtype=np.bool_))
return self._checkresult(mu, mncond, np.nan)
def var(self, m, n):
"""Variance of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
array_like
The variances of the components of the distribution. This is
the diagonal of the covariance matrix of the distribution
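        Examples
        --------
        An illustrative check: in the two-type case both components share
        the univariate hypergeometric variance.
        >>> from scipy.stats import multivariate_hypergeom
        >>> multivariate_hypergeom.var(m=[10, 5], n=4)
        array([0.6984127, 0.6984127])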
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = n * m/M * (M-m)/M * (M-n)/(M-1)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def cov(self, m, n):
"""Covariance matrix of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : array_like
The covariance matrix of the distribution
"""
# see [1]_ for the formula and [2]_ for implementation
# cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M = M[..., np.newaxis, np.newaxis]
n = n[..., np.newaxis, np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = (-n * (M-n)/(M-1) *
np.einsum("...i,...j->...ij", m, m) / (M**2))
# check for empty arrays
if m.size != 0:
M, n = M[..., 0, 0], n[..., 0, 0]
cond = cond[..., 0, 0]
dim = m.shape[-1]
# diagonal entries need to be computed differently
for i in range(dim):
output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))
output[..., i, i] = output[..., i, i] / (M-1)
output[..., i, i] = output[..., i, i] / (M**2)
if m.size != 0:
mncond = (mncond[..., np.newaxis, np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def rvs(self, m, n, size=None, random_state=None):
"""Draw random samples from a multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw. Default is ``None``, in which case a
single variate is returned as an array with shape ``m.shape``.
%(_doc_random_state)s
Returns
-------
rvs : array_like
Random variates of shape ``size`` or ``m.shape``
(if ``size=None``).
Notes
-----
%(_doc_callparams_note)s
Also note that NumPy's `multivariate_hypergeometric` sampler is not
used as it doesn't support broadcasting.
"""
M, m, n, _, _, _ = self._process_parameters(m, n)
random_state = self._get_random_state(random_state)
if size is not None and isinstance(size, int):
size = (size, )
if size is None:
rvs = np.empty(m.shape, dtype=m.dtype)
else:
rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype)
rem = M
# This sampler has been taken from numpy gh-13794
# https://github.com/numpy/numpy/pull/13794
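        # Sample the counts sequentially: conditioned on the counts already
        # drawn, the count for category c is univariate hypergeometric with
        # `rem` objects of the remaining types in the urn. The n == 0 mask
        # substitutes valid dummy arguments (the sampler requires
        # nsample >= 1) and the `~n0mask` factor zeroes out those draws.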
for c in range(m.shape[-1] - 1):
rem = rem - m[..., c]
n0mask = n == 0
rvs[..., c] = (~n0mask *
random_state.hypergeometric(m[..., c],
rem + n0mask,
n + n0mask,
size=size))
n = n - rvs[..., c]
rvs[..., m.shape[-1] - 1] = n
return rvs
multivariate_hypergeom = multivariate_hypergeom_gen()
class multivariate_hypergeom_frozen(multi_rv_frozen):
def __init__(self, m, n, seed=None):
self._dist = multivariate_hypergeom_gen(seed)
(self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond) = self._dist._process_parameters(m, n)
# monkey patch self._dist
def _process_parameters(m, n):
return (self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond)
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.m, self.n)
def pmf(self, x):
return self._dist.pmf(x, self.m, self.n)
def mean(self):
return self._dist.mean(self.m, self.n)
def var(self):
return self._dist.var(self.m, self.n)
def cov(self):
return self._dist.cov(self.m, self.n)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.m, self.n,
size=size,
random_state=random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_hypergeom and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']:
method = multivariate_hypergeom_gen.__dict__[name]
method_frozen = multivariate_hypergeom_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, mhg_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
mhg_docdict_params)
class random_table_gen(multi_rv_generic):
r"""Contingency tables from independent samples with fixed marginal sums.
This is the distribution of random tables with given row and column vector
sums. This distribution represents the set of random tables under the null
hypothesis that rows and columns are independent. It is used in hypothesis
tests of independence.
Because of assumed independence, the expected frequency of each table
element can be computed from the row and column sums, so that the
distribution is completely determined by these two vectors.
Methods
-------
    logpmf(x)
        Log-probability of occurrence of table `x` in the distribution.
    pmf(x)
        Probability of occurrence of table `x` in the distribution.
mean(row, col)
Mean table.
rvs(row, col, size=None, method=None, random_state=None)
Draw random tables with given row and column vector sums.
Parameters
----------
%(_doc_row_col)s
%(_doc_random_state)s
Notes
-----
%(_doc_row_col_note)s
Random elements from the distribution are generated either with Boyett's
[1]_ or Patefield's algorithm [2]_. Boyett's algorithm has
O(N) time and space complexity, where N is the total sum of entries in the
table. Patefield's algorithm has O(K x log(N)) time complexity, where K is
the number of cells in the table and requires only a small constant work
space. By default, the `rvs` method selects the fastest algorithm based on
the input, but you can specify the algorithm with the keyword `method`.
Allowed values are "boyett" and "patefield".
.. versionadded:: 1.10.0
Examples
--------
>>> from scipy.stats import random_table
>>> row = [1, 5]
>>> col = [2, 3, 1]
>>> random_table.mean(row, col)
array([[0.33333333, 0.5 , 0.16666667],
[1.66666667, 2.5 , 0.83333333]])
Alternatively, the object may be called (as a function) to fix the row
and column vector sums, returning a "frozen" distribution.
>>> dist = random_table(row, col)
>>> dist.rvs(random_state=123)
array([[1., 0., 0.],
[1., 3., 1.]])
References
----------
.. [1] J. Boyett, AS 144 Appl. Statist. 28 (1979) 329-332
.. [2] W.M. Patefield, AS 159 Appl. Statist. 30 (1981) 91-97
"""
def __init__(self, seed=None):
super().__init__(seed)
def __call__(self, row, col, *, seed=None):
"""Create a frozen distribution of tables with given marginals.
See `random_table_frozen` for more information.
"""
return random_table_frozen(row, col, seed=seed)
def logpmf(self, x, row, col):
"""Log-probability of table to occur in the distribution.
Parameters
----------
%(_doc_x)s
%(_doc_row_col)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`.
Notes
-----
%(_doc_row_col_note)s
If row and column marginals of `x` do not match `row` and `col`,
negative infinity is returned.
Examples
--------
>>> from scipy.stats import random_table
>>> import numpy as np
>>> x = [[1, 5, 1], [2, 3, 1]]
>>> row = np.sum(x, axis=1)
>>> col = np.sum(x, axis=0)
>>> random_table.logpmf(x, row, col)
-1.6306401200847027
Alternatively, the object may be called (as a function) to fix the row
and column vector sums, returning a "frozen" distribution.
>>> d = random_table(row, col)
>>> d.logpmf(x)
-1.6306401200847027
"""
r, c, n = self._process_parameters(row, col)
x = np.asarray(x)
if x.ndim < 2:
raise ValueError("`x` must be at least two-dimensional")
dtype_is_int = np.issubdtype(x.dtype, np.integer)
with np.errstate(invalid='ignore'):
if not dtype_is_int and not np.all(x.astype(int) == x):
raise ValueError("`x` must contain only integral values")
# x does not contain NaN if we arrive here
if np.any(x < 0):
raise ValueError("`x` must contain only non-negative values")
r2 = np.sum(x, axis=-1)
c2 = np.sum(x, axis=-2)
if r2.shape[-1] != len(r):
raise ValueError("shape of `x` must agree with `row`")
if c2.shape[-1] != len(c):
raise ValueError("shape of `x` must agree with `col`")
res = np.empty(x.shape[:-2])
mask = np.all(r2 == r, axis=-1) & np.all(c2 == c, axis=-1)
def lnfac(x):
return gammaln(x + 1)
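        # For tables whose marginals match `row` and `col`, the pmf is
        #   prod_i r_i! * prod_j c_j! / (n! * prod_ij x_ij!),
        # evaluated in log space via gammaln.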
res[mask] = (np.sum(lnfac(r), axis=-1) + np.sum(lnfac(c), axis=-1)
- lnfac(n) - np.sum(lnfac(x[mask]), axis=(-1, -2)))
res[~mask] = -np.inf
return res[()]
def pmf(self, x, row, col):
"""Probability of table to occur in the distribution.
Parameters
----------
%(_doc_x)s
%(_doc_row_col)s
Returns
-------
pmf : ndarray or scalar
Probability mass function evaluated at `x`.
Notes
-----
%(_doc_row_col_note)s
If row and column marginals of `x` do not match `row` and `col`,
zero is returned.
Examples
--------
>>> from scipy.stats import random_table
>>> import numpy as np
>>> x = [[1, 5, 1], [2, 3, 1]]
>>> row = np.sum(x, axis=1)
>>> col = np.sum(x, axis=0)
>>> random_table.pmf(x, row, col)
0.19580419580419592
Alternatively, the object may be called (as a function) to fix the row
and column vector sums, returning a "frozen" distribution.
>>> d = random_table(row, col)
>>> d.pmf(x)
0.19580419580419592
"""
return np.exp(self.logpmf(x, row, col))
def mean(self, row, col):
"""Mean of distribution of conditional tables.
%(_doc_mean_params)s
Returns
-------
mean: ndarray
Mean of the distribution.
Notes
-----
%(_doc_row_col_note)s
Examples
--------
>>> from scipy.stats import random_table
>>> row = [1, 5]
>>> col = [2, 3, 1]
>>> random_table.mean(row, col)
array([[0.33333333, 0.5 , 0.16666667],
[1.66666667, 2.5 , 0.83333333]])
Alternatively, the object may be called (as a function) to fix the row
and column vector sums, returning a "frozen" distribution.
>>> d = random_table(row, col)
>>> d.mean()
array([[0.33333333, 0.5 , 0.16666667],
[1.66666667, 2.5 , 0.83333333]])
"""
r, c, n = self._process_parameters(row, col)
return np.outer(r, c) / n
def rvs(self, row, col, *, size=None, method=None, random_state=None):
"""Draw random tables with fixed column and row marginals.
Parameters
----------
%(_doc_row_col)s
size : integer, optional
Number of samples to draw (default 1).
method : str, optional
Which method to use, "boyett" or "patefield". If None (default),
selects the fastest method for this input.
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random 2D tables of shape (`size`, `len(row)`, `len(col)`).
Notes
-----
%(_doc_row_col_note)s
Examples
--------
>>> from scipy.stats import random_table
>>> row = [1, 5]
>>> col = [2, 3, 1]
>>> random_table.rvs(row, col, random_state=123)
array([[1., 0., 0.],
[1., 3., 1.]])
Alternatively, the object may be called (as a function) to fix the row
and column vector sums, returning a "frozen" distribution.
>>> d = random_table(row, col)
>>> d.rvs(random_state=123)
array([[1., 0., 0.],
[1., 3., 1.]])
"""
r, c, n = self._process_parameters(row, col)
size, shape = self._process_size_shape(size, r, c)
random_state = self._get_random_state(random_state)
meth = self._process_rvs_method(method, r, c, n)
return meth(r, c, n, size, random_state).reshape(shape)
@staticmethod
def _process_parameters(row, col):
"""
Check that row and column vectors are one-dimensional, that they do
not contain negative or non-integer entries, and that the sums over
both vectors are equal.
"""
r = np.array(row, dtype=np.int64, copy=True)
c = np.array(col, dtype=np.int64, copy=True)
if np.ndim(r) != 1:
raise ValueError("`row` must be one-dimensional")
if np.ndim(c) != 1:
raise ValueError("`col` must be one-dimensional")
if np.any(r < 0):
raise ValueError("each element of `row` must be non-negative")
if np.any(c < 0):
raise ValueError("each element of `col` must be non-negative")
n = np.sum(r)
if n != np.sum(c):
raise ValueError("sums over `row` and `col` must be equal")
if not np.all(r == np.asarray(row)):
raise ValueError("each element of `row` must be an integer")
if not np.all(c == np.asarray(col)):
raise ValueError("each element of `col` must be an integer")
return r, c, n
@staticmethod
def _process_size_shape(size, r, c):
"""
Compute the number of samples to be drawn and the shape of the output
"""
shape = (len(r), len(c))
if size is None:
return 1, shape
size = np.atleast_1d(size)
if not np.issubdtype(size.dtype, np.integer) or np.any(size < 0):
raise ValueError("`size` must be a non-negative integer or `None`")
return np.prod(size), tuple(size) + shape
@classmethod
def _process_rvs_method(cls, method, r, c, n):
known_methods = {
None: cls._rvs_select(r, c, n),
"boyett": cls._rvs_boyett,
"patefield": cls._rvs_patefield,
}
try:
return known_methods[method]
except KeyError:
raise ValueError(f"'{method}' not recognized, "
f"must be one of {set(known_methods)}")
@classmethod
def _rvs_select(cls, r, c, n):
fac = 1.0 # benchmarks show that this value is about 1
k = len(r) * len(c) # number of cells
# n + 1 guards against failure if n == 0
if n > fac * np.log(n + 1) * k:
return cls._rvs_patefield
return cls._rvs_boyett
@staticmethod
def _rvs_boyett(row, col, ntot, size, random_state):
return _rcont.rvs_rcont1(row, col, ntot, size, random_state)
@staticmethod
def _rvs_patefield(row, col, ntot, size, random_state):
return _rcont.rvs_rcont2(row, col, ntot, size, random_state)
random_table = random_table_gen()
class random_table_frozen(multi_rv_frozen):
def __init__(self, row, col, *, seed=None):
self._dist = random_table_gen(seed)
self._params = self._dist._process_parameters(row, col)
# monkey patch self._dist
def _process_parameters(r, c):
return self._params
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, None, None)
def pmf(self, x):
return self._dist.pmf(x, None, None)
def mean(self):
return self._dist.mean(None, None)
def rvs(self, size=None, method=None, random_state=None):
# optimisations are possible here
return self._dist.rvs(None, None, size=size, method=method,
random_state=random_state)
_ctab_doc_row_col = """\
row : array_like
Sum of table entries in each row.
col : array_like
Sum of table entries in each column."""
_ctab_doc_x = """\
x : array-like
Two-dimensional table of non-negative integers, or a
multi-dimensional array with the last two dimensions
corresponding with the tables."""
_ctab_doc_row_col_note = """\
The row and column vectors must be one-dimensional, not empty,
and each sum up to the same value. They cannot contain negative
or noninteger entries."""
_ctab_doc_mean_params = f"""
Parameters
----------
{_ctab_doc_row_col}"""
_ctab_doc_row_col_note_frozen = """\
See class definition for a detailed description of parameters."""
_ctab_docdict = {
"_doc_random_state": _doc_random_state,
"_doc_row_col": _ctab_doc_row_col,
"_doc_x": _ctab_doc_x,
"_doc_mean_params": _ctab_doc_mean_params,
"_doc_row_col_note": _ctab_doc_row_col_note,
}
_ctab_docdict_frozen = _ctab_docdict.copy()
_ctab_docdict_frozen.update({
"_doc_row_col": "",
"_doc_mean_params": "",
"_doc_row_col_note": _ctab_doc_row_col_note_frozen,
})
def _docfill(obj, docdict, template=None):
obj.__doc__ = doccer.docformat(template or obj.__doc__, docdict)
# Set frozen generator docstrings from corresponding docstrings in
# random_table and fill in default strings in class docstrings
_docfill(random_table_gen, _ctab_docdict)
for name in ['logpmf', 'pmf', 'mean', 'rvs']:
method = random_table_gen.__dict__[name]
method_frozen = random_table_frozen.__dict__[name]
_docfill(method_frozen, _ctab_docdict_frozen, method.__doc__)
_docfill(method, _ctab_docdict)
class uniform_direction_gen(multi_rv_generic):
r"""A vector-valued uniform direction.
Return a random direction (unit vector). The `dim` keyword specifies
the dimensionality of the space.
Methods
-------
rvs(dim=None, size=1, random_state=None)
Draw random directions.
Parameters
----------
dim : scalar
Dimension of directions.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Notes
-----
This distribution generates unit vectors uniformly distributed on
the surface of a hypersphere. These can be interpreted as random
directions.
For example, if `dim` is 3, 3D vectors from the surface of :math:`S^2`
will be sampled.
References
----------
.. [1] Marsaglia, G. (1972). "Choosing a Point from the Surface of a
Sphere". Annals of Mathematical Statistics. 43 (2): 645-646.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import uniform_direction
>>> x = uniform_direction.rvs(3)
>>> np.linalg.norm(x)
1.
This generates one random direction, a vector on the surface of
:math:`S^2`.
Alternatively, the object may be called (as a function) to return a frozen
distribution with fixed `dim` parameter. Here,
we create a `uniform_direction` with ``dim=3`` and draw 5 observations.
The samples are then arranged in an array of shape 5x3.
>>> rng = np.random.default_rng()
>>> uniform_sphere_dist = uniform_direction(3)
>>> unit_vectors = uniform_sphere_dist.rvs(5, random_state=rng)
>>> unit_vectors
array([[ 0.56688642, -0.1332634 , -0.81294566],
[-0.427126 , -0.74779278, 0.50830044],
[ 0.3793989 , 0.92346629, 0.05715323],
[ 0.36428383, -0.92449076, -0.11231259],
[-0.27733285, 0.94410968, -0.17816678]])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen n-dimensional uniform direction distribution.
See `uniform_direction` for more information.
"""
return uniform_direction_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim < 1 or dim != int(dim):
raise ValueError("Dimension of vector must be specified, "
"and must be an integer greater than 0.")
return int(dim)
def rvs(self, dim, size=None, random_state=None):
"""Draw random samples from S(N-1).
Parameters
----------
dim : integer
Dimension of space (N).
size : int or tuple of ints, optional
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement.
Because each sample is N-dimensional, the output shape
is (m,n,k,N). If no shape is specified, a single (N-D)
sample is returned.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Pseudorandom number generator state used to generate resamples.
If `random_state` is ``None`` (or `np.random`), the
`numpy.random.RandomState` singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is
used, seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState``
instance then that instance is used.
Returns
-------
rvs : ndarray
Random direction vectors
"""
random_state = self._get_random_state(random_state)
if size is None:
size = np.array([], dtype=int)
size = np.atleast_1d(size)
dim = self._process_parameters(dim)
samples = _sample_uniform_direction(dim, size, random_state)
return samples
uniform_direction = uniform_direction_gen()
class uniform_direction_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""Create a frozen n-dimensional uniform direction distribution.
Parameters
----------
dim : int
            Dimension of directions
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import uniform_direction
>>> x = uniform_direction(3)
>>> x.rvs()
"""
self._dist = uniform_direction_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=None, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
def _sample_uniform_direction(dim, size, random_state):
"""
Private method to generate uniform directions
Reference: Marsaglia, G. (1972). "Choosing a Point from the Surface of a
Sphere". Annals of Mathematical Statistics. 43 (2): 645-646.
"""
samples_shape = np.append(size, dim)
samples = random_state.standard_normal(samples_shape)
samples /= np.linalg.norm(samples, axis=-1, keepdims=True)
return samples
_dirichlet_mn_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries along the last axis
determines the dimensionality of the distribution. Each entry must be
strictly positive.
n : int or array_like
The number of trials. Each element must be a strictly positive integer.
"""
_dirichlet_mn_doc_frozen_callparams = ""
_dirichlet_mn_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
dirichlet_mn_docdict_params = {
'_dirichlet_mn_doc_default_callparams': _dirichlet_mn_doc_default_callparams, # noqa
'_doc_random_state': _doc_random_state
}
dirichlet_mn_docdict_noparams = {
'_dirichlet_mn_doc_default_callparams': _dirichlet_mn_doc_frozen_callparams, # noqa
'_doc_random_state': _doc_random_state
}
def _dirichlet_multinomial_check_parameters(alpha, n, x=None):
alpha = np.asarray(alpha)
n = np.asarray(n)
if x is not None:
# Ensure that `x` and `alpha` are arrays. If the shapes are
# incompatible, NumPy will raise an appropriate error.
try:
x, alpha = np.broadcast_arrays(x, alpha)
except ValueError as e:
msg = "`x` and `alpha` must be broadcastable."
raise ValueError(msg) from e
x_int = np.floor(x)
if np.any(x < 0) or np.any(x != x_int):
raise ValueError("`x` must contain only non-negative integers.")
x = x_int
if np.any(alpha <= 0):
raise ValueError("`alpha` must contain only positive values.")
n_int = np.floor(n)
if np.any(n <= 0) or np.any(n != n_int):
raise ValueError("`n` must be a positive integer.")
n = n_int
sum_alpha = np.sum(alpha, axis=-1)
sum_alpha, n = np.broadcast_arrays(sum_alpha, n)
return (alpha, sum_alpha, n) if x is None else (alpha, sum_alpha, n, x)
class dirichlet_multinomial_gen(multi_rv_generic):
r"""A Dirichlet multinomial random variable.
The Dirichlet multinomial distribution is a compound probability
distribution: it is the multinomial distribution with number of trials
`n` and class probabilities ``p`` randomly sampled from a Dirichlet
distribution with concentration parameters ``alpha``.
Methods
-------
    logpmf(x, alpha, n)
        Log of the probability mass function.
    pmf(x, alpha, n)
        Probability mass function.
    mean(alpha, n)
        Mean of the Dirichlet multinomial distribution.
    var(alpha, n)
        Variance of the Dirichlet multinomial distribution.
    cov(alpha, n)
        The covariance of the Dirichlet multinomial distribution.
Parameters
----------
%(_dirichlet_mn_doc_default_callparams)s
%(_doc_random_state)s
See Also
--------
scipy.stats.dirichlet : The dirichlet distribution.
scipy.stats.multinomial : The multinomial distribution.
References
----------
.. [1] Dirichlet-multinomial distribution, Wikipedia,
https://www.wikipedia.org/wiki/Dirichlet-multinomial_distribution
Examples
--------
>>> from scipy.stats import dirichlet_multinomial
Get the PMF
>>> n = 6 # number of trials
>>> alpha = [3, 4, 5] # concentration parameters
>>> x = [1, 2, 3] # counts
>>> dirichlet_multinomial.pmf(x, alpha, n)
0.08484162895927604
If the sum of category counts does not equal the number of trials,
the probability mass is zero.
>>> dirichlet_multinomial.pmf(x, alpha, n=7)
0.0
Get the log of the PMF
>>> dirichlet_multinomial.logpmf(x, alpha, n)
-2.4669689491013327
Get the mean
>>> dirichlet_multinomial.mean(alpha, n)
array([1.5, 2. , 2.5])
Get the variance
>>> dirichlet_multinomial.var(alpha, n)
array([1.55769231, 1.84615385, 2.01923077])
Get the covariance
>>> dirichlet_multinomial.cov(alpha, n)
array([[ 1.55769231, -0.69230769, -0.86538462],
[-0.69230769, 1.84615385, -1.15384615],
[-0.86538462, -1.15384615, 2.01923077]])
Alternatively, the object may be called (as a function) to fix the
`alpha` and `n` parameters, returning a "frozen" Dirichlet multinomial
random variable.
>>> dm = dirichlet_multinomial(alpha, n)
>>> dm.pmf(x)
0.08484162895927579
All methods are fully vectorized. Each element of `x` and `alpha` is
a vector (along the last axis), each element of `n` is an
integer (scalar), and the result is computed element-wise.
>>> x = [[1, 2, 3], [4, 5, 6]]
>>> alpha = [[1, 2, 3], [4, 5, 6]]
>>> n = [6, 15]
>>> dirichlet_multinomial.pmf(x, alpha, n)
array([0.06493506, 0.02626937])
>>> dirichlet_multinomial.cov(alpha, n).shape # both covariance matrices
(2, 3, 3)
Broadcasting according to standard NumPy conventions is supported. Here,
we have four sets of concentration parameters (each a two element vector)
for each of three numbers of trials (each a scalar).
>>> alpha = [[3, 4], [4, 5], [5, 6], [6, 7]]
>>> n = [[6], [7], [8]]
>>> dirichlet_multinomial.mean(alpha, n).shape
(3, 4, 2)
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__,
dirichlet_mn_docdict_params)
def __call__(self, alpha, n, seed=None):
return dirichlet_multinomial_frozen(alpha, n, seed=seed)
def logpmf(self, x, alpha, n):
"""The log of the probability mass function.
Parameters
----------
x: ndarray
Category counts (non-negative integers). Must be broadcastable
with shape parameter ``alpha``. If multidimensional, the last axis
must correspond with the categories.
%(_dirichlet_mn_doc_default_callparams)s
Returns
-------
out: ndarray or scalar
Log of the probability mass function.
"""
a, Sa, n, x = _dirichlet_multinomial_check_parameters(alpha, n, x)
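        # With Sa = sum(alpha) along the last axis, the log pmf is
        #   logG(Sa) + logG(n+1) - logG(n+Sa)
        #     + sum_i [logG(x_i + a_i) - logG(a_i) - logG(x_i + 1)],
        # where logG is loggamma; the mass is zero (logpmf = -inf)
        # unless sum(x) == n.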
out = np.asarray(loggamma(Sa) + loggamma(n + 1) - loggamma(n + Sa))
out += (loggamma(x + a) - (loggamma(a) + loggamma(x + 1))).sum(axis=-1)
np.place(out, n != x.sum(axis=-1), -np.inf)
return out[()]
def pmf(self, x, alpha, n):
"""Probability mass function for a Dirichlet multinomial distribution.
Parameters
----------
x: ndarray
Category counts (non-negative integers). Must be broadcastable
with shape parameter ``alpha``. If multidimensional, the last axis
must correspond with the categories.
%(_dirichlet_mn_doc_default_callparams)s
Returns
-------
out: ndarray or scalar
Probability mass function.
"""
return np.exp(self.logpmf(x, alpha, n))
def mean(self, alpha, n):
"""Mean of a Dirichlet multinomial distribution.
Parameters
----------
%(_dirichlet_mn_doc_default_callparams)s
Returns
-------
out: ndarray
Mean of a Dirichlet multinomial distribution.
"""
a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)
n, Sa = n[..., np.newaxis], Sa[..., np.newaxis]
return n * a / Sa
def var(self, alpha, n):
"""The variance of the Dirichlet multinomial distribution.
Parameters
----------
%(_dirichlet_mn_doc_default_callparams)s
Returns
-------
out: array_like
The variances of the components of the distribution. This is
the diagonal of the covariance matrix of the distribution.
"""
a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)
n, Sa = n[..., np.newaxis], Sa[..., np.newaxis]
return n * a / Sa * (1 - a/Sa) * (n + Sa) / (1 + Sa)
def cov(self, alpha, n):
"""Covariance matrix of a Dirichlet multinomial distribution.
Parameters
----------
%(_dirichlet_mn_doc_default_callparams)s
Returns
-------
out : array_like
The covariance matrix of the distribution.
"""
a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)
var = dirichlet_multinomial.var(a, n)
n, Sa = n[..., np.newaxis, np.newaxis], Sa[..., np.newaxis, np.newaxis]
aiaj = a[..., :, np.newaxis] * a[..., np.newaxis, :]
cov = -n * aiaj / Sa ** 2 * (n + Sa) / (1 + Sa)
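        # The off-diagonal formula above does not hold on the diagonal;
        # overwrite the diagonal with the variances computed earlier.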
ii = np.arange(cov.shape[-1])
cov[..., ii, ii] = var
return cov
dirichlet_multinomial = dirichlet_multinomial_gen()
class dirichlet_multinomial_frozen(multi_rv_frozen):
def __init__(self, alpha, n, seed=None):
alpha, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)
self.alpha = alpha
self.n = n
self._dist = dirichlet_multinomial_gen(seed)
def logpmf(self, x):
return self._dist.logpmf(x, self.alpha, self.n)
def pmf(self, x):
return self._dist.pmf(x, self.alpha, self.n)
def mean(self):
return self._dist.mean(self.alpha, self.n)
def var(self):
return self._dist.var(self.alpha, self.n)
def cov(self):
return self._dist.cov(self.alpha, self.n)
# Set frozen generator docstrings from corresponding docstrings in
# dirichlet_multinomial and fill in default strings in class docstrings.
for name in ['logpmf', 'pmf', 'mean', 'var', 'cov']:
method = dirichlet_multinomial_gen.__dict__[name]
method_frozen = dirichlet_multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_mn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
dirichlet_mn_docdict_params)
class vonmises_fisher_gen(multi_rv_generic):
r"""A von Mises-Fisher variable.
The `mu` keyword specifies the mean direction vector. The `kappa` keyword
specifies the concentration parameter.
Methods
-------
pdf(x, mu=None, kappa=1)
Probability density function.
logpdf(x, mu=None, kappa=1)
Log of the probability density function.
rvs(mu=None, kappa=1, size=1, random_state=None)
Draw random samples from a von Mises-Fisher distribution.
entropy(mu=None, kappa=1)
Compute the differential entropy of the von Mises-Fisher distribution.
fit(data)
Fit a von Mises-Fisher distribution to data.
Parameters
----------
mu : array_like
Mean direction of the distribution. Must be a one-dimensional unit
vector of norm 1.
kappa : float
Concentration parameter. Must be positive.
seed : {None, int, np.random.RandomState, np.random.Generator}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
See Also
--------
scipy.stats.vonmises : von Mises distribution on a circle (the 2D case)
uniform_direction : uniform distribution on the surface of a hypersphere
Notes
-----
The von Mises-Fisher distribution is a directional distribution on the
surface of the unit hypersphere. The probability density
function of a unit vector :math:`\mathbf{x}` is
.. math::
f(\mathbf{x}) = \frac{\kappa^{d/2-1}}{(2\pi)^{d/2}I_{d/2-1}(\kappa)}
\exp\left(\kappa \mathbf{\mu}^T\mathbf{x}\right),
where :math:`\mathbf{\mu}` is the mean direction, :math:`\kappa` the
concentration parameter, :math:`d` the dimension and :math:`I` the
modified Bessel function of the first kind. As :math:`\mu` represents
a direction, it must be a unit vector or, in other words, a point
on the hypersphere: :math:`\mathbf{\mu}\in S^{d-1}`. :math:`\kappa` is a
concentration parameter, which means that it must be positive
(:math:`\kappa>0`) and that the distribution becomes more narrow with
increasing :math:`\kappa`. In that sense, the reciprocal value
:math:`1/\kappa` resembles the variance parameter of the normal
distribution.
The von Mises-Fisher distribution often serves as an analogue of the
normal distribution on the sphere. Intuitively, for unit vectors, a
useful distance measure is given by the angle :math:`\alpha` between
them. This is exactly what the scalar product
:math:`\mathbf{\mu}^T\mathbf{x}=\cos(\alpha)` in the
von Mises-Fisher probability density function describes: the angle
between the mean direction :math:`\mathbf{\mu}` and the vector
:math:`\mathbf{x}`. The larger the angle between them, the smaller the
probability to observe :math:`\mathbf{x}` for this particular mean
direction :math:`\mathbf{\mu}`.
In dimensions 2 and 3, specialized algorithms are used for fast sampling
[2]_, [3]_. For dimensions 4 or higher, the rejection sampling algorithm
described in [4]_ is utilized. This implementation is partially based on
the geomstats package [5]_, [6]_.
.. versionadded:: 1.11
References
----------
.. [1] Von Mises-Fisher distribution, Wikipedia,
https://en.wikipedia.org/wiki/Von_Mises%E2%80%93Fisher_distribution
.. [2] Mardia, K., and Jupp, P. Directional statistics. Wiley, 2000.
.. [3] J. Wenzel. Numerically stable sampling of the von Mises Fisher
distribution on S2.
https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf
.. [4] Wood, A. Simulation of the von mises fisher distribution.
Communications in statistics-simulation and computation 23,
1 (1994), 157-164. https://doi.org/10.1080/03610919408813161
.. [5] geomstats, GitHub. MIT License. Accessed: 06.01.2023.
https://github.com/geomstats/geomstats
.. [6] Miolane, N. et al. Geomstats: A Python Package for Riemannian
Geometry in Machine Learning. Journal of Machine Learning Research
21 (2020). http://jmlr.org/papers/v21/19-027.html
Examples
--------
**Visualization of the probability density**
Plot the probability density in three dimensions for increasing
concentration parameter. The density is calculated by the ``pdf``
method.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import vonmises_fisher
>>> from matplotlib.colors import Normalize
>>> n_grid = 100
>>> u = np.linspace(0, np.pi, n_grid)
>>> v = np.linspace(0, 2 * np.pi, n_grid)
>>> u_grid, v_grid = np.meshgrid(u, v)
>>> vertices = np.stack([np.cos(v_grid) * np.sin(u_grid),
... np.sin(v_grid) * np.sin(u_grid),
... np.cos(u_grid)],
... axis=2)
>>> x = np.outer(np.cos(v), np.sin(u))
>>> y = np.outer(np.sin(v), np.sin(u))
>>> z = np.outer(np.ones_like(u), np.cos(u))
>>> def plot_vmf_density(ax, x, y, z, vertices, mu, kappa):
... vmf = vonmises_fisher(mu, kappa)
... pdf_values = vmf.pdf(vertices)
... pdfnorm = Normalize(vmin=pdf_values.min(), vmax=pdf_values.max())
... ax.plot_surface(x, y, z, rstride=1, cstride=1,
... facecolors=plt.cm.viridis(pdfnorm(pdf_values)),
... linewidth=0)
... ax.set_aspect('equal')
... ax.view_init(azim=-130, elev=0)
... ax.axis('off')
... ax.set_title(rf"$\kappa={kappa}$")
>>> fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 4),
... subplot_kw={"projection": "3d"})
>>> left, middle, right = axes
>>> mu = np.array([-np.sqrt(0.5), -np.sqrt(0.5), 0])
>>> plot_vmf_density(left, x, y, z, vertices, mu, 5)
>>> plot_vmf_density(middle, x, y, z, vertices, mu, 20)
>>> plot_vmf_density(right, x, y, z, vertices, mu, 100)
>>> plt.subplots_adjust(top=1, bottom=0.0, left=0.0, right=1.0, wspace=0.)
>>> plt.show()
As we increase the concentration parameter, the points are getting more
clustered together around the mean direction.
**Sampling**
Draw 5 samples from the distribution using the ``rvs`` method resulting
in a 5x3 array.
>>> rng = np.random.default_rng()
>>> mu = np.array([0, 0, 1])
>>> samples = vonmises_fisher(mu, 20).rvs(5, random_state=rng)
>>> samples
array([[ 0.3884594 , -0.32482588, 0.86231516],
[ 0.00611366, -0.09878289, 0.99509023],
[-0.04154772, -0.01637135, 0.99900239],
[-0.14613735, 0.12553507, 0.98126695],
[-0.04429884, -0.23474054, 0.97104814]])
These samples are unit vectors on the sphere :math:`S^2`. To verify,
let us calculate their Euclidean norms:
>>> np.linalg.norm(samples, axis=1)
array([1., 1., 1., 1., 1.])
Plot 20 observations drawn from the von Mises-Fisher distribution for
increasing concentration parameter :math:`\kappa`. The red dot highlights
the mean direction :math:`\mu`.
>>> def plot_vmf_samples(ax, x, y, z, mu, kappa):
... vmf = vonmises_fisher(mu, kappa)
... samples = vmf.rvs(20)
... ax.plot_surface(x, y, z, rstride=1, cstride=1, linewidth=0,
... alpha=0.2)
... ax.scatter(samples[:, 0], samples[:, 1], samples[:, 2], c='k', s=5)
... ax.scatter(mu[0], mu[1], mu[2], c='r', s=30)
... ax.set_aspect('equal')
... ax.view_init(azim=-130, elev=0)
... ax.axis('off')
... ax.set_title(rf"$\kappa={kappa}$")
>>> mu = np.array([-np.sqrt(0.5), -np.sqrt(0.5), 0])
>>> fig, axes = plt.subplots(nrows=1, ncols=3,
... subplot_kw={"projection": "3d"},
... figsize=(9, 4))
>>> left, middle, right = axes
>>> plot_vmf_samples(left, x, y, z, mu, 5)
>>> plot_vmf_samples(middle, x, y, z, mu, 20)
>>> plot_vmf_samples(right, x, y, z, mu, 100)
>>> plt.subplots_adjust(top=1, bottom=0.0, left=0.0,
... right=1.0, wspace=0.)
>>> plt.show()
The plots show that with increasing concentration :math:`\kappa` the
resulting samples are centered more closely around the mean direction.
**Fitting the distribution parameters**
The distribution can be fitted to data using the ``fit`` method returning
the estimated parameters. As a toy example, let's fit the distribution to
samples drawn from a known von Mises-Fisher distribution.
>>> mu, kappa = np.array([0, 0, 1]), 20
>>> samples = vonmises_fisher(mu, kappa).rvs(1000, random_state=rng)
>>> mu_fit, kappa_fit = vonmises_fisher.fit(samples)
>>> mu_fit, kappa_fit
(array([0.01126519, 0.01044501, 0.99988199]), 19.306398751730995)
We see that the estimated parameters `mu_fit` and `kappa_fit` are
very close to the ground truth parameters.
"""
def __init__(self, seed=None):
super().__init__(seed)
def __call__(self, mu=None, kappa=1, seed=None):
"""Create a frozen von Mises-Fisher distribution.
See `vonmises_fisher_frozen` for more information.
"""
return vonmises_fisher_frozen(mu, kappa, seed=seed)
def _process_parameters(self, mu, kappa):
"""
Infer dimensionality from mu and ensure that mu is a one-dimensional
unit vector and kappa positive.
"""
mu = np.asarray(mu)
if mu.ndim > 1:
raise ValueError("'mu' must have one-dimensional shape.")
if not np.allclose(np.linalg.norm(mu), 1.):
raise ValueError("'mu' must be a unit vector of norm 1.")
if not mu.size > 1:
raise ValueError("'mu' must have at least two entries.")
kappa_error_msg = "'kappa' must be a positive scalar."
if not np.isscalar(kappa) or kappa < 0:
raise ValueError(kappa_error_msg)
if float(kappa) == 0.:
raise ValueError("For 'kappa=0' the von Mises-Fisher distribution "
"becomes the uniform distribution on the sphere "
"surface. Consider using "
"'scipy.stats.uniform_direction' instead.")
dim = mu.size
return dim, mu, kappa
def _check_data_vs_dist(self, x, dim):
if x.shape[-1] != dim:
raise ValueError("The dimensionality of the last axis of 'x' must "
"match the dimensionality of the "
"von Mises Fisher distribution.")
if not np.allclose(np.linalg.norm(x, axis=-1), 1.):
msg = "'x' must be unit vectors of norm 1 along last dimension."
raise ValueError(msg)
def _log_norm_factor(self, dim, kappa):
# The normalization factor is given by
# c = kappa**(dim/2-1) / ((2*pi)**(dim/2) * I[dim/2-1](kappa))
#   = kappa**(dim/2-1) * exp(-kappa) /
#     ((2*pi)**(dim/2) * I[dim/2-1](kappa) * exp(-kappa))
#   = kappa**(dim/2-1) * exp(-kappa) /
#     ((2*pi)**(dim/2) * ive(dim/2-1, kappa))
# so the log is given by
# log c = (dim/2 - 1)*log(kappa) - (dim/2)*log(2*pi)
#         - log(ive(dim/2-1, kappa)) - kappa
halfdim = 0.5 * dim
return (0.5 * (dim - 2)*np.log(kappa) - halfdim * _LOG_2PI -
np.log(ive(halfdim - 1, kappa)) - kappa)
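# A numerical sanity check (a sketch; `dim` and `kappa` below are example
# values): for moderate kappa, the exponentially scaled form above agrees
# with the direct formula in terms of the unscaled Bessel function `iv`:
#
#     import numpy as np
#     from scipy.special import iv
#     dim, kappa = 3, 5.0
#     direct = ((dim/2 - 1)*np.log(kappa) - (dim/2)*np.log(2*np.pi)
#               - np.log(iv(dim/2 - 1, kappa)))
#     assert np.isclose(
#         vonmises_fisher._log_norm_factor(dim, kappa), direct)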
def _logpdf(self, x, dim, mu, kappa):
"""Log of the von Mises-Fisher probability density function.
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
x = np.asarray(x)
self._check_data_vs_dist(x, dim)
dotproducts = np.einsum('i,...i->...', mu, x)
return self._log_norm_factor(dim, kappa) + kappa * dotproducts
def logpdf(self, x, mu=None, kappa=1):
"""Log of the von Mises-Fisher probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the log of the probability
density function. The last axis of `x` must correspond
to unit vectors of the same dimensionality as the distribution.
mu : array_like, default: None
Mean direction of the distribution. Must be a one-dimensional unit
vector of norm 1.
kappa : float, default: 1
Concentration parameter. Must be positive.
Returns
-------
logpdf : ndarray or scalar
Log of the probability density function evaluated at `x`.
"""
dim, mu, kappa = self._process_parameters(mu, kappa)
return self._logpdf(x, dim, mu, kappa)
def pdf(self, x, mu=None, kappa=1):
"""Von Mises-Fisher probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the probability
density function. The last axis of `x` must correspond
to unit vectors of the same dimensionality as the distribution.
mu : array_like
Mean direction of the distribution. Must be a one-dimensional unit
vector of norm 1.
kappa : float
Concentration parameter. Must be positive.
Returns
-------
pdf : ndarray or scalar
Probability density function evaluated at `x`.
"""
dim, mu, kappa = self._process_parameters(mu, kappa)
return np.exp(self._logpdf(x, dim, mu, kappa))
def _rvs_2d(self, mu, kappa, size, random_state):
"""
In 2D, the von Mises-Fisher distribution reduces to the
von Mises distribution which can be efficiently sampled by numpy.
This method is much faster than the general rejection
sampling based algorithm.
"""
mean_angle = np.arctan2(mu[1], mu[0])
angle_samples = random_state.vonmises(mean_angle, kappa, size=size)
samples = np.stack([np.cos(angle_samples), np.sin(angle_samples)],
axis=-1)
return samples
def _rvs_3d(self, kappa, size, random_state):
"""
Generate samples from a von Mises-Fisher distribution
with mu = [1, 0, 0] and kappa. Samples then have to be
rotated towards the desired mean direction mu.
This method is much faster than the general rejection
sampling based algorithm.
Reference: https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf
"""
if size is None:
sample_size = 1
else:
sample_size = size
# compute x coordinate acc. to equation from section 3.1
x = random_state.random(sample_size)
x = 1. + np.log(x + (1. - x) * np.exp(-2 * kappa))/kappa
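# The line above is the inverse CDF: the marginal density of x is
# proportional to exp(kappa * x) on [-1, 1], so
#   F(x) = (exp(kappa*x) - exp(-kappa)) / (exp(kappa) - exp(-kappa))
#   F^-1(u) = 1 + log(u + (1 - u)*exp(-2*kappa)) / kappa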
# (y, z) are random 2D vectors that only have to be
# normalized accordingly. Then (x, y, z) follows a VMF distribution
temp = np.sqrt(1. - np.square(x))
uniformcircle = _sample_uniform_direction(2, sample_size, random_state)
samples = np.stack([x, temp * uniformcircle[..., 0],
temp * uniformcircle[..., 1]],
axis=-1)
if size is None:
samples = np.squeeze(samples)
return samples
def _rejection_sampling(self, dim, kappa, size, random_state):
"""
Generate samples from a n-dimensional von Mises-Fisher distribution
with mu = [1, 0, ..., 0] and kappa via rejection sampling.
Samples then have to be rotated towards the desired mean direction mu.
Reference: https://doi.org/10.1080/03610919408813161
"""
dim_minus_one = dim - 1
# calculate number of requested samples
if size is not None:
if not np.iterable(size):
size = (size, )
n_samples = math.prod(size)
else:
n_samples = 1
# calculate envelope for rejection sampler (eq. 4)
sqrt = np.sqrt(4 * kappa ** 2. + dim_minus_one ** 2)
envelop_param = (-2 * kappa + sqrt) / dim_minus_one
if envelop_param == 0:
# the regular formula suffers from loss of precision for high
# kappa. This can only be detected by checking for 0 here.
# Workaround: expansion for sqrt variable
# https://www.wolframalpha.com/input?i=sqrt%284*x%5E2%2Bd%5E2%29
# e = (-2 * k + sqrt(k**2 + d**2)) / d
# ~ (-2 * k + 2 * k + d**2/(4 * k) - d**4/(64 * k**3)) / d
# = d/(4 * k) - d**3/(64 * k**3)
envelop_param = (dim_minus_one/4 * kappa**-1.
- dim_minus_one**3/64 * kappa**-3.)
# reference step 0
node = (1. - envelop_param) / (1. + envelop_param)
# t = ln(1 - ((1-x)/(1+x))**2)
# = ln(4 * x / (1+x)**2)
# = ln(4) + ln(x) - 2*log1p(x)
correction = (kappa * node + dim_minus_one
* (np.log(4) + np.log(envelop_param)
- 2 * np.log1p(envelop_param)))
n_accepted = 0
x = np.zeros((n_samples, ))
halfdim = 0.5 * dim_minus_one
# main loop
while n_accepted < n_samples:
# generate candidates acc. to reference step 1
sym_beta = random_state.beta(halfdim, halfdim,
size=n_samples - n_accepted)
coord_x = (1 - (1 + envelop_param) * sym_beta) / (
1 - (1 - envelop_param) * sym_beta)
# accept or reject: reference step 2
# reformulation for numerical stability:
# t = ln(1 - (1-x)/(1+x) * y)
# = ln((1 + x - y +x*y)/(1 +x))
accept_tol = random_state.random(n_samples - n_accepted)
criterion = (
kappa * coord_x
+ dim_minus_one * (np.log((1 + envelop_param - coord_x
+ coord_x * envelop_param) / (1 + envelop_param)))
- correction) > np.log(accept_tol)
accepted_iter = np.sum(criterion)
x[n_accepted:n_accepted + accepted_iter] = coord_x[criterion]
n_accepted += accepted_iter
# concatenate x and remaining coordinates: step 3
coord_rest = _sample_uniform_direction(dim_minus_one, n_accepted,
random_state)
coord_rest = np.einsum(
'...,...i->...i', np.sqrt(1 - x ** 2), coord_rest)
samples = np.concatenate([x[..., None], coord_rest], axis=1)
# reshape output to (size, dim)
if size is not None:
samples = samples.reshape(size + (dim, ))
else:
samples = np.squeeze(samples)
return samples
def _rotate_samples(self, samples, mu, dim):
"""A QR decomposition is used to find the rotation that maps the
north pole (1, 0,...,0) to the vector mu. This rotation is then
applied to all samples.
Parameters
----------
samples : array_like, shape = [..., n]
mu : array_like, shape = [n, ]
Point to parametrise the rotation.
Returns
-------
samples : rotated samples
"""
base_point = np.zeros((dim, ))
base_point[0] = 1.
embedded = np.concatenate([mu[None, :], np.zeros((dim - 1, dim))])
rotmatrix, _ = np.linalg.qr(np.transpose(embedded))
if np.allclose(np.matmul(rotmatrix, base_point[:, None])[:, 0], mu):
rotsign = 1
else:
rotsign = -1
# apply rotation
samples = np.einsum('ij,...j->...i', rotmatrix, samples) * rotsign
return samples
def _rvs(self, dim, mu, kappa, size, random_state):
if dim == 2:
samples = self._rvs_2d(mu, kappa, size, random_state)
elif dim == 3:
samples = self._rvs_3d(kappa, size, random_state)
else:
samples = self._rejection_sampling(dim, kappa, size,
random_state)
if dim != 2:
samples = self._rotate_samples(samples, mu, dim)
return samples
def rvs(self, mu=None, kappa=1, size=1, random_state=None):
"""Draw random samples from a von Mises-Fisher distribution.
Parameters
----------
mu : array_like
Mean direction of the distribution. Must be a one-dimensional unit
vector of norm 1.
kappa : float
Concentration parameter. Must be positive.
size : int or tuple of ints, optional
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement.
Because each sample is N-dimensional, the output shape
is (m,n,k,N). If no shape is specified, a single (N-D)
sample is returned.
random_state : {None, int, np.random.RandomState, np.random.Generator},
optional
Used for drawing random variates.
If `random_state` is `None`, the `~np.random.RandomState`
singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is
used, seeded with `random_state`.
If `random_state` is already a ``RandomState`` or ``Generator``
instance, then that object is used.
Default is `None`.
Returns
-------
rvs : ndarray
Random variates of shape (`size`, `N`), where `N` is the
dimension of the distribution.
"""
dim, mu, kappa = self._process_parameters(mu, kappa)
random_state = self._get_random_state(random_state)
samples = self._rvs(dim, mu, kappa, size, random_state)
return samples
def _entropy(self, dim, kappa):
halfdim = 0.5 * dim
return (-self._log_norm_factor(dim, kappa) - kappa *
ive(halfdim, kappa) / ive(halfdim - 1, kappa))
def entropy(self, mu=None, kappa=1):
"""Compute the differential entropy of the von Mises-Fisher
distribution.
Parameters
----------
mu : array_like, default: None
Mean direction of the distribution. Must be a one-dimensional unit
vector of norm 1.
kappa : float, default: 1
Concentration parameter. Must be positive.
Returns
-------
h : scalar
Entropy of the von Mises-Fisher distribution.
"""
dim, _, kappa = self._process_parameters(mu, kappa)
return self._entropy(dim, kappa)
def fit(self, x):
"""Fit the von Mises-Fisher distribution to data.
Parameters
----------
x : array-like
Data the distribution is fitted to. Must be two dimensional.
The second axis of `x` must be unit vectors of norm 1 and
determine the dimensionality of the fitted
von Mises-Fisher distribution.
Returns
-------
mu : ndarray
Estimated mean direction.
kappa : float
Estimated concentration parameter.
"""
# validate input data
x = np.asarray(x)
if x.ndim != 2:
raise ValueError("'x' must be two dimensional.")
if not np.allclose(np.linalg.norm(x, axis=-1), 1.):
msg = "'x' must be unit vectors of norm 1 along last dimension."
raise ValueError(msg)
dim = x.shape[-1]
# mu is simply the directional mean
dirstats = directional_stats(x)
mu = dirstats.mean_direction
r = dirstats.mean_resultant_length
# kappa is the solution to the equation:
# r = I[dim/2](kappa) / I[dim/2 -1](kappa)
# = I[dim/2](kappa) * exp(-kappa) / I[dim/2 -1](kappa) * exp(-kappa)
# = ive(dim/2, kappa) / ive(dim/2 -1, kappa)
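# (A commonly cited closed-form approximation, from Banerjee et al. 2005,
# is kappa ~= r * (dim - r**2) / (1 - r**2); it could serve as a starting
# guess, but here the implicit equation is solved directly by bracketed
# root-finding below.)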
halfdim = 0.5 * dim
def solve_for_kappa(kappa):
bessel_vals = ive([halfdim, halfdim - 1], kappa)
return bessel_vals[0]/bessel_vals[1] - r
root_res = root_scalar(solve_for_kappa, method="brentq",
bracket=(1e-8, 1e9))
kappa = root_res.root
return mu, kappa
vonmises_fisher = vonmises_fisher_gen()
class vonmises_fisher_frozen(multi_rv_frozen):
def __init__(self, mu=None, kappa=1, seed=None):
"""Create a frozen von Mises-Fisher distribution.
Parameters
----------
mu : array_like, default: None
Mean direction of the distribution.
kappa : float, default: 1
Concentration parameter. Must be positive.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
"""
self._dist = vonmises_fisher_gen(seed)
self.dim, self.mu, self.kappa = (
self._dist._process_parameters(mu, kappa)
)
def logpdf(self, x):
"""
Parameters
----------
x : array_like
Points at which to evaluate the log of the probability
density function. The last axis of `x` must correspond
to unit vectors of the same dimensionality as the distribution.
Returns
-------
logpdf : ndarray or scalar
Log of probability density function evaluated at `x`.
"""
return self._dist._logpdf(x, self.dim, self.mu, self.kappa)
def pdf(self, x):
"""
Parameters
----------
x : array_like
Points at which to evaluate the probability
density function. The last axis of `x` must correspond
to unit vectors of the same dimensionality as the distribution.
Returns
-------
pdf : ndarray or scalar
Probability density function evaluated at `x`.
"""
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
Draw random variates from the von Mises-Fisher distribution.
Parameters
----------
size : int or tuple of ints, optional
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement.
Because each sample is N-dimensional, the output shape
is (m,n,k,N). If no shape is specified, a single (N-D)
sample is returned.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `N`), where `N` is the
dimension of the distribution.
"""
random_state = self._dist._get_random_state(random_state)
return self._dist._rvs(self.dim, self.mu, self.kappa, size,
random_state)
def entropy(self):
"""
Calculate the differential entropy of the von Mises-Fisher
distribution.
Returns
-------
h : float
Entropy of the von Mises-Fisher distribution.
"""
return self._dist._entropy(self.dim, self.kappa)
| 237,188
| 32.986101
| 122
|
py
|
scipy
|
scipy-main/scipy/stats/_tukeylambda_stats.py
|
import numpy as np
from numpy import poly1d
from scipy.special import beta
# The following code was used to generate the Pade coefficients for the
# Tukey Lambda variance function. Version 0.17 of mpmath was used.
#---------------------------------------------------------------------------
# import mpmath as mp
#
# mp.mp.dps = 60
#
# one = mp.mpf(1)
# two = mp.mpf(2)
# three = mp.mpf(3)
#
# def mpvar(lam):
# if lam == 0:
# v = mp.pi**2 / three
# else:
# v = (two / lam**2) * (one / (one + two*lam) -
# mp.beta(lam + one, lam + one))
# return v
#
# t = mp.taylor(mpvar, 0, 8)
# p, q = mp.pade(t, 4, 4)
# print("p =", [mp.fp.mpf(c) for c in p])
# print("q =", [mp.fp.mpf(c) for c in q])
#---------------------------------------------------------------------------
# Pade coefficients for the Tukey Lambda variance function.
_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127,
-0.5370742306855439, 0.17292046290190008,
-0.02371146284628187]
_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124,
1.7660926747377275, 0.2643989311168465]
# numpy.poly1d instances for the numerator and denominator of the
# Pade approximation to the Tukey Lambda variance.
_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1])
_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1])
def tukeylambda_variance(lam):
"""Variance of the Tukey Lambda distribution.
Parameters
----------
lam : array_like
The lambda values at which to compute the variance.
Returns
-------
v : ndarray
The variance. For lam < -0.5, the variance is not defined, so
np.nan is returned. For lam = -0.5, np.inf is returned.
Notes
-----
In an interval around lambda=0, this function uses the [4,4] Pade
approximation to compute the variance. Otherwise it uses the standard
formula (https://en.wikipedia.org/wiki/Tukey_lambda_distribution). The
Pade approximation is used because the standard formula has a removable
discontinuity at lambda = 0, and does not produce accurate numerical
results near lambda = 0.
"""
lam = np.asarray(lam)
shp = lam.shape
lam = np.atleast_1d(lam).astype(np.float64)
# For absolute values of lam less than threshold, use the Pade
# approximation.
threshold = 0.075
# Play games with masks to implement the conditional evaluation of
# the distribution.
# lambda < -0.5: var = nan
low_mask = lam < -0.5
# lambda == -0.5: var = inf
neghalf_mask = lam == -0.5
# abs(lambda) < threshold: use Pade approximation
small_mask = np.abs(lam) < threshold
# else the "regular" case: use the explicit formula.
reg_mask = ~(low_mask | neghalf_mask | small_mask)
# Get the 'lam' values for the cases where they are needed.
small = lam[small_mask]
reg = lam[reg_mask]
# Compute the function for each case.
v = np.empty_like(lam)
v[low_mask] = np.nan
v[neghalf_mask] = np.inf
if small.size > 0:
# Use the Pade approximation near lambda = 0.
v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
if reg.size > 0:
v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
beta(reg + 1, reg + 1))
v.shape = shp
return v
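# Sanity check (a sketch): at lam = 0 the variance equals pi**2 / 3
# (~3.2898681), which is exactly the leading Pade coefficient above:
#
#     import numpy as np
#     assert np.isclose(tukeylambda_variance(0.0), np.pi**2 / 3)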
# The following code was used to generate the Pade coefficients for the
# Tukey Lambda kurtosis function. Version 0.17 of mpmath was used.
#---------------------------------------------------------------------------
# import mpmath as mp
#
# mp.mp.dps = 60
#
# one = mp.mpf(1)
# two = mp.mpf(2)
# three = mp.mpf(3)
# four = mp.mpf(4)
#
# def mpkurt(lam):
# if lam == 0:
# k = mp.mpf(6)/5
# else:
# numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) +
# three*mp.beta(two*lam+one, two*lam+one))
# denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2
# k = numer / denom - three
# return k
#
# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the
# # taylor function and we request a degree 9 Taylor polynomial, we actually
# # get degree 8.
# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01)
# t = [mp.chop(c, tol=1e-15) for c in t]
# p, q = mp.pade(t, 4, 4)
# print("p =", [mp.fp.mpf(c) for c in p])
# print("q =", [mp.fp.mpf(c) for c in q])
#---------------------------------------------------------------------------
# Pade coefficients for the Tukey Lambda kurtosis function.
_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077,
0.20601184383406815, 4.59796302262789]
_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842,
0.43075235247853005, -2.789746758009912]
# numpy.poly1d instances for the numerator and denominator of the
# Pade approximation to the Tukey Lambda kurtosis.
_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1])
_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1])
def tukeylambda_kurtosis(lam):
"""Kurtosis of the Tukey Lambda distribution.
Parameters
----------
lam : array_like
The lambda values at which to compute the kurtosis.
Returns
-------
k : ndarray
The kurtosis. For lam < -0.25, the kurtosis is not defined, so
np.nan is returned. For lam = -0.25, np.inf is returned.
"""
lam = np.asarray(lam)
shp = lam.shape
lam = np.atleast_1d(lam).astype(np.float64)
# For absolute values of lam less than threshold, use the Pade
# approximation.
threshold = 0.055
# Use masks to implement the conditional evaluation of the kurtosis.
# lambda < -0.25: kurtosis = nan
low_mask = lam < -0.25
# lambda == -0.25: kurtosis = inf
negqrtr_mask = lam == -0.25
# lambda near 0: use Pade approximation
small_mask = np.abs(lam) < threshold
# else the "regular" case: use the explicit formula.
reg_mask = ~(low_mask | negqrtr_mask | small_mask)
# Get the 'lam' values for the cases where they are needed.
small = lam[small_mask]
reg = lam[reg_mask]
# Compute the function for each case.
k = np.empty_like(lam)
k[low_mask] = np.nan
k[negqrtr_mask] = np.inf
if small.size > 0:
k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)
if reg.size > 0:
numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) +
3 * beta(2 * reg + 1, 2 * reg + 1))
denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2
k[reg_mask] = numer / denom - 3
# The return value will be a numpy array; resetting the shape ensures that
# if `lam` was a scalar, the return value is a 0-d array.
k.shape = shp
return k
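# Sanity check (a sketch): at lam = 0 the excess kurtosis equals 6/5,
# which is exactly the leading Pade coefficient above:
#
#     import numpy as np
#     assert np.isclose(tukeylambda_kurtosis(0.0), 1.2)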
| 6,871
| 33.36
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/mstats_basic.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.
import warnings
from . import _mstats_basic
__all__ = [ # noqa: F822
'argstoarray',
'count_tied_groups',
'describe',
'f_oneway', 'find_repeats', 'friedmanchisquare',
'kendalltau', 'kendalltau_seasonal', 'kruskal', 'kruskalwallis',
'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest',
'ks_1samp', 'kstest',
'linregress',
'mannwhitneyu', 'meppf', 'mode', 'moment', 'mquantiles', 'msign',
'normaltest',
'obrientransform',
'pearsonr', 'plotting_positions', 'pointbiserialr',
'rankdata',
'scoreatpercentile', 'sem',
'sen_seasonal_slopes', 'skew', 'skewtest', 'spearmanr',
'siegelslopes', 'theilslopes',
'tmax', 'tmean', 'tmin', 'trim', 'trimboth',
'trimtail', 'trima', 'trimr', 'trimmed_mean', 'trimmed_std',
'trimmed_stde', 'trimmed_var', 'tsem', 'ttest_1samp', 'ttest_onesamp',
'ttest_ind', 'ttest_rel', 'tvar',
'variation',
'winsorize',
'brunnermunzel', 'ma', 'masked', 'nomask', 'namedtuple',
'distributions', 'stats_linregress', 'stats_LinregressResult',
'stats_theilslopes', 'stats_siegelslopes', 'ModeResult',
'SpearmanrResult', 'KendalltauResult', 'PointbiserialrResult',
'Ttest_1sampResult', 'Ttest_indResult', 'Ttest_relResult',
'MannwhitneyuResult', 'KruskalResult', 'trimdoc', 'trim1',
'DescribeResult', 'stde_median', 'SkewtestResult', 'KurtosistestResult',
'NormaltestResult', 'F_onewayResult', 'FriedmanchisquareResult',
'BrunnerMunzelResult'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.stats.mstats_basic is deprecated and has no attribute "
f"{name}. Try looking in scipy.stats instead.")
warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
"the `scipy.stats.mstats_basic` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_mstats_basic, name)
| 2,123
| 35
| 76
|
py
|
scipy
|
scipy-main/scipy/stats/_kde.py
|
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to SciPy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import warnings
# SciPy imports.
from scipy import linalg, special
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
from . import _mvn
from ._stats import gaussian_kernel_estimate, gaussian_kernel_estimate_log
__all__ = ['gaussian_kde']
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
Weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`. The square
of `kde.factor` multiplies the covariance matrix of the data in the kde
estimation.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
`gaussian_kde` does not currently support data that lies in a
lower-dimensional subspace of the space in which it is expressed. For such
data, consider performing principal component analysis / dimensionality
reduction and using `gaussian_kde` with the transformed data.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> import numpy as np
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self._weights = atleast_1d(weights).astype(float)
self._weights /= sum(self._weights)
if self.weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(self._weights) != self.n:
raise ValueError("`weights` input should be of length n")
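# With the weights normalized to sum to 1, the effective sample size
# neff = sum(w)**2 / sum(w**2) reduces to 1 / sum(w**2).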
self._neff = 1/sum(self._weights**2)
# This can be converted to a warning once gh-10205 is resolved
if self.d > self.n:
msg = ("Number of dimensions is greater than number of samples. "
"This results in a singular data covariance matrix, which "
"cannot be treated using the algorithms implemented in "
"`gaussian_kde`. Note that `gaussian_kde` interprets each "
"*column* of `dataset` to be a point; consider transposing "
"the input to `dataset`.")
raise ValueError(msg)
try:
self.set_bandwidth(bw_method=bw_method)
except linalg.LinAlgError as e:
msg = ("The data appears to lie in a lower-dimensional subspace "
"of the space in which it is expressed. This has resulted "
"in a singular data covariance matrix, which cannot be "
"treated using the algorithms implemented in "
"`gaussian_kde`. Consider performing principle component "
"analysis / dimensionality reduction and using "
"`gaussian_kde` with the transformed data.")
raise linalg.LinAlgError(msg) from e
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different from
the dimensionality of the KDE.
"""
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = (f"points have dimension {d}, "
f"dataset has dimension {self.d}")
raise ValueError(msg)
output_dtype, spec = _get_output_dtype(self.covariance, points)
result = gaussian_kernel_estimate[spec](
self.dataset.T, self.weights[:, None],
points.T, self.cho_cov, output_dtype)
return result[:, 0]
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
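# Each Gaussian kernel centered at a data point contributes
# Phi(normalized_high) - Phi(normalized_low), where Phi (``special.ndtr``)
# is the standard normal CDF; the weighted sum over kernels is the
# integral.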
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = _mvn.mvnun_weighted(low_bounds, high_bounds,
self.dataset, self.weights,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in _mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const
return result
def resample(self, size=None, seed=None):
"""Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
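# Sample from the KDE mixture: draw kernel centers with probability
# `weights`, then add zero-mean Gaussian noise with the kernel covariance.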
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
The silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
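# Note (a sketch of how the two rules relate): `scotts_factor` and
# `silverman_factor` differ only by the constant
# ((d + 2) / 4)**(-1. / (d + 4)); for d = 1 this is (3/4)**(-1/5) ~= 1.059,
# i.e. Silverman's bandwidth is about 6% larger than Scott's for
# univariate data.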
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import numpy as np
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and Cholesky decomp of covariance
if not hasattr(self, '_data_cho_cov'):
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False,
aweights=self.weights))
self._data_cho_cov = linalg.cholesky(self._data_covariance,
lower=True)
self.covariance = self._data_covariance * self.factor**2
self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)
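# The line below computes log|2*pi*covariance| from the Cholesky factor:
# log(det(2*pi*Sigma)) = 2 * sum(log(sqrt(2*pi) * diag(cho_cov)))
#                      = d*log(2*pi) + 2 * sum(log(diag(cho_cov)))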
self.log_det = 2*np.log(np.diag(self.cho_cov
* np.sqrt(2*pi))).sum()
@property
def inv_cov(self):
# Re-compute from scratch each time because I'm not sure how this is
# used in the wild. (Perhaps users change the `dataset`, since it's
# not a private attribute?) `_compute_covariance` used to recalculate
# all these, so we'll recalculate everything now that this is a
# a property.
self.factor = self.covariance_factor()
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False, aweights=self.weights))
return linalg.inv(self._data_covariance) / self.factor**2
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = (f"points have dimension {d}, "
f"dataset has dimension {self.d}")
raise ValueError(msg)
output_dtype, spec = _get_output_dtype(self.covariance, points)
result = gaussian_kernel_estimate_log[spec](
self.dataset.T, self.weights[:, None],
points.T, self.cho_cov, output_dtype)
return result[:, 0]
def marginal(self, dimensions):
"""Return a marginal KDE distribution
Parameters
----------
dimensions : int or 1-d array_like
The dimensions of the multivariate distribution corresponding
with the marginal variables, that is, the indices of the dimensions
that are being retained. The other dimensions are marginalized out.
Returns
-------
marginal_kde : gaussian_kde
An object representing the marginal distribution.
Notes
-----
.. versionadded:: 1.10.0
"""
dims = np.atleast_1d(dimensions)
if not np.issubdtype(dims.dtype, np.integer):
msg = ("Elements of `dimensions` must be integers - the indices "
"of the marginal variables being retained.")
raise ValueError(msg)
n = len(self.dataset) # number of dimensions
original_dims = dims.copy()
dims[dims < 0] = n + dims[dims < 0]
if len(np.unique(dims)) != len(dims):
msg = ("All elements of `dimensions` must be unique.")
raise ValueError(msg)
i_invalid = (dims < 0) | (dims >= n)
if np.any(i_invalid):
msg = (f"Dimensions {original_dims[i_invalid]} are invalid "
f"for a distribution in {n} dimensions.")
raise ValueError(msg)
dataset = self.dataset[dims]
weights = self.weights
return gaussian_kde(dataset, bw_method=self.covariance_factor(),
weights=weights)
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
def _get_output_dtype(covariance, points):
"""
Calculates the output dtype and the "spec" (=C type name).
This was necessary in order to deal with the fused types in the Cython
routine `gaussian_kernel_estimate`. See gh-10824 for details.
"""
output_dtype = np.common_type(covariance, points)
itemsize = np.dtype(output_dtype).itemsize
if itemsize == 4:
spec = 'float'
elif itemsize == 8:
spec = 'double'
elif itemsize in (12, 16):
spec = 'long double'
else:
raise ValueError(
f"{output_dtype} has unexpected item size: {itemsize}"
)
return output_dtype, spec
| 24,989
| 33.421488
| 90
|
py
|
scipy
|
scipy-main/scipy/stats/__init__.py
|
"""
.. _statsrefmanual:
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. currentmodule:: scipy.stats
This module contains a large number of probability distributions,
summary and frequency statistics, correlation functions and statistical
tests, masked statistics, kernel density estimation, quasi-Monte Carlo
functionality, and more.
Statistics is a very large area, and there are topics that are out of scope
for SciPy and are covered by other packages. Some of the most important ones
are:
- `statsmodels <https://www.statsmodels.org/stable/index.html>`__:
regression, linear models, time series analysis, extensions to topics
also covered by ``scipy.stats``.
- `Pandas <https://pandas.pydata.org/>`__: tabular data, time series
functionality, interfaces to other statistical languages.
- `PyMC <https://docs.pymc.io/>`__: Bayesian statistical
modeling, probabilistic machine learning.
- `scikit-learn <https://scikit-learn.org/>`__: classification, regression,
model selection.
- `Seaborn <https://seaborn.pydata.org/>`__: statistical data visualization.
- `rpy2 <https://rpy2.github.io/>`__: Python to R bridge.
Probability distributions
=========================
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
rv_histogram
Continuous distributions
------------------------
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
crystalball -- Crystalball
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f                 -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
genhyperbolic -- Generalized Hyperbolic
geninvgauss -- Generalized Inverse Gaussian
gibrat -- Gibrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic
kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic
kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
laplace -- Laplace
laplace_asymmetric -- Asymmetric Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
loguniform -- Log-Uniform
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
moyal -- Moyal
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
norminvgauss -- Normal Inverse Gaussian
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
rayleigh -- Rayleigh
rel_breitwigner -- Relativistic Breit-Wigner
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewcauchy -- Skew Cauchy
skewnorm -- Skew normal
studentized_range -- Studentized Range
t -- Student's T
trapezoid -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
truncpareto -- Truncated Pareto
truncweibull_min -- Truncated minimum Weibull distribution
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
The ``fit`` method of the univariate continuous distributions uses
maximum likelihood estimation to fit the distribution to a data set.
The ``fit`` method can accept regular data or *censored data*.
Censored data is represented with instances of the `CensoredData`
class.
.. autosummary::
:toctree: generated/
CensoredData
Multivariate distributions
--------------------------
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
dirichlet_multinomial -- Dirichlet multinomial distribution
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
unitary_group -- U(N) group
random_correlation -- random correlation matrices
multivariate_t -- Multivariate t-distribution
multivariate_hypergeom -- Multivariate hypergeometric distribution
random_table -- Distribution of random tables with given marginals
uniform_direction -- Uniform distribution on S(N-1)
vonmises_fisher -- Von Mises-Fisher distribution
`scipy.stats.multivariate_normal` methods accept instances
of the following class to represent the covariance.
.. autosummary::
:toctree: generated/
Covariance -- Representation of a covariance matrix
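For example, a minimal sketch (values are illustrative) of evaluating a
multivariate normal density with a diagonal covariance represented by a
`Covariance` object:
>>> import numpy as np
>>> from scipy import stats
>>> cov = stats.Covariance.from_diagonal([1., 2., 3.])
>>> density = stats.multivariate_normal.pdf(np.zeros(3), cov=cov)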
Discrete distributions
----------------------
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
betabinom -- Beta-Binomial
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
nchypergeom_fisher -- Fisher's Noncentral Hypergeometric
nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric
nhypergeom -- Negative Hypergeometric
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
yulesimon -- Yule-Simon
zipf -- Zipf (Zeta)
zipfian -- Zipfian
An overview of statistical functions is given below. Many of these functions
have similar versions in `scipy.stats.mstats` which work for masked arrays.
Summary statistics
==================
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
pmean -- Power mean
kurtosis -- Fisher or Pearson kurtosis
mode -- Modal value
moment -- Central moment
expectile -- Expectile
skew -- Skewness
   kstat             -- k-statistic
   kstatvar          -- Variance of the k-statistic
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
   tmin              -- Truncated minimum
   tmax              -- Truncated maximum
   tstd              -- Truncated standard deviation
   tsem              -- Truncated standard error of the mean
variation -- Coefficient of variation
find_repeats
rankdata
tiecorrect
trim_mean
gstd -- Geometric Standard Deviation
iqr
sem
bayes_mvs
mvsdist
entropy
differential_entropy
median_abs_deviation
Frequency statistics
====================
.. autosummary::
:toctree: generated/
cumfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
Hypothesis Tests and related functions
======================================
SciPy has many functions for performing hypothesis tests that return a
test statistic and a p-value, and several of them return confidence intervals
and/or other related information.
The headings below are based on common uses of the functions within, but due to
the wide variety of statistical procedures, any attempt at coarse-grained
categorization will be imperfect. Also, note that tests within the same heading
are not interchangeable in general (e.g. many have different distributional
assumptions).
One Sample Tests / Paired Sample Tests
--------------------------------------
One sample tests are typically used to assess whether a single sample was
drawn from a specified distribution or a distribution with specified properties
(e.g. zero mean).
.. autosummary::
:toctree: generated/
ttest_1samp
binomtest
skewtest
kurtosistest
normaltest
jarque_bera
shapiro
anderson
cramervonmises
ks_1samp
goodness_of_fit
chisquare
power_divergence
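For example, a minimal sketch (with illustrative data) of testing whether a
sample is consistent with a population mean of zero:
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> sample = rng.normal(loc=0.2, size=50)
>>> res = stats.ttest_1samp(sample, popmean=0.)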
Paired sample tests are often used to assess whether two samples were drawn
from the same distribution; they differ from the independent sample tests below
in that each observation in one sample is treated as paired with a
closely-related observation in the other sample (e.g. when environmental
factors are controlled between observations within a pair but not among pairs).
They can also be interpreted or used as one-sample tests (e.g. tests on the
mean or median of *differences* between paired observations).
.. autosummary::
:toctree: generated/
ttest_rel
wilcoxon
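For example, a minimal sketch (with illustrative data) of a paired t-test on
before/after measurements:
>>> from scipy import stats
>>> before = [85, 90, 78, 92, 88]
>>> after = [82, 88, 75, 91, 85]
>>> res = stats.ttest_rel(before, after)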
Association/Correlation Tests
-----------------------------
These tests are often used to assess whether there is a relationship (e.g.
linear) between paired observations in multiple samples or among the
coordinates of multivariate observations.
.. autosummary::
:toctree: generated/
linregress
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
somersd
siegelslopes
theilslopes
page_trend_test
multiscale_graphcorr
These association tests are for working with samples in the form of
contingency tables. Supporting functions are available in
`scipy.stats.contingency`.
.. autosummary::
:toctree: generated/
chi2_contingency
fisher_exact
barnard_exact
boschloo_exact
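For example, a minimal sketch (with illustrative counts) of a test of
independence on a 2x2 contingency table:
>>> import numpy as np
>>> from scipy import stats
>>> table = np.array([[10, 20], [30, 25]])
>>> res = stats.chi2_contingency(table)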
Independent Sample Tests
------------------------
Independent sample tests are typically used to assess whether multiple samples
were independently drawn from the same distribution or different distributions
with a shared property (e.g. equal means).
Some tests are specifically for comparing two samples.
.. autosummary::
:toctree: generated/
ttest_ind_from_stats
poisson_means_test
ttest_ind
mannwhitneyu
bws_test
ranksums
brunnermunzel
mood
ansari
cramervonmises_2samp
epps_singleton_2samp
ks_2samp
kstest
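For example, a minimal sketch (with illustrative data) comparing the means
of two independent samples:
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> a = rng.normal(loc=0.0, size=30)
>>> b = rng.normal(loc=0.5, size=30)
>>> res = stats.ttest_ind(a, b)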
Others are generalized to multiple samples.
.. autosummary::
:toctree: generated/
f_oneway
tukey_hsd
dunnett
kruskal
alexandergovern
fligner
levene
bartlett
median_test
friedmanchisquare
anderson_ksamp
Resampling and Monte Carlo Methods
----------------------------------
The following functions can reproduce the p-value and confidence interval
results of most of the functions above, and often produce accurate results in a
wider variety of conditions. They can also be used to perform hypothesis tests
and generate confidence intervals for custom statistics. This flexibility comes
at the cost of greater computational requirements and stochastic results.
.. autosummary::
:toctree: generated/
monte_carlo_test
permutation_test
bootstrap
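For example, a minimal sketch (with illustrative data) of a bootstrap
confidence interval for the standard deviation:
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> data = (rng.normal(size=100),)  # samples must be passed in a sequence
>>> res = stats.bootstrap(data, np.std, confidence_level=0.95)
>>> ci = res.confidence_interval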
Instances of the following object can be passed into some hypothesis test
functions to perform a resampling or Monte Carlo version of the hypothesis
test.
.. autosummary::
:toctree: generated/
MonteCarloMethod
PermutationMethod
BootstrapMethod
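For example, a sketch (with illustrative data, assuming `pearsonr` accepts
the ``method`` keyword as in recent SciPy releases) of computing a
permutation-based p-value for a correlation:
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = rng.random(20)
>>> y = x + rng.normal(scale=0.5, size=20)
>>> res = stats.pearsonr(x, y, method=stats.PermutationMethod(n_resamples=999))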
Multiple Hypothesis Testing and Meta-Analysis
---------------------------------------------
These functions are for assessing the results of individual tests as a whole.
Functions for performing specific multiple hypothesis tests (e.g. post hoc
tests) are listed above.
.. autosummary::
:toctree: generated/
combine_pvalues
false_discovery_control
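For example, a minimal sketch (with illustrative p-values) of adjusting
p-values to control the false discovery rate (Benjamini-Hochberg):
>>> from scipy import stats
>>> ps = [0.001, 0.010, 0.020, 0.040, 0.300]
>>> ps_adjusted = stats.false_discovery_control(ps)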
The following functions are related to the tests above but do not belong in the
above categories.
Quasi-Monte Carlo
=================
.. toctree::
:maxdepth: 4
stats.qmc
Contingency Tables
==================
.. toctree::
:maxdepth: 4
stats.contingency
Masked statistics functions
===========================
.. toctree::
stats.mstats
Other statistical functionality
===============================
Transformations
---------------
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
yeojohnson
yeojohnson_normmax
yeojohnson_llf
obrientransform
sigmaclip
trimboth
trim1
zmap
zscore
gzscore
Statistical distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
Sampling
--------
.. toctree::
:maxdepth: 4
stats.sampling
Random variate generation / CDF Inversion
-----------------------------------------
.. autosummary::
:toctree: generated/
rvs_ratio_uniforms
Fitting / Survival Analysis
---------------------------
.. autosummary::
:toctree: generated/
fit
ecdf
logrank
Directional statistical functions
---------------------------------
.. autosummary::
:toctree: generated/
directional_stats
circmean
circvar
circstd
Sensitivity Analysis
--------------------
.. autosummary::
:toctree: generated/
sobol_indices
Plot-tests
----------
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
yeojohnson_normplot
Univariate and multivariate kernel density estimation
-----------------------------------------------------
.. autosummary::
:toctree: generated/
gaussian_kde
Warnings / Errors used in :mod:`scipy.stats`
--------------------------------------------
.. autosummary::
:toctree: generated/
DegenerateDataWarning
ConstantInputWarning
NearConstantInputWarning
FitError
Result classes used in :mod:`scipy.stats`
-----------------------------------------
.. warning::
These classes are private, but they are included here because instances
of them are returned by other statistical functions. User import and
instantiation are not supported.
.. toctree::
:maxdepth: 2
stats._result_classes
"""
from ._warnings_errors import (ConstantInputWarning, NearConstantInputWarning,
DegenerateDataWarning, FitError)
from ._stats_py import *
from ._variation import variation
from .distributions import *
from ._morestats import *
from ._multicomp import *
from ._binomtest import binomtest
from ._binned_statistic import *
from ._kde import gaussian_kde
from . import mstats
from . import qmc
from ._multivariate import *
from . import contingency
from .contingency import chi2_contingency
from ._censored_data import CensoredData # noqa
from ._resampling import (bootstrap, monte_carlo_test, permutation_test,
MonteCarloMethod, PermutationMethod, BootstrapMethod)
from ._entropy import *
from ._hypotests import *
from ._rvs_sampling import rvs_ratio_uniforms
from ._page_trend_test import page_trend_test
from ._mannwhitneyu import mannwhitneyu
from ._bws_test import bws_test
from ._fit import fit, goodness_of_fit
from ._covariance import Covariance
from ._sensitivity_analysis import *
from ._survival import *
# Deprecated namespaces, to be removed in v2.0.0
from . import (
biasedurn, kde, morestats, mstats_basic, mstats_extras, mvn, stats
)
__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
scipy-main/scipy/stats/_stats_pythran.py
import numpy as np
#pythran export _Aij(float[:,:], int, int)
#pythran export _Aij(int[:,:], int, int)
def _Aij(A, i, j):
"""Sum of upper-left and lower right blocks of contingency table."""
# See `somersd` References [2] bottom of page 309
return A[:i, :j].sum() + A[i+1:, j+1:].sum()
#pythran export _Dij(float[:,:], int, int)
#pythran export _Dij(int[:,:], int, int)
def _Dij(A, i, j):
"""Sum of lower-left and upper-right blocks of contingency table."""
# See `somersd` References [2] bottom of page 309
return A[i+1:, :j].sum() + A[:i, j+1:].sum()
# pythran export _concordant_pairs(float[:,:])
# pythran export _concordant_pairs(int[:,:])
def _concordant_pairs(A):
"""Twice the number of concordant pairs, excluding ties."""
# See `somersd` References [2] bottom of page 309
m, n = A.shape
count = 0
for i in range(m):
for j in range(n):
count += A[i, j]*_Aij(A, i, j)
return count
# pythran export _discordant_pairs(float[:,:])
# pythran export _discordant_pairs(int[:,:])
def _discordant_pairs(A):
"""Twice the number of discordant pairs, excluding ties."""
# See `somersd` References [2] bottom of page 309
m, n = A.shape
count = 0
for i in range(m):
for j in range(n):
count += A[i, j]*_Dij(A, i, j)
return count
#pythran export _a_ij_Aij_Dij2(float[:,:])
#pythran export _a_ij_Aij_Dij2(int[:,:])
def _a_ij_Aij_Dij2(A):
"""A term that appears in the ASE of Kendall's tau and Somers' D."""
# See `somersd` References [2] section 4: Modified ASEs to test the null hypothesis...
m, n = A.shape
count = 0
for i in range(m):
for j in range(n):
count += A[i, j]*(_Aij(A, i, j) - _Dij(A, i, j))**2
return count
#pythran export _compute_outer_prob_inside_method(int64, int64, int64, int64)
def _compute_outer_prob_inside_method(m, n, g, h):
"""
Count the proportion of paths that do not stay strictly inside two
diagonal lines.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The proportion of paths that do not stay inside the two lines.
The classical algorithm counts the integer lattice paths from (0, 0)
to (m, n) which satisfy |x/m - y/n| < h / lcm(m, n).
The paths make steps of size +1 in either positive x or positive y
directions.
    We are, however, interested in 1 - proportion to compute p-values,
    so we change the recursion to compute 1 - p directly while staying
    within the "inside method" as described by Hodges.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
    Arkiv för Matematik, 3, No. 43 (1958), 469-86.
For the recursion for 1-p see
Viehmann, T.: "Numerically more stable computation of the p-values
for the two-sample Kolmogorov-Smirnov test," arXiv: 2102.08037
"""
# Probability is symmetrical in m, n. Computation below uses m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Count the integer lattice paths from (0, 0) to (m, n) which satisfy
# |nx/g - my/g| < h.
# Compute matrix A such that:
# A(x, 0) = A(0, y) = 1
# A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
# A(x, y) = 0 if |x/m - y/n|>= h
# Probability is A(m, n)/binom(m+n, n)
# Optimizations exist for m==n, m==n*p.
# Only need to preserve a single column of A, and only a
# sliding window of it.
# minj keeps track of the slide.
minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
curlen = maxj - minj
# Make a vector long enough to hold maximum window needed.
lenA = min(2 * maxj + 2, n + 1)
# This is an integer calculation, but the entries are essentially
# binomial coefficients, hence grow quickly.
# Scaling after each column is computed avoids dividing by a
# large binomial coefficient at the end, but is not sufficient to avoid
    # the large dynamic range which appears during the calculation.
    # Instead we rescale based on the magnitude of the right-most term in
    # the column and keep track of an exponent separately and apply
    # it at the end of the calculation. Similarly when multiplying by
    # the binomial coefficient.
dtype = np.float64
A = np.ones(lenA, dtype=dtype)
# Initialize the first column
A[minj:maxj] = 0.0
for i in range(1, m + 1):
# Generate the next column.
# First calculate the sliding window
lastminj, lastlen = minj, curlen
minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
minj = min(minj, n)
maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
if maxj <= minj:
return 1.0
# Now fill in the values. We cannot use cumsum, unfortunately.
val = 0.0 if minj == 0 else 1.0
for jj in range(maxj - minj):
j = jj + minj
val = (A[jj + minj - lastminj] * i + val * j) / (i + j)
A[jj] = val
curlen = maxj - minj
if lastlen > curlen:
# Set some carried-over elements to 1
A[maxj - minj:maxj - minj + (lastlen - curlen)] = 1
return A[maxj - minj - 1]
# pythran export siegelslopes(float32[:], float32[:], str)
# pythran export siegelslopes(float64[:], float64[:], str)
def siegelslopes(y, x, method):
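    # Pythran-compiled backend for `scipy.stats.siegelslopes`: for each
    # point j, take the median of the slopes of lines through point j and
    # every other point, then take the median of those per-point medians
    # (and, for method='separate', of the per-point intercepts).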
deltax = np.expand_dims(x, 1) - x
deltay = np.expand_dims(y, 1) - y
slopes, intercepts = [], []
for j in range(len(x)):
id_nonzero, = np.nonzero(deltax[j, :])
slopes_j = deltay[j, id_nonzero] / deltax[j, id_nonzero]
medslope_j = np.median(slopes_j)
slopes.append(medslope_j)
if method == 'separate':
z = y*x[j] - y[j]*x
medintercept_j = np.median(z[id_nonzero] / deltax[j, id_nonzero])
intercepts.append(medintercept_j)
medslope = np.median(np.asarray(slopes))
if method == "separate":
medinter = np.median(np.asarray(intercepts))
else:
medinter = np.median(y - medslope*x)
return medslope, medinter
scipy-main/scipy/stats/_sampling.py
import numpy as np
from scipy._lib._util import check_random_state
class RatioUniforms:
"""
Generate random samples from a probability density function using the
ratio-of-uniforms method.
Parameters
----------
pdf : callable
A function with signature `pdf(x)` that is proportional to the
probability density function of the distribution.
umax : float
The upper bound of the bounding rectangle in the u-direction.
vmin : float
The lower bound of the bounding rectangle in the v-direction.
vmax : float
The upper bound of the bounding rectangle in the v-direction.
    c : float, optional
Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        If `random_state` is None (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is
        used, seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, then that instance is used.
Methods
-------
rvs
Notes
-----
Given a univariate probability density function `pdf` and a constant `c`,
define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
If ``(U, V)`` is a random vector uniformly distributed over ``A``,
then ``V/U + c`` follows a distribution according to `pdf`.
The above result (see [1]_, [2]_) can be used to sample random variables
using only the PDF, i.e. no inversion of the CDF is required. Typical
choices of `c` are zero or the mode of `pdf`. The set ``A`` is a subset of
the rectangle ``R = [0, umax] x [vmin, vmax]`` where
- ``umax = sup sqrt(pdf(x))``
- ``vmin = inf (x - c) sqrt(pdf(x))``
- ``vmax = sup (x - c) sqrt(pdf(x))``
In particular, these values are finite if `pdf` is bounded and
``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
One can generate ``(U, V)`` uniformly on ``R`` and return
``V/U + c`` if ``(U, V)`` are also in ``A`` which can be directly
verified.
The algorithm is not changed if one replaces `pdf` by k * `pdf` for any
constant k > 0. Thus, it is often convenient to work with a function
that is proportional to the probability density function by dropping
unnecessary normalization factors.
Intuitively, the method works well if ``A`` fills up most of the
enclosing rectangle such that the probability is high that ``(U, V)``
lies in ``A`` whenever it lies in ``R`` as the number of required
iterations becomes too large otherwise. To be more precise, note that
the expected number of iterations to draw ``(U, V)`` uniformly
distributed on ``R`` such that ``(U, V)`` is also in ``A`` is given by
the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``,
where `area(pdf)` is the integral of `pdf` (which is equal to one if the
probability density function is used but can take on other values if a
function proportional to the density is used). The equality holds since
the area of ``A`` is equal to ``0.5 * area(pdf)`` (Theorem 7.1 in [1]_).
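    As a quick numerical check (a sketch using the unnormalized normal
    density ``exp(-x**2/2)`` from the Examples below, for which
    ``area(pdf) = sqrt(2*pi)``), the expected number of iterations per
    accepted variate is about 1.37:
    >>> import numpy as np
    >>> umax, v = 1.0, np.sqrt(2 / np.e)
    >>> round(2 * umax * (2 * v) / np.sqrt(2 * np.pi), 4)  # area(R)/area(A)
    1.3688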
If the sampling fails to generate a single random variate after 50000
iterations (i.e. not a single draw is in ``A``), an exception is raised.
If the bounding rectangle is not correctly specified (i.e. if it does not
contain ``A``), the algorithm samples from a distribution different from
the one given by `pdf`. It is therefore recommended to perform a
test such as `~scipy.stats.kstest` as a check.
References
----------
.. [1] L. Devroye, "Non-Uniform Random Variate Generation",
Springer-Verlag, 1986.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
.. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
Variables Using the Ratio of Uniform Deviates",
ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> from scipy.stats.sampling import RatioUniforms
>>> rng = np.random.default_rng()
Simulate normally distributed random variables. It is easy to compute the
bounding rectangle explicitly in that case. For simplicity, we drop the
normalization factor of the density.
>>> f = lambda x: np.exp(-x**2 / 2)
>>> v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
>>> umax = np.sqrt(f(0))
>>> gen = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=rng)
>>> r = gen.rvs(size=2500)
The K-S test confirms that the random variates are indeed normally
distributed (normality is not rejected at 5% significance level):
>>> stats.kstest(r, 'norm')[1]
0.250634764150542
The exponential distribution provides another example where the bounding
rectangle can be determined explicitly.
>>> gen = RatioUniforms(lambda x: np.exp(-x), umax=1, vmin=0,
... vmax=2*np.exp(-1), random_state=rng)
>>> r = gen.rvs(1000)
>>> stats.kstest(r, 'expon')[1]
0.21121052054580314
"""
def __init__(self, pdf, *, umax, vmin, vmax, c=0, random_state=None):
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
self._pdf = pdf
self._umax = umax
self._vmin = vmin
self._vmax = vmax
self._c = c
self._rng = check_random_state(random_state)
def rvs(self, size=1):
"""Sampling of random variates
Parameters
----------
size : int or tuple of ints, optional
Number of random variates to be generated (default is 1).
Returns
-------
rvs : ndarray
The random variates distributed according to the probability
distribution defined by the pdf.
"""
size1d = tuple(np.atleast_1d(size))
N = np.prod(size1d) # number of rvs needed, reshape upon return
# start sampling using ratio of uniforms method
x = np.zeros(N)
simulated, i = 0, 1
# loop until N rvs have been generated: expected runtime is finite.
# to avoid infinite loop, raise exception if not a single rv has been
        # generated after 50000 tries. Even if the expected number of
        # iterations is 1000, the probability of this event is
        # (1-1/1000)**50000, which is of order 1e-22
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u1 = self._umax * self._rng.uniform(size=k)
v1 = self._rng.uniform(self._vmin, self._vmax, size=k)
# apply rejection method
rvs = v1 / u1 + self._c
accept = (u1**2 <= self._pdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = (
f"Not a single random variate could be generated in {i*N} "
"attempts. The ratio of uniforms method does not appear "
"to work for the provided parameters. Please check the "
"pdf and the bounds."
)
raise RuntimeError(msg)
i += 1
return np.reshape(x, size1d)
scipy-main/scipy/stats/_mannwhitneyu.py
import numpy as np
from collections import namedtuple
from scipy import special
from scipy import stats
from ._axis_nan_policy import _axis_nan_policy_factory
def _broadcast_concatenate(x, y, axis):
'''Broadcast then concatenate arrays, leaving concatenation axis last'''
x = np.moveaxis(x, axis, -1)
y = np.moveaxis(y, axis, -1)
z = np.broadcast(x[..., 0], y[..., 0])
x = np.broadcast_to(x, z.shape + (x.shape[-1],))
y = np.broadcast_to(y, z.shape + (y.shape[-1],))
z = np.concatenate((x, y), axis=-1)
return x, y, z
class _MWU:
'''Distribution of MWU statistic under the null hypothesis'''
# Possible improvement: if m and n are small enough, use integer arithmetic
def __init__(self):
'''Minimal initializer'''
self._fmnks = -np.ones((1, 1, 1))
self._recursive = None
def pmf(self, k, m, n):
if (self._recursive is None and m <= 500 and n <= 500
or self._recursive):
return self.pmf_recursive(k, m, n)
else:
return self.pmf_iterative(k, m, n)
def pmf_recursive(self, k, m, n):
'''Probability mass function, recursive version'''
self._resize_fmnks(m, n, np.max(k))
# could loop over just the unique elements, but probably not worth
# the time to find them
for i in np.ravel(k):
self._f(m, n, i)
return self._fmnks[m, n, k] / special.binom(m + n, m)
def pmf_iterative(self, k, m, n):
'''Probability mass function, iterative version'''
fmnks = {}
for i in np.ravel(k):
fmnks = _mwu_f_iterative(m, n, i, fmnks)
return (np.array([fmnks[(m, n, ki)] for ki in k])
/ special.binom(m + n, m))
def cdf(self, k, m, n):
'''Cumulative distribution function'''
# We could use the fact that the distribution is symmetric to avoid
# summing more than m*n/2 terms, but it might not be worth the
# overhead. Let's leave that to an improvement.
pmfs = self.pmf(np.arange(0, np.max(k) + 1), m, n)
cdfs = np.cumsum(pmfs)
return cdfs[k]
def sf(self, k, m, n):
'''Survival function'''
# Use the fact that the distribution is symmetric; i.e.
# _f(m, n, m*n-k) = _f(m, n, k), and sum from the left
k = m*n - k
# Note that both CDF and SF include the PMF at k. The p-value is
# calculated from the SF and should include the mass at k, so this
# is desirable
return self.cdf(k, m, n)
def _resize_fmnks(self, m, n, k):
'''If necessary, expand the array that remembers PMF values'''
# could probably use `np.pad` but I'm not sure it would save code
shape_old = np.array(self._fmnks.shape)
shape_new = np.array((m+1, n+1, k+1))
if np.any(shape_new > shape_old):
shape = np.maximum(shape_old, shape_new)
fmnks = -np.ones(shape) # create the new array
m0, n0, k0 = shape_old
fmnks[:m0, :n0, :k0] = self._fmnks # copy remembered values
self._fmnks = fmnks
def _f(self, m, n, k):
'''Recursive implementation of function of [3] Theorem 2.5'''
# [3] Theorem 2.5 Line 1
if k < 0 or m < 0 or n < 0 or k > m*n:
return 0
# if already calculated, return the value
if self._fmnks[m, n, k] >= 0:
return self._fmnks[m, n, k]
if k == 0 and m >= 0 and n >= 0: # [3] Theorem 2.5 Line 2
fmnk = 1
else: # [3] Theorem 2.5 Line 3 / Equation 3
fmnk = self._f(m-1, n, k-n) + self._f(m, n-1, k)
self._fmnks[m, n, k] = fmnk # remember result
return fmnk
# Maintain state for faster repeat calls to mannwhitneyu w/ method='exact'
_mwu_state = _MWU()
def _mwu_f_iterative(m, n, k, fmnks):
'''Iterative implementation of function of [3] Theorem 2.5'''
def _base_case(m, n, k):
'''Base cases from recursive version'''
# if already calculated, return the value
if fmnks.get((m, n, k), -1) >= 0:
return fmnks[(m, n, k)]
# [3] Theorem 2.5 Line 1
elif k < 0 or m < 0 or n < 0 or k > m*n:
return 0
# [3] Theorem 2.5 Line 2
elif k == 0 and m >= 0 and n >= 0:
return 1
return None
stack = [(m, n, k)]
fmnk = None
while stack:
        # Popping only if necessary would save a tiny bit of time, but it
        # is not worth the complexity.
m, n, k = stack.pop()
# If we're at a base case, continue (stack unwinds)
fmnk = _base_case(m, n, k)
if fmnk is not None:
fmnks[(m, n, k)] = fmnk
continue
# If both terms are base cases, continue (stack unwinds)
f1 = _base_case(m-1, n, k-n)
f2 = _base_case(m, n-1, k)
if f1 is not None and f2 is not None:
# [3] Theorem 2.5 Line 3 / Equation 3
fmnk = f1 + f2
fmnks[(m, n, k)] = fmnk
continue
# recurse deeper
stack.append((m, n, k))
if f1 is None:
stack.append((m-1, n, k-n))
if f2 is None:
stack.append((m, n-1, k))
return fmnks
def _tie_term(ranks):
"""Tie correction term"""
# element i of t is the number of elements sharing rank i
_, t = np.unique(ranks, return_counts=True, axis=-1)
return (t**3 - t).sum(axis=-1)
def _get_mwu_z(U, n1, n2, ranks, axis=0, continuity=True):
'''Standardized MWU statistic'''
# Follows mannwhitneyu [2]
mu = n1 * n2 / 2
n = n1 + n2
# Tie correction according to [2]
tie_term = np.apply_along_axis(_tie_term, -1, ranks)
s = np.sqrt(n1*n2/12 * ((n + 1) - tie_term/(n*(n-1))))
# equivalent to using scipy.stats.tiecorrect
# T = np.apply_along_axis(stats.tiecorrect, -1, ranks)
# s = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
numerator = U - mu
# Continuity correction.
# Because SF is always used to calculate the p-value, we can always
# _subtract_ 0.5 for the continuity correction. This always increases the
# p-value to account for the rest of the probability mass _at_ q = U.
if continuity:
numerator -= 0.5
    # no problem evaluating the norm SF at infinity
with np.errstate(divide='ignore', invalid='ignore'):
z = numerator / s
return z
def _mwu_input_validation(x, y, use_continuity, alternative, axis, method):
''' Input validation and standardization for mannwhitneyu '''
# Would use np.asarray_chkfinite, but infs are OK
x, y = np.atleast_1d(x), np.atleast_1d(y)
if np.isnan(x).any() or np.isnan(y).any():
raise ValueError('`x` and `y` must not contain NaNs.')
if np.size(x) == 0 or np.size(y) == 0:
raise ValueError('`x` and `y` must be of nonzero size.')
bools = {True, False}
if use_continuity not in bools:
raise ValueError(f'`use_continuity` must be one of {bools}.')
alternatives = {"two-sided", "less", "greater"}
alternative = alternative.lower()
if alternative not in alternatives:
raise ValueError(f'`alternative` must be one of {alternatives}.')
axis_int = int(axis)
if axis != axis_int:
raise ValueError('`axis` must be an integer.')
methods = {"asymptotic", "exact", "auto"}
method = method.lower()
if method not in methods:
raise ValueError(f'`method` must be one of {methods}.')
return x, y, use_continuity, alternative, axis_int, method
def _tie_check(xy):
"""Find any ties in data"""
_, t = np.unique(xy, return_counts=True, axis=-1)
return np.any(t != 1)
def _mwu_choose_method(n1, n2, xy, method):
"""Choose method 'asymptotic' or 'exact' depending on input size, ties"""
# if both inputs are large, asymptotic is OK
if n1 > 8 and n2 > 8:
return "asymptotic"
# if there are any ties, asymptotic is preferred
if np.apply_along_axis(_tie_check, -1, xy).any():
return "asymptotic"
return "exact"
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
@_axis_nan_policy_factory(MannwhitneyuResult, n_samples=2)
def mannwhitneyu(x, y, use_continuity=True, alternative="two-sided",
axis=0, method="auto"):
r'''Perform the Mann-Whitney U rank test on two independent samples.
The Mann-Whitney U test is a nonparametric test of the null hypothesis
that the distribution underlying sample `x` is the same as the
distribution underlying sample `y`. It is often used as a test of
difference in location between distributions.
Parameters
----------
x, y : array-like
N-d arrays of samples. The arrays must be broadcastable except along
the dimension given by `axis`.
use_continuity : bool, optional
Whether a continuity correction (1/2) should be applied.
Default is True when `method` is ``'asymptotic'``; has no effect
otherwise.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
Let *F(u)* and *G(u)* be the cumulative distribution functions of the
distributions underlying `x` and `y`, respectively. Then the following
alternative hypotheses are available:
* 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)* for
at least one *u*.
* 'less': the distribution underlying `x` is stochastically less
than the distribution underlying `y`, i.e. *F(u) > G(u)* for all *u*.
* 'greater': the distribution underlying `x` is stochastically greater
than the distribution underlying `y`, i.e. *F(u) < G(u)* for all *u*.
Under a more restrictive set of assumptions, the alternative hypotheses
can be expressed in terms of the locations of the distributions;
see [5] section 5.1.
axis : int, optional
Axis along which to perform the test. Default is 0.
method : {'auto', 'asymptotic', 'exact'}, optional
Selects the method used to calculate the *p*-value.
Default is 'auto'. The following options are available.
* ``'asymptotic'``: compares the standardized test statistic
against the normal distribution, correcting for ties.
* ``'exact'``: computes the exact *p*-value by comparing the observed
:math:`U` statistic against the exact distribution of the :math:`U`
statistic under the null hypothesis. No correction is made for ties.
* ``'auto'``: chooses ``'exact'`` when the size of one of the samples
is less than or equal to 8 and there are no ties;
chooses ``'asymptotic'`` otherwise.
Returns
-------
res : MannwhitneyuResult
An object containing attributes:
statistic : float
The Mann-Whitney U statistic corresponding with sample `x`. See
Notes for the test statistic corresponding with sample `y`.
pvalue : float
The associated *p*-value for the chosen `alternative`.
Notes
-----
    If ``U1`` is the statistic corresponding with sample `x`, then the
    statistic corresponding with sample `y` is
    ``U2 = x.shape[axis] * y.shape[axis] - U1``.
`mannwhitneyu` is for independent samples. For related / paired samples,
consider `scipy.stats.wilcoxon`.
`method` ``'exact'`` is recommended when there are no ties and when either
sample size is less than 8 [1]_. The implementation follows the recurrence
relation originally proposed in [1]_ as it is described in [3]_.
Note that the exact method is *not* corrected for ties, but
`mannwhitneyu` will not raise errors or warnings if there are ties in the
data.
The Mann-Whitney U test is a non-parametric version of the t-test for
independent samples. When the means of samples from the populations
are normally distributed, consider `scipy.stats.ttest_ind`.
See Also
--------
scipy.stats.wilcoxon, scipy.stats.ranksums, scipy.stats.ttest_ind
References
----------
.. [1] H.B. Mann and D.R. Whitney, "On a test of whether one of two random
variables is stochastically larger than the other", The Annals of
Mathematical Statistics, Vol. 18, pp. 50-60, 1947.
.. [2] Mann-Whitney U Test, Wikipedia,
http://en.wikipedia.org/wiki/Mann-Whitney_U_test
.. [3] A. Di Bucchianico, "Combinatorics, computer algebra, and the
Wilcoxon-Mann-Whitney test", Journal of Statistical Planning and
Inference, Vol. 79, pp. 349-364, 1999.
.. [4] Rosie Shier, "Statistics: 2.3 The Mann-Whitney U Test", Mathematics
Learning Support Centre, 2004.
.. [5] Michael P. Fay and Michael A. Proschan. "Wilcoxon-Mann-Whitney
or t-test? On assumptions for hypothesis tests and multiple \
interpretations of decision rules." Statistics surveys, Vol. 4, pp.
1-39, 2010. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2857732/
Examples
--------
We follow the example from [4]_: nine randomly sampled young adults were
diagnosed with type II diabetes at the ages below.
>>> males = [19, 22, 16, 29, 24]
>>> females = [20, 11, 17, 12]
We use the Mann-Whitney U test to assess whether there is a statistically
significant difference in the diagnosis age of males and females.
The null hypothesis is that the distribution of male diagnosis ages is
the same as the distribution of female diagnosis ages. We decide
that a confidence level of 95% is required to reject the null hypothesis
in favor of the alternative that the distributions are different.
Since the number of samples is very small and there are no ties in the
data, we can compare the observed test statistic against the *exact*
distribution of the test statistic under the null hypothesis.
>>> from scipy.stats import mannwhitneyu
>>> U1, p = mannwhitneyu(males, females, method="exact")
>>> print(U1)
17.0
`mannwhitneyu` always reports the statistic associated with the first
sample, which, in this case, is males. This agrees with :math:`U_M = 17`
    reported in [4]_. The statistic associated with the second sample
    can be calculated:
>>> nx, ny = len(males), len(females)
>>> U2 = nx*ny - U1
>>> print(U2)
3.0
This agrees with :math:`U_F = 3` reported in [4]_. The two-sided
*p*-value can be calculated from either statistic, and the value produced
by `mannwhitneyu` agrees with :math:`p = 0.11` reported in [4]_.
>>> print(p)
0.1111111111111111
The exact distribution of the test statistic is asymptotically normal, so
the example continues by comparing the exact *p*-value against the
*p*-value produced using the normal approximation.
>>> _, pnorm = mannwhitneyu(males, females, method="asymptotic")
>>> print(pnorm)
0.11134688653314041
Here `mannwhitneyu`'s reported *p*-value appears to conflict with the
value :math:`p = 0.09` given in [4]_. The reason is that [4]_
does not apply the continuity correction performed by `mannwhitneyu`;
`mannwhitneyu` reduces the distance between the test statistic and the
mean :math:`\mu = n_x n_y / 2` by 0.5 to correct for the fact that the
discrete statistic is being compared against a continuous distribution.
Here, the :math:`U` statistic used is less than the mean, so we reduce
the distance by adding 0.5 in the numerator.
>>> import numpy as np
>>> from scipy.stats import norm
>>> U = min(U1, U2)
>>> N = nx + ny
>>> z = (U - nx*ny/2 + 0.5) / np.sqrt(nx*ny * (N + 1)/ 12)
>>> p = 2 * norm.cdf(z) # use CDF to get p-value from smaller statistic
>>> print(p)
0.11134688653314041
If desired, we can disable the continuity correction to get a result
that agrees with that reported in [4]_.
>>> _, pnorm = mannwhitneyu(males, females, use_continuity=False,
... method="asymptotic")
>>> print(pnorm)
0.0864107329737
Regardless of whether we perform an exact or asymptotic test, the
probability of the test statistic being as extreme or more extreme by
chance exceeds 5%, so we do not consider the results statistically
significant.
Suppose that, before seeing the data, we had hypothesized that females
would tend to be diagnosed at a younger age than males.
In that case, it would be natural to provide the female ages as the
first input, and we would have performed a one-sided test using
``alternative = 'less'``: females are diagnosed at an age that is
stochastically less than that of males.
>>> res = mannwhitneyu(females, males, alternative="less", method="exact")
>>> print(res)
MannwhitneyuResult(statistic=3.0, pvalue=0.05555555555555555)
Again, the probability of getting a sufficiently low value of the
test statistic by chance under the null hypothesis is greater than 5%,
so we do not reject the null hypothesis in favor of our alternative.
If it is reasonable to assume that the means of samples from the
populations are normally distributed, we could have used a t-test to
perform the analysis.
>>> from scipy.stats import ttest_ind
>>> res = ttest_ind(females, males, alternative="less")
>>> print(res)
Ttest_indResult(statistic=-2.239334696520584, pvalue=0.030068441095757924)
Under this assumption, the *p*-value would be low enough to reject the
null hypothesis in favor of the alternative.
'''
x, y, use_continuity, alternative, axis_int, method = (
_mwu_input_validation(x, y, use_continuity, alternative, axis, method))
x, y, xy = _broadcast_concatenate(x, y, axis)
n1, n2 = x.shape[-1], y.shape[-1]
if method == "auto":
method = _mwu_choose_method(n1, n2, xy, method)
# Follows [2]
ranks = stats.rankdata(xy, axis=-1) # method 2, step 1
R1 = ranks[..., :n1].sum(axis=-1) # method 2, step 2
U1 = R1 - n1*(n1+1)/2 # method 2, step 3
U2 = n1 * n2 - U1 # as U1 + U2 = n1 * n2
if alternative == "greater":
U, f = U1, 1 # U is the statistic to use for p-value, f is a factor
elif alternative == "less":
U, f = U2, 1 # Due to symmetry, use SF of U2 rather than CDF of U1
else:
U, f = np.maximum(U1, U2), 2 # multiply SF by two for two-sided test
if method == "exact":
p = _mwu_state.sf(U.astype(int), n1, n2)
elif method == "asymptotic":
z = _get_mwu_z(U, n1, n2, ranks, continuity=use_continuity)
p = stats.norm.sf(z)
p *= f
    # Ensure that the p-value is not greater than 1.
    # This could happen for the exact test when U = m*n/2
p = np.clip(p, 0, 1)
return MannwhitneyuResult(U1, p)
scipy-main/scipy/stats/_common.py
from collections import namedtuple
ConfidenceInterval = namedtuple("ConfidenceInterval", ["low", "high"])
ConfidenceInterval.__doc__ = "Class for confidence intervals."
scipy-main/scipy/stats/_variation.py
import numpy as np
from scipy._lib._util import _nan_allsame, _contains_nan, normalize_axis_index
from ._stats_py import _chk_asarray
def _nanvariation(a, *, axis=0, ddof=0, keepdims=False):
"""
Private version of `variation` that ignores nan.
`a` must be a numpy array.
`axis` is assumed to be normalized, i.e. 0 <= axis < a.ndim.
"""
#
# In theory, this should be as simple as something like
# nanstd(a, ddof=ddof, axis=axis, keepdims=keepdims) /
# nanmean(a, axis=axis, keepdims=keepdims)
# In practice, annoying issues arise. Specifically, numpy
# generates warnings in certain edge cases that we don't want
# to propagate to the user. Unfortunately, there does not
# appear to be a thread-safe way to filter out the warnings,
# so we have to do the calculation in a way that doesn't
# generate numpy warnings.
#
# Let N be the number of non-nan inputs in a slice.
# Conditions that generate nan:
# * empty input (i.e. N = 0)
# * All non-nan values 0
# * N < ddof
# * N == ddof and the input is constant
# Conditions that generate inf:
# * non-constant input and either
# * the mean is 0, or
# * N == ddof
#
a_isnan = np.isnan(a)
all_nan = a_isnan.all(axis=axis, keepdims=True)
all_nan_full = np.broadcast_to(all_nan, a.shape)
all_zero = (a_isnan | (a == 0)).all(axis=axis, keepdims=True) & ~all_nan
# ngood is the number of non-nan values in each slice.
ngood = (a.shape[axis] -
np.expand_dims(np.count_nonzero(a_isnan, axis=axis), axis))
# The return value is nan where ddof > ngood.
ddof_too_big = ddof > ngood
# If ddof == ngood, the return value is nan where the input is constant and
# inf otherwise.
ddof_equal_n = ddof == ngood
is_const = _nan_allsame(a, axis=axis, keepdims=True)
a2 = a.copy()
# If an entire slice is nan, `np.nanmean` will generate a warning,
# so we replace those nan's with 1.0 before computing the mean.
# We'll fix the corresponding output later.
a2[all_nan_full] = 1.0
mean_a = np.nanmean(a2, axis=axis, keepdims=True)
# If ddof >= ngood (the number of non-nan values in the slice), `np.nanstd`
# will generate a warning, so set all the values in such a slice to 1.0.
# We'll fix the corresponding output later.
a2[np.broadcast_to(ddof_too_big, a2.shape) | ddof_equal_n] = 1.0
with np.errstate(invalid='ignore'):
std_a = np.nanstd(a2, axis=axis, ddof=ddof, keepdims=True)
del a2
sum_zero = np.nansum(a, axis=axis, keepdims=True) == 0
# Where the sum along the axis is 0, replace mean_a with 1. This avoids
# division by zero. We'll fix the corresponding output later.
mean_a[sum_zero] = 1.0
# Here--finally!--is the calculation of the variation.
result = std_a / mean_a
# Now fix the values that were given fake data to avoid warnings.
result[~is_const & sum_zero] = np.inf
signed_inf_mask = ~is_const & ddof_equal_n
result[signed_inf_mask] = np.sign(mean_a[signed_inf_mask]) * np.inf
nan_mask = all_zero | all_nan | ddof_too_big | (ddof_equal_n & is_const)
result[nan_mask] = np.nan
if not keepdims:
result = np.squeeze(result, axis=axis)
if result.shape == ():
result = result[()]
return result
def variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False):
"""
Compute the coefficient of variation.
The coefficient of variation is the standard deviation divided by the
mean. This function is equivalent to::
        np.std(x, axis=axis, ddof=ddof) / np.mean(x, axis=axis)
The default for ``ddof`` is 0, but many definitions of the coefficient
of variation use the square root of the unbiased sample variance
for the sample standard deviation, which corresponds to ``ddof=1``.
The function does not take the absolute value of the mean of the data,
so the return value is negative if the mean is negative.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation.
Default is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains ``nan``.
The following options are available:
* 'propagate': return ``nan``
* 'raise': raise an exception
* 'omit': perform the calculation with ``nan`` values omitted
The default is 'propagate'.
ddof : int, optional
Gives the "Delta Degrees Of Freedom" used when computing the
standard deviation. The divisor used in the calculation of the
standard deviation is ``N - ddof``, where ``N`` is the number of
elements. `ddof` must be less than ``N``; if it isn't, the result
will be ``nan`` or ``inf``, depending on ``N`` and the values in
the array. By default `ddof` is zero for backwards compatibility,
but it is recommended to use ``ddof=1`` to ensure that the sample
standard deviation is computed as the square root of the unbiased
sample variance.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
Notes
-----
There are several edge cases that are handled without generating a
warning:
* If both the mean and the standard deviation are zero, ``nan``
is returned.
* If the mean is zero and the standard deviation is nonzero, ``inf``
is returned.
* If the input has length zero (either because the array has zero
length, or all the input values are ``nan`` and ``nan_policy`` is
``'omit'``), ``nan`` is returned.
* If the input contains ``inf``, ``nan`` is returned.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import variation
>>> variation([1, 2, 3, 4, 5], ddof=1)
0.5270462766947299
Compute the variation along a given dimension of an array that contains
a few ``nan`` values:
>>> x = np.array([[ 10.0, np.nan, 11.0, 19.0, 23.0, 29.0, 98.0],
... [ 29.0, 30.0, 32.0, 33.0, 35.0, 56.0, 57.0],
... [np.nan, np.nan, 12.0, 13.0, 16.0, 16.0, 17.0]])
>>> variation(x, axis=1, ddof=1, nan_policy='omit')
array([1.05109361, 0.31428986, 0.146483 ])
"""
a, axis = _chk_asarray(a, axis)
axis = normalize_axis_index(axis, ndim=a.ndim)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
return _nanvariation(a, axis=axis, ddof=ddof, keepdims=keepdims)
if a.size == 0 or ddof > n:
# Handle as a special case to avoid spurious warnings.
# The return values, if any, are all nan.
shp = list(a.shape)
if keepdims:
shp[axis] = 1
else:
del shp[axis]
if len(shp) == 0:
result = np.nan
else:
result = np.full(shp, fill_value=np.nan)
return result
mean_a = a.mean(axis, keepdims=True)
if ddof == n:
# Another special case. Result is either inf or nan.
std_a = a.std(axis=axis, ddof=0, keepdims=True)
result = np.full_like(std_a, fill_value=np.nan)
result.flat[std_a.flat > 0] = (np.sign(mean_a) * np.inf).flat
if result.shape == ():
result = result[()]
return result
with np.errstate(divide='ignore', invalid='ignore'):
std_a = a.std(axis, ddof=ddof, keepdims=True)
result = std_a / mean_a
if not keepdims:
result = np.squeeze(result, axis=axis)
if result.shape == ():
result = result[()]
return result
scipy-main/scipy/stats/_distn_infrastructure.py
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
from itertools import zip_longest
from scipy._lib import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import comb, entr
# for root finding for continuous distribution ppf, and maximum likelihood
# estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy._lib._finite_differences import _derivative
# for scipy.stats.entropy. Attempts to import just that function or file
# have caused import problems
from scipy import stats
from numpy import (arange, putmask, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX, _LOGXMAX
from ._censored_data import CensoredData
from scipy.stats._warnings_errors import FitError
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(order, %(shapes)s, loc=0, scale=1)
Non-central moment of the specified order.
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(confidence, %(shapes)s, loc=0, scale=1)
Confidence interval with equal areas around the median.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> import numpy as np
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2)
>>> ax.set_xlim([x[0], x[-1]])
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> import numpy as np
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
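# Worked example for the conversion above (a sketch, not used by the code):
# for a standard normal distribution mu=0, mu2=1, g1=0, g2=0, so requesting
# n=4 takes the last explicit branch and gives
#     mu4 = (g2 + 3.0) * mu2**2 = 3.0
#     val = mu4 + 0 + 0 + 0 = 3.0
# which matches the known fourth moment E[X**4] = 3 of N(0, 1).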
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""kurtosis is fourth central moment / variance**2 - 3."""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
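# Illustrative sanity check for the two helpers above (hedged: exact values
# depend on the sample); both use Fisher's definitions, so for a large sample
# from a standard normal distribution they should be close to zero:
#
# >>> rng = np.random.default_rng(12345)
# >>> data = rng.standard_normal(10**6)
# >>> bool(abs(_skew(data)) < 0.01), bool(abs(_kurtosis(data)) < 0.01)
# (True, True)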
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
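# For example, the name normalization above maps (a sketch):
#     _fit_determine_optimizer('bfgs')      -> optimize.fmin_bfgs
#     _fit_determine_optimizer('fmin_ncg')  -> optimize.fmin_ncg
# A callable is passed through unchanged, and a name that does not resolve
# to an ``optimize.fmin_*`` function raises ValueError.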
def _sum_finite(x):
"""
For a 1D array x, return a tuple containing the sum of the
finite values of x and the number of nonfinite values.
This is a utility function used when evaluating the negative
loglikelihood for a distribution and an array of samples.
Examples
--------
>>> tot, nbad = _sum_finite(np.array([-2, -np.inf, 5, 1]))
>>> tot
4.0
>>> nbad
1
"""
finite_x = np.isfinite(x)
bad_count = finite_x.size - np.count_nonzero(finite_x)
return np.sum(x[finite_x]), bad_count
# Frozen RV class
class rv_frozen:
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.a, self.b = self.dist._get_support(*shapes)
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, order=None):
return self.dist.moment(order, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def interval(self, confidence=None):
return self.dist.interval(confidence, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def support(self):
return self.dist.support(*self.args, **self.kwds)
class rv_discrete_frozen(rv_frozen):
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
    def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
class rv_continuous_frozen(rv_frozen):
def pdf(self, x):
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
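# Usage sketch for the frozen classes above (assuming a continuous instance
# such as scipy.stats.norm):
#
# >>> frozen = norm(loc=1.0, scale=2.0)    # an rv_continuous_frozen
# >>> frozen.pdf(0.0) == norm.pdf(0.0, loc=1.0, scale=2.0)
# True
#
# i.e. every method simply forwards to the underlying distribution with the
# stored ``args``/``kwds``.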
def argsreduce(cond, *args):
"""Clean arguments to:
    1. Ensure all arguments are iterable (arrays of dimension at least one).
    2. If cond is not all True and size > 1, keep only the elements of
       ravel(args[i]) for which ravel(cond) is True, as 1-D arrays.
Return list of processed arguments.
Examples
--------
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> A = rng.random((4, 5))
>>> B = 2
>>> C = rng.random((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(4, 5)
>>> B1.shape
(1,)
>>> C1.shape
(1, 5)
>>> cond[2,:] = 0
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(15,)
>>> B1.shape
(1,)
>>> C1.shape
(15,)
"""
# some distributions assume arguments are iterable.
newargs = np.atleast_1d(*args)
# np.atleast_1d returns an array if only one argument, or a list of arrays
# if more than one argument.
if not isinstance(newargs, list):
newargs = [newargs, ]
if np.all(cond):
# broadcast arrays with cond
*newargs, cond = np.broadcast_arrays(*newargs, cond)
return [arg.ravel() for arg in newargs]
s = cond.shape
# np.extract returns flattened arrays, which are not broadcastable together
# unless they are either the same size or size == 1.
return [(arg if np.size(arg) == 1
else np.extract(cond, np.broadcast_to(arg, s)))
for arg in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
class rv_generic:
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super().__init__()
# figure out if _stats signature has 'moments' keyword
sig = _getfullargspec(self._stats)
self._stats_has_moments = ((sig.varkw is not None) or
('moments' in sig.args) or
('moments' in sig.kwonlyargs))
self._random_state = check_random_state(seed)
@property
def random_state(self):
"""Get or set the generator object for generating random variates.
If `random_state` is None (or `np.random`), the
`numpy.random.RandomState` singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState``
instance, that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __setstate__(self, state):
try:
self.__dict__.update(state)
# attaches the dynamically created methods on each instance.
# if a subclass overrides rv_generic.__setstate__, or implements
            # its own _attach_methods, then it must make sure that
# _attach_argparser_methods is called.
self._attach_methods()
except ValueError:
# reconstitute an old pickle scipy<1.6, that contains
# (_ctor_param, random_state) as state
self._ctor_param = state[0]
self._random_state = state[1]
self.__init__()
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_* instance.
This method must be overridden by subclasses, and must itself call
_attach_argparser_methods. This method is called in __init__ in
subclasses, and in __setstate__
"""
raise NotImplementedError
def _attach_argparser_methods(self):
"""
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Should be called from `_attach_methods`, typically in __init__ and
during unpickling (__setstate__)
"""
ns = {}
exec(self._parse_arg_template, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name, types.MethodType(ns[name], self))
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser string for the shape arguments.
This method should be called in __init__ of a class for each
distribution. It creates the `_parse_arg_template` attribute that is
then used by `_attach_argparser_methods` to dynamically create and
attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
methods to the instance.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, str):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getfullargspec(meth) # NB does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.varkw is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.kwonlyargs:
raise TypeError(
'kwonly args are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
# this string is used by _attach_argparser_methods
self._parse_arg_template = parse_arg_template % dct
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = f'>>> {self.shapes} = {vals}'
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
                # necessary because %(shapes)s is used in two forms
                # (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
try:
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
except TypeError as e:
raise Exception("Unable to construct docstring for "
"distribution \"%s\": %s" %
(self.name, repr(e))) from e
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
self.__doc__ = ''.join([f'{longname} {discrete} random variable.',
'\n\n%(before_notes)s\n', docheaders['notes'],
'\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
if isinstance(self, rv_continuous):
return rv_continuous_frozen(self, *args, **kwds)
else:
return rv_discrete_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = self.generic_moment(n, *args)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
f"the parameters. {size}, {size_}, {bcast_shape}")
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
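    # To illustrate the size rules above (a sketch, using scipy.stats.norm
    # as the instance): loc of shape (3,) broadcasts against size=(2, 3),
    #
    # >>> norm.rvs(loc=[1., 2., 3.], scale=1., size=(2, 3)).shape
    # (2, 3)
    #
    # whereas size=(2, 4) is rejected with the ValueError raised above,
    # because size must not be smaller than the broadcast parameter shape
    # in any dimension.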
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
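    # For example, under this default a distribution with a single shape
    # parameter ``a`` that must be strictly positive (such as the gamma
    # distribution) needs no custom _argcheck: a call like
    # ``gamma.pdf(1.0, a=-1)`` fails the condition and the public method
    # returns ``self.badvalue`` (nan by default) instead of a number.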
def _get_support(self, *args, **kwargs):
"""Return the support of the (unscaled, unshifted) distribution.
*Must* be overridden by distributions which have support dependent
upon the shape parameters of the distribution. Any such override
*must not* set or change any of the class members, as these members
are shared amongst all instances of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support for the specified
shape parameters.
"""
return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a < x) & (x < b)
def _rvs(self, *args, size=None, random_state=None):
# This method must handle size being a tuple, and it must
# properly broadcast *args and size. size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
# Use basic inverse cdf algorithm for RV generation as default.
U = random_state.uniform(size=size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `random_state` is None (or `np.random`), the
`numpy.random.RandomState` singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is
used, seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState``
instance, that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
message = ("Domain error in arguments. The `scale` parameter must "
"be positive for all distributions, and many "
"distributions have restrictions on shape parameters. "
f"Please see the `scipy.stats.{self.name}` "
"documentation for details.")
raise ValueError(message)
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
random_state = check_random_state(rndm)
else:
random_state = self._random_state
vals = self._rvs(*args, size=size, random_state=random_state)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete and not isinstance(self, rv_sample):
if size == ():
vals = int(vals)
else:
vals = vals.astype(np.int64)
return vals
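    # Usage sketch for the ``random_state`` juggling above (assuming
    # scipy.stats.norm): a per-call random_state gives a reproducible draw
    # without permanently rebinding the instance's generator,
    #
    # >>> sample = norm.rvs(size=3, random_state=42)
    #
    # after which self._random_state is restored to its previous value.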
def stats(self, *args, **kwds):
"""Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = np.full(shape(cond), fill_value=self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
# if mean is inf then var is also inf
with np.errstate(invalid='ignore'):
mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf)
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
with np.errstate(invalid='ignore'):
mu3 = (-mu*mu - 3*mu2)*mu + mu3p
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if g1 is None:
mu3 = None
else:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if mu3 is None:
mu3p = self._munp(3, *goodargs)
with np.errstate(invalid='ignore'):
mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
with np.errstate(invalid='ignore'):
mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = [default.copy() for _ in moments]
output = [out[()] for out in output]
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> import numpy as np
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, scale, *args)
goodscale = goodargs[0]
goodargs = goodargs[1:]
place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
return output[()]
def moment(self, order, *args, **kwds):
"""non-central moment of distribution of specified order.
Parameters
----------
        order : int, order >= 0
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
n = order
shapes, loc, scale = self._parse_args(*args, **kwds)
args = np.broadcast_arrays(*(*shapes, loc, scale))
*shapes, loc, scale = args
i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
i1 = np.logical_and(i0, loc == 0)
i2 = np.logical_and(i0, loc != 0)
args = argsreduce(i0, *shapes, loc, scale)
*shapes, loc, scale = args
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment order must be nonnegative.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'mvsk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
val = np.empty(loc.shape) # val needs to be indexed by loc
val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
result = zeros(i0.shape)
place(result, ~i0, self.badvalue)
if i1.any():
res1 = scale[loc == 0]**n * val[loc == 0]
place(result, i1, res1)
if i2.any():
mom = [mu, mu2, g1, g2]
arrs = [i for i in mom if i is not None]
idx = [i for i in range(4) if mom[i] is not None]
            if idx:  # nonempty; `any(idx)` would wrongly skip idx == [0]
                arrs = argsreduce(loc != 0, *arrs)
j = 0
for i in idx:
mom[i] = arrs[j]
j += 1
mu, mu2, g1, g2 = mom
args = argsreduce(loc != 0, *shapes, loc, scale, val)
*shapes, loc, scale, val = args
res2 = zeros(loc.shape, dtype='d')
fac = scale / loc
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
shapes)
res2 += comb(n, k, exact=True)*fac**k * valk
res2 += fac**n * val
res2 *= loc**n
place(result, i2, res2)
return result[()]
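    # Worked example of the loc/scale expansion above: with X = loc + scale*Y
    # and n = 2, the loop plus the trailing terms compute
    #     loc**2 * (1 + 2*fac*E[Y] + fac**2*E[Y**2])
    #   = loc**2 + 2*loc*scale*E[Y] + scale**2*E[Y**2]
    # with fac = scale/loc, i.e. exactly E[(loc + scale*Y)**2].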
def median(self, *args, **kwds):
"""Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, confidence, *args, **kwds):
"""Confidence interval with equal areas around the median.
Parameters
----------
confidence : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
        a, b : ndarray of float
            end-points of range that contain ``100 * confidence %`` of the
            rv's possible values.
Notes
-----
This is implemented as ``ppf([p_tail, 1-p_tail])``, where
``ppf`` is the inverse cumulative distribution function and
``p_tail = (1-confidence)/2``. Suppose ``[c, d]`` is the support of a
discrete distribution; then ``ppf([0, 1]) == (c-1, d)``. Therefore,
when ``confidence=1`` and the distribution is discrete, the left end
of the interval will be beyond the support of the distribution.
For discrete distributions, the interval will limit the probability
in each tail to be less than or equal to ``p_tail`` (usually
strictly less).
"""
        alpha = asarray(confidence)
        if np.any((alpha > 1) | (alpha < 0)):
            raise ValueError("confidence must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
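    # Numeric example (a sketch, with scipy.stats.norm as the instance):
    #
    # >>> norm.interval(0.95)     # approximately
    # (-1.96, 1.96)
    #
    # i.e. ppf(0.025) and ppf(0.975), leaving 2.5% of probability in each
    # tail.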
def support(self, *args, **kwargs):
"""Support of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : array_like
end-points of the distribution's support.
"""
args, loc, scale = self._parse_args(*args, **kwargs)
arrs = np.broadcast_arrays(*args, loc, scale)
args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
cond = self._argcheck(*args) & (scale > 0)
_a, _b = self._get_support(*args)
if cond.all():
return _a * scale + loc, _b * scale + loc
elif cond.ndim == 0:
return self.badvalue, self.badvalue
# promote bounds to at least float to fill in the badvalue
_a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
out_a, out_b = _a * scale + loc, _b * scale + loc
place(out_a, 1-cond, self.badvalue)
place(out_b, 1-cond, self.badvalue)
return out_a, out_b
def nnlf(self, theta, x):
"""Negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = (asarray(x)-loc) / scale
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x, *args)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf(self, x, *args):
return -np.sum(self._logpxf(x, *args), axis=0)
def _nlff_and_penalty(self, x, args, log_fitfun):
# negative log fit function
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logff = log_fitfun(x, *args)
finite_logff = np.isfinite(logff)
n_bad += np.sum(~finite_logff, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logff[finite_logff], axis=0) + penalty
return -np.sum(logff, axis=0)
def _penalized_nnlf(self, theta, x):
"""Penalized negative loglikelihood function.
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nlff_and_penalty(x, args, self._logpxf) + n_log_scale
def _penalized_nlpsf(self, theta, x):
"""Penalized negative log product spacing function.
i.e., - sum (log (diff (cdf (x, theta))), axis=0) + penalty
where theta are the parameters (including loc and scale)
Follows reference [1] of scipy.stats.fit
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = (np.sort(x) - loc)/scale
def log_psf(x, *args):
x, lj = np.unique(x, return_counts=True) # fast for sorted x
cdf_data = self._cdf(x, *args) if x.size else []
if not (x.size and 1 - cdf_data[-1] <= 0):
cdf = np.concatenate(([0], cdf_data, [1]))
lj = np.concatenate((lj, [1]))
else:
cdf = np.concatenate(([0], cdf_data))
# here we could use logcdf w/ logsumexp trick to take differences,
# but in the context of the method, it seems unlikely to matter
return lj * np.log(np.diff(cdf) / lj)
return self._nlff_and_penalty(x, args, log_psf)
class _ShapeInfo:
def __init__(self, name, integrality=False, domain=(-np.inf, np.inf),
inclusive=(True, True)):
self.name = name
self.integrality = integrality
domain = list(domain)
if np.isfinite(domain[0]) and not inclusive[0]:
domain[0] = np.nextafter(domain[0], np.inf)
if np.isfinite(domain[1]) and not inclusive[1]:
domain[1] = np.nextafter(domain[1], -np.inf)
self.domain = domain
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
    at most one non-None value in `kwds` associated with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
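# For example (a sketch of the contract above):
#
# >>> kwds = {'fa': 1.0, 'floc': 0}
# >>> _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
# 1.0
# >>> kwds            # 'fa' was popped as a side effect; 'floc' untouched
# {'floc': 0}
#
# Passing both 'fa' and 'fix_a' would raise ValueError.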
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
        The value in result arrays that indicates that some argument
        restriction was violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
    the support does depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
    if the inverse cdf can be expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
    Normally, you must provide shape parameters (and, optionally, location
    and scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
**Deepcopying / Pickling**
If a distribution or frozen distribution is deepcopied (pickled/unpickled,
etc.), any underlying random number generator is deepcopied with it. An
implication is that if a distribution relies on the singleton RandomState
before copying, it will rely on a copy of that random state after copying,
and ``np.random.seed`` will no longer control the state.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, seed=None):
super().__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
self._attach_methods()
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""
Attaches dynamically created methods to the rv_continuous instance.
"""
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
if self.moment_type == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
factor = 10.
left, right = self._get_support(*args)
if np.isinf(left):
left = min(-factor, right)
while self._ppf_to_solve(left, q, *args) > 0.:
left, right = left * factor, left
# left is now such that cdf(left) <= q
# if right has changed, then cdf(right) > q
if np.isinf(right):
right = max(factor, left)
while self._ppf_to_solve(right, q, *args) < 0.:
left, right = right, right * factor
# right is now such that cdf(right) >= q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
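    # Sketch of the bracketing above: for support (-inf, inf) with
    # cdf(-10) > q, the left bracket expands geometrically,
    # -10 -> -100 -> -1000 -> ..., until cdf(left) <= q; brentq then
    # solves cdf(x) = q on [left, right] to within self.xtol.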
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return _derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
p = self._pdf(x, *args)
with np.errstate(divide='ignore'):
return log(p)
def _logpxf(self, x, *args):
# continuous distributions have PDF, discrete have PMF, but sometimes
# the distinction doesn't matter. This lets us use `_logpxf` for both
# discrete and continuous distributions.
return self._logpdf(x, *args)
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
# generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
# in rv_generic
def pdf(self, x, *args, **kwds):
"""Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.promote_types(x.dtype, np.float64)
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.promote_types(x.dtype, np.float64)
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.promote_types(x.dtype, np.float64)
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= np.asarray(_b)) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.promote_types(x.dtype, np.float64)
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= _b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.promote_types(x.dtype, np.float64)
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.promote_types(x.dtype, np.float64)
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def _nnlf_and_penalty(self, x, args):
"""
Compute the penalized negative log-likelihood for the
"standardized" data (i.e. already shifted by loc and
scaled by scale) for the shape parameters in `args`.
`x` can be a 1D numpy array or a CensoredData instance.
"""
if isinstance(x, CensoredData):
# Filter out the data that is not in the support.
xs = x._supported(*self._get_support(*args))
n_bad = len(x) - len(xs)
i1, i2 = xs._interval.T
terms = [
# logpdf of the noncensored data.
self._logpdf(xs._uncensored, *args),
# logcdf of the left-censored data.
self._logcdf(xs._left, *args),
# logsf of the right-censored data.
self._logsf(xs._right, *args),
# log of probability of the interval-censored data.
np.log(self._delta_cdf(i1, i2, *args)),
]
else:
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
terms = [self._logpdf(x, *args)]
totals, bad_counts = zip(*[_sum_finite(term) for term in terms])
total = sum(totals)
n_bad += sum(bad_counts)
return -total + n_bad * _LOGXMAX * 100
def _penalized_nnlf(self, theta, x):
"""Penalized negative loglikelihood function.
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
if isinstance(x, CensoredData):
x = (x - loc) / scale
n_log_scale = (len(x) - x.num_censored()) * log(scale)
else:
x = (x - loc) / scale
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
def _fitstart(self, data, args=None):
"""Starting point for fit (shape arguments + loc + scale)."""
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
def _reduce_func(self, args, kwds, data=None):
"""
Return the (possibly reduced) function to optimize in order to find MLE
estimates for the .fit method.
"""
# Convert fixed shape parameters to the standard numeric form: e.g. for
# stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
# for `f0`, `fa`, or `fix_a`. The following converts the latter two
# into the first (numeric) form.
shapes = []
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
if val is not None:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
methods = {"mle", "mm"}
method = kwds.pop('method', "mle").lower()
if method == "mm":
n_params = len(shapes) + 2 - len(fixedn)
exponents = (np.arange(1, n_params+1))[:, np.newaxis]
data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)
def objective(theta, x):
return self._moment_error(theta, x, data_moments)
elif method == "mle":
objective = self._penalized_nnlf
else:
raise ValueError("Method '{}' not available; must be one of {}"
.format(method, methods))
if len(fixedn) == 0:
func = objective
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return objective(newtheta, x)
return x0, func, restore, args
def _moment_error(self, theta, x, data_moments):
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
for i in range(len(data_moments))])
if np.any(np.isnan(dist_moments)):
raise ValueError("Method of moments encountered a non-finite "
"distribution moment and cannot continue. "
"Consider trying method='MLE'.")
return (((data_moments - dist_moments) /
np.maximum(np.abs(data_moments), 1e-8))**2).sum()
def fit(self, data, *args, **kwds):
"""
Return estimates of shape (if applicable), location, and scale
parameters from data. The default estimation method is Maximum
Likelihood Estimation (MLE), but Method of Moments (MM)
is also available.
Starting estimates for the fit are given by input arguments;
for any arguments not provided with starting estimates,
``self._fitstart(data)`` is called to generate them.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like or `CensoredData` instance
Data to use in estimating the distribution parameters.
arg1, arg2, arg3,... : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
**kwds : floats, optional
- `loc`: initial guess of the distribution's location parameter.
- `scale`: initial guess of the distribution's scale parameter.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take
``func`` and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
- method : The method to use. The default is "MLE" (Maximum
Likelihood Estimate); "MM" (Method of Moments)
is also available.
Raises
------
TypeError, ValueError
If an input is invalid.
`~scipy.stats.FitError`
If fitting fails or the fit produced would be invalid
Returns
-------
parameter_tuple : tuple of floats
Estimates for any shape parameters (if applicable), followed by
those for location and scale. For most random variables, shape
parameter estimates will be returned, but there are exceptions (e.g.
``norm``).
Notes
-----
With ``method="MLE"`` (default), the fit is computed by minimizing
the negative log-likelihood function. A large, finite penalty
(rather than infinite negative log-likelihood) is applied for
observations beyond the support of the distribution.
With ``method="MM"``, the fit is computed by minimizing the L2 norm
of the relative errors between the first *k* raw (about zero) data
moments and the corresponding distribution moments, where *k* is the
number of non-fixed parameters.
More precisely, the objective function is::
(((data_moments - dist_moments)
/ np.maximum(np.abs(data_moments), 1e-8))**2).sum()
where the constant ``1e-8`` avoids division by zero in case of
vanishing data moments. Typically, this error norm can be reduced to
zero.
Note that the standard method of moments can produce parameters for
which some data are outside the support of the fitted distribution;
this implementation does nothing to prevent this.
For either method,
the returned answer is not guaranteed to be globally optimal; it
may only be locally optimal, or the optimization may fail altogether.
If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
the `fit` method will raise a ``RuntimeError``.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and
``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
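A fit by the method of moments can be requested with ``method="MM"``
(the resulting estimates depend on the random sample drawn above):
>>> a2, b2, loc2, scale2 = beta.fit(x, method="MM")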
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zeroth shape parameter ``a`` equal to 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
method = kwds.get('method', "mle").lower()
censored = isinstance(data, CensoredData)
if censored:
if method != 'mle':
raise ValueError('For censored data, the method must'
' be "MLE".')
if data.num_censored() == 0:
# There are no censored values in data, so replace the
# CensoredData instance with a regular array.
data = data._uncensored
censored = False
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
# Check the finiteness of data only if data is not an instance of
# CensoredData. The arrays in a CensoredData instance have already
# been validated.
if not censored:
# Note: `ravel()` is called for backwards compatibility.
data = np.asarray(data).ravel()
if not np.isfinite(data).all():
raise ValueError("The data contains non-finite values.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds, data=data)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
optimizer = _fit_determine_optimizer(optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# In some cases, method of moments can be done with fsolve/root
# instead of an optimizer, but sometimes no solution exists,
# especially when the user fixes parameters. Minimizing the sum
# of squares of the error generalizes to these cases.
vals = optimizer(func, x0, args=(data,), disp=0)
obj = func(vals, data)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
loc, scale, shapes = self._unpack_loc_scale(vals)
if not (np.all(self._argcheck(*shapes)) and scale > 0):
raise FitError("Optimization converged to parameters that are "
"outside the range allowed by the distribution.")
if method == 'mm':
if not np.isfinite(obj):
raise FitError("Optimization failed: either a data moment "
"or fitted distribution moment is "
"non-finite.")
return vals
def _fit_loc_scale_support(self, data, *args):
"""Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
if isinstance(data, CensoredData):
# For this estimate, "uncensor" the data by taking the
# given endpoints as the data for the left- or right-censored
# data, and the mean for the interval-censored data.
data = data._uncensor()
else:
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite support interval, both the location and scale
# can be meaningfully estimated from the data.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided support interval, estimate only the location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
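Examples
--------
For the standard normal distribution (mean 0, variance 1), these are
just the sample mean and the (biased) sample standard deviation:
>>> import numpy as np
>>> from scipy.stats import norm
>>> Lhat, Shat = norm.fit_loc_scale(np.array([1.0, 2.0, 3.0, 4.0]))
>>> float(Lhat), float(Shat)
(2.5, 1.118033988749895)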
"""
mu, mu2 = self.stats(*args, moments='mv')
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
with np.errstate(invalid='ignore'):
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
with np.errstate(over='ignore'):
h = integrate.quad(integ, _a, _b)[0]
if not np.isnan(h):
return h
else:
# retry with different limits if the integration failed
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ub
E[f(x)] = Integral(f(x) * dist.pdf(x)),
lb
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
Likewise, the accuracy of results is not verified by the function.
`scipy.integrate.quad` is typically reliable for integrals that are
numerically favorable, but it is not guaranteed to converge
to a correct value for all possible intervals and integrands. This
function is provided for convenience; for critical applications,
check results against other integration methods.
The function is not vectorized.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
The integrand can be treated as a complex-valued function
by passing ``complex_func=True`` to `scipy.integrate.quad`.
>>> import numpy as np
>>> from scipy.stats import vonmises
>>> res = vonmises(loc=2, kappa=1).expect(lambda x: np.exp(1j*x),
... complex_func=True)
>>> res
(-0.18576377217422957+0.40590124735052263j)
>>> np.angle(res) # location of the (circular) distribution
2.0
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
cdf_bounds = self.cdf([lb, ub], *args, **lockwds)
invfac = cdf_bounds[1] - cdf_bounds[0]
kwds['args'] = args
# split interval to help integrator w/ infinite support; see gh-8928
alpha = 0.05 # split body from tails at probability mass `alpha`
inner_bounds = np.array([alpha, 1-alpha])
cdf_inner_bounds = cdf_bounds[0] + invfac * inner_bounds
c, d = loc + self._ppf(cdf_inner_bounds, *args) * scale
# Do not silence warnings from integration.
lbc = integrate.quad(fun, lb, c, **kwds)[0]
cd = integrate.quad(fun, c, d, **kwds)[0]
dub = integrate.quad(fun, d, ub, **kwds)[0]
vals = (lbc + cd + dub)
if conditional:
vals /= invfac
return np.array(vals)[()] # make it a numpy scalar like other methods
def _param_info(self):
shape_info = self._shape_info()
loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False))
scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False))
param_info = shape_info + [loc_info, scale_info]
return param_info
# For now, _delta_cdf is a private method.
def _delta_cdf(self, x1, x2, *args, loc=0, scale=1):
"""
Compute CDF(x2) - CDF(x1).
Where x1 is greater than the median, compute SF(x1) - SF(x2),
otherwise compute CDF(x2) - CDF(x1).
This function is only useful if `dist.sf(x, ...)` has an implementation
that is numerically more accurate than `1 - dist.cdf(x, ...)`.
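For example (note that this method is private), for the standard
normal distribution both branches agree with the naive difference:
>>> import numpy as np
>>> from scipy.stats import norm
>>> delta = norm._delta_cdf(-1.0, 1.0)
>>> bool(np.isclose(delta, norm.cdf(1.0) - norm.cdf(-1.0)))
True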
"""
cdf1 = self.cdf(x1, *args, loc=loc, scale=scale)
# Possible optimizations (needs investigation-these might not be
# better):
# * Use _lazywhere instead of np.where
# * Instead of cdf1 > 0.5, compare x1 to the median.
result = np.where(cdf1 > 0.5,
(self.sf(x1, *args, loc=loc, scale=scale)
- self.sf(x2, *args, loc=loc, scale=scale)),
self.cdf(x2, *args, loc=loc, scale=scale) - cdf1)
if result.ndim == 0:
result = result[()]
return result
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
_a, _b = self._get_support(*args)
return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
_a, _b = self._get_support(*args)
b = _b
a = _a
if isinf(b): # make sure the ending point b satisfies cdf(b) >= q
b = int(max(100*q, 10))
while True:
if b >= _b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # make sure the starting point a satisfies cdf(a) <= q
a = int(min(-100*q, -10))
while True:
if a <= _a:
qa = 0.0 # lower end of the support reached; the CDF to its left is 0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while True:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
and ``pk`` must have the same shape, and ``xk`` must be unique.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
The value in the result arrays that indicates a value for which
some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
support
Notes
-----
This class is similar to `rv_continuous`. Whether a shape parameter is
valid is decided by an ``_argcheck`` method (which defaults to checking
that its arguments are strictly positive).
The main differences are as follows.
- The support of the distribution is a set of integers.
- Instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``).
- There is no ``scale`` parameter.
- The default implementations of methods (e.g. ``_cdf``) are not designed
for distributions with support that is unbounded below (i.e.
``a=-np.inf``), so they must be overridden.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
**Deepcopying / Pickling**
If a distribution or frozen distribution is deepcopied (pickled/unpickled,
etc.), any underlying random number generator is deepcopied with it. An
implication is that if a distribution relies on the singleton RandomState
before copying, it will rely on a copy of that random state after copying,
and ``np.random.seed`` will no longer control the state.
Examples
--------
Custom made discrete distribution:
>>> import numpy as np
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, seed=None):
if values is not None:
# dispatch to a subclass
return super().__new__(rv_sample)
else:
# business as usual
return super().__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, seed=None):
super().__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "generic_moment"]
for attr in attrs:
dct.pop(attr, None)
return dct
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_discrete instance."""
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = types.MethodType(_vec_generic_moment, self)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = types.MethodType(_vppf, self)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
def _construct_docstrings(self, name, longname):
if name is None:
name = 'Distribution'
self.name = name
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _logpxf(self, k, *args):
# continuous distributions have PDF, discrete have PMF, but sometimes
# the distinction doesn't matter. This lets us use `_logpxf` for both
# discrete and continuous distributions.
return self._logpmf(k, *args)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-1]
scale = 1
args = tuple(theta[:-1])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def _cdf_single(self, k, *args):
_a, _b = self._get_support(*args)
m = arange(int(_a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Number of random variates to generate (default is 1). Note that
`size` must be given as a keyword argument, not as a positional argument.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `random_state` is None (or `np.random`), the
`numpy.random.RandomState` singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is
used, seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState``
instance, that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super().rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
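Examples
--------
A minimal example with a two-point distribution, ``P(X=1) = 0.75``:
>>> from scipy.stats import rv_discrete
>>> d = rv_discrete(values=([0, 1], [0.25, 0.75]))
>>> float(d.pmf(1))
0.75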
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b)
if not isinstance(self, rv_sample):
cond1 = cond1 & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b)
if not isinstance(self, rv_sample):
cond1 = cond1 & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
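Examples
--------
Continuing the two-point example from `pmf`, ``P(X <= 0) = 0.25``:
>>> from scipy.stats import rv_discrete
>>> d = rv_discrete(values=([0, 1], [0.25, 0.75]))
>>> float(d.cdf(0))
0.25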
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond3 = np.isneginf(k)
cond = cond0 & cond1 & np.isfinite(k)
output = zeros(shape(cond), 'd')
place(output, cond2*(cond0 == cond0), 1.0)
place(output, cond3*(cond0 == cond0), 0.0)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = ((k < _a) | np.isneginf(k)) & cond0
cond = cond0 & cond1 & np.isfinite(k)
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
# output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _a-1 + loc)
place(output, cond2, _b + loc)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond3 = (q == 0) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
# output type 'd' to handle nan and inf
lower_bound = _a - 1 + loc
upper_bound = _b + loc
place(output, cond2*(cond == cond), lower_bound)
place(output, cond3*(cond == cond), upper_bound)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return stats.entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution by numerical summation.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
Iterate over the support of the distribution in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
For heavy-tailed distributions, the expected value may or may not
exist, depending on the function `func`. If it does exist but the
sum converges slowly, the accuracy of the result may be rather low.
For instance, for ``zipf(4)``, the accuracy of the mean and variance
is only about 1e-5. Increasing `maxcount` and/or `chunksize` may
improve the result, but may also make the computation very slow.
The function is not vectorized.
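Examples
--------
The mean of a Poisson distribution with rate 2 is exactly 2; the
summation recovers it to within the tolerance:
>>> from scipy.stats import poisson
>>> round(float(poisson.expect(args=(2.0,))), 6)
2.0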
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# `pmf` was used here historically because `_pmf` does not check the
# support (e.g. in `randint`); `_pmf` appears to work correctly now.
_a, _b = self._get_support(*args)
if lb is None:
lb = _a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = _b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
if isinstance(self, rv_sample):
res = self._expect(fun, lb, ub)
return res / invfac
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
def _param_info(self):
shape_info = self._shape_info()
loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False))
param_info = shape_info + [loc_info]
return param_info
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0).
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
The constructor ignores most of the arguments; only the `values` argument is needed.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if np.shape(xk) != np.shape(pk):
raise ValueError("xk and pk must have the same shape.")
if np.less(pk, 0.0).any():
raise ValueError("All elements of pk must be non-negative.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
if not len(set(np.ravel(xk))) == np.size(xk):
raise ValueError("xk may not contain duplicate values.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in rv_generic.__setstate__,
# which calls rv_generic._attach_methods
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
for attr in attrs:
dct.pop(attr, None)
return dct
def _attach_methods(self):
"""Attaches dynamically created argparser methods."""
self._attach_argparser_methods()
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support.
"""
return self.a, self.b
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self, size=None, random_state=None):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = random_state.uniform(size=size)
if size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return stats.entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _expect(self, fun, lb, ub, *args, **kwds):
# ignore all args, just do a brute force summation
supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
vals = fun(supp)
return np.sum(vals)
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
bc is a tuple of the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
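For example (a worked case; this function is private):
>>> _check_shape((3, 1), (2, 3, 5))
((2, 5), (True, False, True))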
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
def get_distribution_names(namespace_pairs, rv_base_class):
"""Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions with a ``_gen`` suffix added.
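For example (a minimal sketch; ``demo_gen`` and ``demo`` are
throwaway names created only for the illustration):
>>> class demo_gen(rv_continuous): pass
>>> demo = demo_gen(name='demo')
>>> get_distribution_names([('demo', demo), ('demo_gen', demo_gen)],
... rv_continuous)
(['demo'], ['demo_gen'])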
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| 145,678
| 34.961244
| 195
|
py
|
scipy
|
scipy-main/scipy/stats/_constants.py
|
"""
Statistics-related constants.
"""
import numpy as np
# The smallest representable positive number such that 1.0 + _EPS != 1.0.
_EPS = np.finfo(float).eps
# The largest [in magnitude] usable floating value.
_XMAX = np.finfo(float).max
# The log of the largest usable floating value; useful for knowing
# when exp(something) will overflow
_LOGXMAX = np.log(_XMAX)
# The smallest [in magnitude] usable (i.e. not subnormal) double precision
# floating value.
_XMIN = np.finfo(float).tiny
# The log of the smallest [in magnitude] usable (i.e not subnormal)
# double precision floating value.
_LOGXMIN = np.log(_XMIN)
# -special.psi(1)
_EULER = 0.577215664901532860606512090082402431042
# special.zeta(3, 1) Apery's constant
_ZETA3 = 1.202056903159594285399738161511449990765
# sqrt(pi)
_SQRT_PI = 1.772453850905516027298167483341145182798
# sqrt(2/pi)
_SQRT_2_OVER_PI = 0.7978845608028654
# log(sqrt(2/pi))
_LOG_SQRT_2_OVER_PI = -0.22579135264472744
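# A minimal sanity sketch (an illustrative addition, not part of the
# original module): the hard-coded constants can be tied back to their
# definitions.
assert np.isclose(_SQRT_PI * _SQRT_PI, np.pi)
assert np.isclose(_SQRT_2_OVER_PI, np.sqrt(2 / np.pi))
assert np.isclose(_LOG_SQRT_2_OVER_PI, np.log(_SQRT_2_OVER_PI))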
| 962
| 23.075
| 74
|
py
|
scipy
|
scipy-main/scipy/stats/_warnings_errors.py
|
# Warnings
class DegenerateDataWarning(RuntimeWarning):
"""Warns when data is degenerate and results may not be reliable."""
def __init__(self, msg=None):
if msg is None:
msg = ("Degenerate data encountered; results may not be reliable.")
self.args = (msg,)
class ConstantInputWarning(DegenerateDataWarning):
"""Warns when all values in data are exactly equal."""
def __init__(self, msg=None):
if msg is None:
msg = ("All values in data are exactly equal; "
"results may not be reliable.")
self.args = (msg,)
class NearConstantInputWarning(DegenerateDataWarning):
"""Warns when all values in data are nearly equal."""
def __init__(self, msg=None):
if msg is None:
msg = ("All values in data are nearly equal; "
"results may not be reliable.")
self.args = (msg,)
# Errors
class FitError(RuntimeError):
"""Represents an error condition when fitting a distribution to data."""
def __init__(self, msg=None):
if msg is None:
msg = ("An error occurred when fitting a distribution to data.")
self.args = (msg,)
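# A minimal usage sketch (an illustrative addition, not part of the
# original module; `_demo_usage` is a hypothetical helper): these classes
# are intended to be warned/raised by statistical routines in scipy.stats.
def _demo_usage():
import warnings
warnings.warn(ConstantInputWarning()) # e.g. the input data are constant
raise FitError("Optimization failed.") # e.g. a fit cannot proceed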
| 1,196
| 29.692308
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_fit.py
|
import warnings
from collections import namedtuple
import numpy as np
from scipy import optimize, stats
from scipy._lib._util import check_random_state
def _combine_bounds(name, user_bounds, shape_domain, integral):
"""Intersection of user-defined bounds and distribution PDF/PMF domain"""
user_bounds = np.atleast_1d(user_bounds)
if user_bounds[0] > user_bounds[1]:
message = (f"There are no values for `{name}` on the interval "
f"{list(user_bounds)}.")
raise ValueError(message)
bounds = (max(user_bounds[0], shape_domain[0]),
min(user_bounds[1], shape_domain[1]))
if integral and (np.ceil(bounds[0]) > np.floor(bounds[1])):
message = (f"There are no integer values for `{name}` on the interval "
f"defined by the user-provided bounds and the domain "
"of the distribution.")
raise ValueError(message)
elif not integral and (bounds[0] > bounds[1]):
message = (f"There are no values for `{name}` on the interval "
f"defined by the user-provided bounds and the domain "
"of the distribution.")
raise ValueError(message)
if not np.all(np.isfinite(bounds)):
message = (f"The intersection of user-provided bounds for `{name}` "
f"and the domain of the distribution is not finite. Please "
f"provide finite bounds for shape `{name}` in `bounds`.")
raise ValueError(message)
return bounds
class FitResult:
r"""Result of fitting a discrete or continuous distribution to data
Attributes
----------
params : namedtuple
A namedtuple containing the maximum likelihood estimates of the
shape parameters, location, and (if applicable) scale of the
distribution.
success : bool or None
Whether the optimizer considered the optimization to terminate
successfully or not.
message : str or None
Any status message provided by the optimizer.
"""
def __init__(self, dist, data, discrete, res):
self._dist = dist
self._data = data
self.discrete = discrete
self.pxf = getattr(dist, "pmf", None) or getattr(dist, "pdf", None)
shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
if not discrete:
FitParams = namedtuple('FitParams', shape_names + ['loc', 'scale'])
else:
FitParams = namedtuple('FitParams', shape_names + ['loc'])
self.params = FitParams(*res.x)
# Optimizer can report success even when nllf is infinite
if res.success and not np.isfinite(self.nllf()):
res.success = False
res.message = ("Optimization converged to parameter values that "
"are inconsistent with the data.")
self.success = getattr(res, "success", None)
self.message = getattr(res, "message", None)
def __repr__(self):
keys = ["params", "success", "message"]
m = max(map(len, keys)) + 1
return '\n'.join([key.rjust(m) + ': ' + repr(getattr(self, key))
for key in keys if getattr(self, key) is not None])
def nllf(self, params=None, data=None):
"""Negative log-likelihood function
Evaluates the negative of the log-likelihood function of the provided
data at the provided parameters.
Parameters
----------
params : tuple, optional
The shape parameters, location, and (if applicable) scale of the
distribution as a single tuple. Default is the maximum likelihood
estimates (``self.params``).
data : array_like, optional
The data for which the log-likelihood function is to be evaluated.
Default is the data to which the distribution was fit.
Returns
-------
nllf : float
The negative of the log-likelihood function.
"""
params = params if params is not None else self.params
data = data if data is not None else self._data
return self._dist.nnlf(theta=params, x=data)
def plot(self, ax=None, *, plot_type="hist"):
"""Visually compare the data against the fitted distribution.
Available only if ``matplotlib`` is installed.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to draw the plot onto, otherwise uses the current Axes.
plot_type : {"hist", "qq", "pp", "cdf"}
Type of plot to draw. Options include:
- "hist": Superposes the PDF/PMF of the fitted distribution
over a normalized histogram of the data.
- "qq": Scatter plot of theoretical quantiles against the
empirical quantiles. Specifically, the x-coordinates are the
values of the fitted distribution PPF evaluated at the
percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is the
number of data points, and the y-coordinates are the sorted
data points.
- "pp": Scatter plot of theoretical percentiles against the
observed percentiles. Specifically, the x-coordinates are the
percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
the number of data points, and the y-coordinates are the values
of the fitted distribution CDF evaluated at the sorted
data points.
- "cdf": Superposes the CDF of the fitted distribution over the
empirical CDF. Specifically, the x-coordinates of the empirical
CDF are the sorted data points, and the y-coordinates are the
percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
the number of data points.
Returns
-------
ax : matplotlib.axes.Axes
The matplotlib Axes object on which the plot was drawn.
"""
try:
import matplotlib # noqa
except ModuleNotFoundError as exc:
message = "matplotlib must be installed to use method `plot`."
raise ModuleNotFoundError(message) from exc
plots = {'histogram': self._hist_plot, 'qq': self._qq_plot,
'pp': self._pp_plot, 'cdf': self._cdf_plot,
'hist': self._hist_plot}
if plot_type.lower() not in plots:
message = f"`plot_type` must be one of {set(plots.keys())}"
raise ValueError(message)
plot = plots[plot_type.lower()]
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
fit_params = np.atleast_1d(self.params)
return plot(ax=ax, fit_params=fit_params)
def _hist_plot(self, ax, fit_params):
from matplotlib.ticker import MaxNLocator
support = self._dist.support(*fit_params)
lb = support[0] if np.isfinite(support[0]) else min(self._data)
ub = support[1] if np.isfinite(support[1]) else max(self._data)
pxf = "PMF" if self.discrete else "PDF"
if self.discrete:
x = np.arange(lb, ub + 2)
y = self.pxf(x, *fit_params)
ax.vlines(x[:-1], 0, y[:-1], label='Fitted Distribution PMF',
color='C0')
options = dict(density=True, bins=x, align='left', color='C1')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xlabel('k')
ax.set_ylabel('PMF')
else:
x = np.linspace(lb, ub, 200)
y = self.pxf(x, *fit_params)
ax.plot(x, y, '--', label='Fitted Distribution PDF', color='C0')
options = dict(density=True, bins=50, align='mid', color='C1')
ax.set_xlabel('x')
ax.set_ylabel('PDF')
if len(self._data) > 50 or self.discrete:
ax.hist(self._data, label="Histogram of Data", **options)
else:
ax.plot(self._data, np.zeros_like(self._data), "*",
label='Data', color='C1')
ax.set_title(rf"Fitted $\tt {self._dist.name}$ {pxf} and Histogram")
ax.legend(*ax.get_legend_handles_labels())
return ax
def _qp_plot(self, ax, fit_params, qq):
data = np.sort(self._data)
ps = self._plotting_positions(len(self._data))
if qq:
qp = "Quantiles"
plot_type = 'Q-Q'
x = self._dist.ppf(ps, *fit_params)
y = data
else:
qp = "Percentiles"
plot_type = 'P-P'
x = ps
y = self._dist.cdf(data, *fit_params)
ax.plot(x, y, '.', label=f'Fitted Distribution {plot_type}',
color='C0', zorder=1)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
lim = [min(xlim[0], ylim[0]), max(xlim[1], ylim[1])]
if not qq:
lim = max(lim[0], 0), min(lim[1], 1)
if self.discrete and qq:
q_min, q_max = int(lim[0]), int(lim[1]+1)
q_ideal = np.arange(q_min, q_max)
# q_ideal = np.unique(self._dist.ppf(ps, *fit_params))
ax.plot(q_ideal, q_ideal, 'o', label='Reference', color='k',
alpha=0.25, markerfacecolor='none', clip_on=True)
elif self.discrete and not qq:
# The intent of this is to match the plot that would be produced
# if x were continuous on [0, 1] and y were cdf(ppf(x)).
# It can be approximated by letting x = np.linspace(0, 1, 1000),
# but this might not look great when zooming in. The vertical
# portions are included to indicate where the transition occurs
# where the data completely obscures the horizontal portions.
p_min, p_max = lim
a, b = self._dist.support(*fit_params)
p_min = max(p_min, 0 if np.isfinite(a) else 1e-3)
p_max = min(p_max, 1 if np.isfinite(b) else 1-1e-3)
q_min, q_max = self._dist.ppf([p_min, p_max], *fit_params)
qs = np.arange(q_min-1, q_max+1)
ps = self._dist.cdf(qs, *fit_params)
ax.step(ps, ps, '-', label='Reference', color='k', alpha=0.25,
clip_on=True)
else:
ax.plot(lim, lim, '-', label='Reference', color='k', alpha=0.25,
clip_on=True)
ax.set_xlim(lim)
ax.set_ylim(lim)
ax.set_xlabel(rf"Fitted $\tt {self._dist.name}$ Theoretical {qp}")
ax.set_ylabel(f"Data {qp}")
ax.set_title(rf"Fitted $\tt {self._dist.name}$ {plot_type} Plot")
ax.legend(*ax.get_legend_handles_labels())
ax.set_aspect('equal')
return ax
def _qq_plot(self, **kwargs):
return self._qp_plot(qq=True, **kwargs)
def _pp_plot(self, **kwargs):
return self._qp_plot(qq=False, **kwargs)
def _plotting_positions(self, n, a=.5):
# See https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_positions
k = np.arange(1, n+1)
return (k-a) / (n + 1 - 2*a)
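# For example (illustrative, not from the original source): with the
# default a=0.5 and n=4, the positions are (k - 0.5)/4 for k = 1..4,
# i.e. [0.125, 0.375, 0.625, 0.875]; a=0 gives the Weibull positions
# k/(n + 1) instead.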
def _cdf_plot(self, ax, fit_params):
data = np.sort(self._data)
ecdf = self._plotting_positions(len(self._data))
ls = '--' if len(np.unique(data)) < 30 else '.'
xlabel = 'k' if self.discrete else 'x'
ax.step(data, ecdf, ls, label='Empirical CDF', color='C1', zorder=0)
xlim = ax.get_xlim()
q = np.linspace(*xlim, 300)
tcdf = self._dist.cdf(q, *fit_params)
ax.plot(q, tcdf, label='Fitted Distribution CDF', color='C0', zorder=1)
ax.set_xlim(xlim)
ax.set_ylim(0, 1)
ax.set_xlabel(xlabel)
ax.set_ylabel("CDF")
ax.set_title(rf"Fitted $\tt {self._dist.name}$ and Empirical CDF")
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
return ax
def fit(dist, data, bounds=None, *, guess=None, method='mle',
optimizer=optimize.differential_evolution):
r"""Fit a discrete or continuous distribution to data
Given a distribution, data, and bounds on the parameters of the
distribution, return maximum likelihood estimates of the parameters.
Parameters
----------
dist : `scipy.stats.rv_continuous` or `scipy.stats.rv_discrete`
The object representing the distribution to be fit to the data.
data : 1D array_like
The data to which the distribution is to be fit. If the data contain
any of ``np.nan``, ``np.inf``, or ``-np.inf``, the fit method will
raise a ``ValueError``.
bounds : dict or sequence of tuples, optional
If a dictionary, each key is the name of a parameter of the
distribution, and the corresponding value is a tuple containing the
lower and upper bound on that parameter. If the distribution is
defined only for a finite range of values of that parameter, no entry
for that parameter is required; e.g., some distributions have
parameters which must be on the interval [0, 1]. Bounds for parameters
location (``loc``) and scale (``scale``) are optional; by default,
they are fixed to 0 and 1, respectively.
If a sequence, element *i* is a tuple containing the lower and upper
bound on the *i*\ th parameter of the distribution. In this case,
bounds for *all* distribution shape parameters must be provided.
Optionally, bounds for location and scale may follow the
distribution shape parameters.
If a shape is to be held fixed (e.g. if it is known), the
lower and upper bounds may be equal. If a user-provided lower or upper
bound is beyond a bound of the domain for which the distribution is
defined, the bound of the distribution's domain will replace the
user-provided value. Similarly, parameters which must be integral
will be constrained to integral values within the user-provided bounds.
guess : dict or array_like, optional
If a dictionary, each key is the name of a parameter of the
distribution, and the corresponding value is a guess for the value
of the parameter.
If a sequence, element *i* is a guess for the *i*\ th parameter of the
distribution. In this case, guesses for *all* distribution shape
parameters must be provided.
If `guess` is not provided, guesses for the decision variables will
not be passed to the optimizer. If `guess` is provided, guesses for
any missing parameters will be set at the mean of the lower and
upper bounds. Guesses for parameters which must be integral will be
rounded to integral values, and guesses that lie outside the
intersection of the user-provided bounds and the domain of the
distribution will be clipped.
method : {'mle', 'mse'}
With ``method="mle"`` (default), the fit is computed by minimizing
the negative log-likelihood function. A large, finite penalty
(rather than infinite negative log-likelihood) is applied for
observations beyond the support of the distribution.
With ``method="mse"``, the fit is computed by minimizing
the negative log-product spacing function. The same penalty is applied
for observations beyond the support. We follow the approach of [1]_,
which is generalized for samples with repeated observations.
optimizer : callable, optional
`optimizer` is a callable that accepts the following positional
argument.
fun : callable
The objective function to be optimized. `fun` accepts one argument
``x``, candidate shape parameters of the distribution, and returns
the objective function value given ``x``, `dist`, and the provided
`data`.
The job of `optimizer` is to find values of the decision variables
that minimize `fun`.
`optimizer` must also accept the following keyword argument.
bounds : sequence of tuples
The bounds on values of the decision variables; each element will
be a tuple containing the lower and upper bound on a decision
variable.
If `guess` is provided, `optimizer` must also accept the following
keyword argument.
x0 : array_like
The guesses for each decision variable.
If the distribution has any shape parameters that must be integral or
if the distribution is discrete and the location parameter is not
fixed, `optimizer` must also accept the following keyword argument.
integrality : array_like of bools
For each decision variable, True if the decision variable
must be constrained to integer values and False if the decision
variable is continuous.
`optimizer` must return an object, such as an instance of
`scipy.optimize.OptimizeResult`, which holds the optimal values of
the decision variables in an attribute ``x``. If attributes
``fun``, ``status``, or ``message`` are provided, they will be
included in the result object returned by `fit`.
Returns
-------
result : `~scipy.stats._result_classes.FitResult`
An object with the following fields.
params : namedtuple
A namedtuple containing the maximum likelihood estimates of the
shape parameters, location, and (if applicable) scale of the
distribution.
success : bool or None
Whether the optimizer considered the optimization to terminate
successfully or not.
message : str or None
Any status message provided by the optimizer.
The object has the following method:
nllf(params=None, data=None)
By default, the negative log-likelihood function at the fitted
`params` for the given `data`. Accepts a tuple containing
alternative shapes, location, and scale of the distribution and
an array of alternative data.
plot(ax=None)
Superposes the PDF/PMF of the fitted distribution over a normalized
histogram of the data.
See Also
--------
rv_continuous, rv_discrete
Notes
-----
Optimization is more likely to converge to the maximum likelihood estimate
when the user provides tight bounds containing the maximum likelihood
estimate. For example, when fitting a binomial distribution to data, the
number of experiments underlying each sample may be known, in which case
the corresponding shape parameter ``n`` can be fixed.
References
----------
.. [1] Shao, Yongzhao, and Marjorie G. Hahn. "Maximum product of spacings
method: a unified formulation with illustration of strong
consistency." Illinois Journal of Mathematics 43.3 (1999): 489-499.
Examples
--------
Suppose we wish to fit a distribution to the following data.
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> dist = stats.nbinom
>>> shapes = (5, 0.5)
>>> data = dist.rvs(*shapes, size=1000, random_state=rng)
Suppose we do not know how the data were generated, but we suspect that
they follow a negative binomial distribution with parameters *n* and *p*\.
(See `scipy.stats.nbinom`.) We believe that the parameter *n* was less
than 30, and we know that the parameter *p* must lie on the interval
[0, 1]. We record this information in a variable `bounds` and pass
this information to `fit`.
>>> bounds = [(0, 30), (0, 1)]
>>> res = stats.fit(dist, data, bounds)
`fit` searches within the user-specified `bounds` for the
values that best match the data (in the sense of maximum likelihood
estimation). In this case, it found shape values similar to those
from which the data were actually generated.
>>> res.params
FitParams(n=5.0, p=0.5028157644634368, loc=0.0) # may vary
We can visualize the results by superposing the probability mass function
of the distribution (with the shapes fit to the data) over a normalized
histogram of the data.
>>> import matplotlib.pyplot as plt # matplotlib must be installed to plot
>>> res.plot()
>>> plt.show()
Note that the estimate for *n* was exactly integral; this is because
the domain of the `nbinom` PMF includes only integral *n*, and the `nbinom`
object "knows" that. `nbinom` also knows that the shape *p* must be a
value between 0 and 1. In such a case - when the domain of the distribution
with respect to a parameter is finite - we are not required to specify
bounds for the parameter.
>>> bounds = {'n': (0, 30)} # omit parameter p using a `dict`
>>> res2 = stats.fit(dist, data, bounds)
>>> res2.params
FitParams(n=5.0, p=0.5016492009232932, loc=0.0) # may vary
If we wish to force the distribution to be fit with *n* fixed at 6, we can
set both the lower and upper bounds on *n* to 6. Note, however, that the
value of the objective function being optimized is typically worse (higher)
in this case.
>>> bounds = {'n': (6, 6)} # fix parameter `n`
>>> res3 = stats.fit(dist, data, bounds)
>>> res3.params
FitParams(n=6.0, p=0.5486556076755706, loc=0.0) # may vary
>>> res3.nllf() > res.nllf()
True # may vary
Note that the numerical results of the previous examples are typical, but
they may vary because the default optimizer used by `fit`,
`scipy.optimize.differential_evolution`, is stochastic. However, we can
customize the settings used by the optimizer to ensure reproducibility -
or even use a different optimizer entirely - using the `optimizer`
parameter.
>>> from scipy.optimize import differential_evolution
>>> rng = np.random.default_rng(767585560716548)
>>> def optimizer(fun, bounds, *, integrality):
... return differential_evolution(fun, bounds, strategy='best2bin',
... seed=rng, integrality=integrality)
>>> bounds = [(0, 30), (0, 1)]
>>> res4 = stats.fit(dist, data, bounds, optimizer=optimizer)
>>> res4.params
FitParams(n=5.0, p=0.5015183149259951, loc=0.0)
"""
# --- Input Validation / Standardization --- #
user_bounds = bounds
user_guess = guess
# distribution input validation and information collection
if hasattr(dist, "pdf"): # can't use isinstance for types
default_bounds = {'loc': (0, 0), 'scale': (1, 1)}
discrete = False
elif hasattr(dist, "pmf"):
default_bounds = {'loc': (0, 0)}
discrete = True
else:
message = ("`dist` must be an instance of `rv_continuous` "
"or `rv_discrete`.")
raise ValueError(message)
try:
param_info = dist._param_info()
except AttributeError as e:
message = (f"Distribution `{dist.name}` is not yet supported by "
"`scipy.stats.fit` because shape information has "
"not been defined.")
raise ValueError(message) from e
# data input validation
data = np.asarray(data)
if data.ndim != 1:
message = "`data` must be exactly one-dimensional."
raise ValueError(message)
if not (np.issubdtype(data.dtype, np.number)
and np.all(np.isfinite(data))):
message = "All elements of `data` must be finite numbers."
raise ValueError(message)
# bounds input validation and information collection
n_params = len(param_info)
n_shapes = n_params - (1 if discrete else 2)
param_list = [param.name for param in param_info]
param_names = ", ".join(param_list)
shape_names = ", ".join(param_list[:n_shapes])
if user_bounds is None:
user_bounds = {}
if isinstance(user_bounds, dict):
default_bounds.update(user_bounds)
user_bounds = default_bounds
user_bounds_array = np.empty((n_params, 2))
for i in range(n_params):
param_name = param_info[i].name
user_bound = user_bounds.pop(param_name, None)
if user_bound is None:
user_bound = param_info[i].domain
user_bounds_array[i] = user_bound
if user_bounds:
message = ("Bounds provided for the following unrecognized "
f"parameters will be ignored: {set(user_bounds)}")
warnings.warn(message, RuntimeWarning, stacklevel=2)
else:
try:
user_bounds = np.asarray(user_bounds, dtype=float)
if user_bounds.size == 0:
user_bounds = np.empty((0, 2))
except ValueError as e:
message = ("Each element of a `bounds` sequence must be a tuple "
"containing two elements: the lower and upper bound of "
"a distribution parameter.")
raise ValueError(message) from e
if (user_bounds.ndim != 2 or user_bounds.shape[1] != 2):
message = ("Each element of `bounds` must be a tuple specifying "
"the lower and upper bounds of a shape parameter")
raise ValueError(message)
if user_bounds.shape[0] < n_shapes:
message = (f"A `bounds` sequence must contain at least {n_shapes} "
"elements: tuples specifying the lower and upper "
f"bounds of all shape parameters {shape_names}.")
raise ValueError(message)
if user_bounds.shape[0] > n_params:
message = ("A `bounds` sequence may not contain more than "
f"{n_params} elements: tuples specifying the lower and "
"upper bounds of distribution parameters "
f"{param_names}.")
raise ValueError(message)
user_bounds_array = np.empty((n_params, 2))
user_bounds_array[n_shapes:] = list(default_bounds.values())
user_bounds_array[:len(user_bounds)] = user_bounds
user_bounds = user_bounds_array
validated_bounds = []
for i in range(n_params):
name = param_info[i].name
user_bound = user_bounds_array[i]
param_domain = param_info[i].domain
integral = param_info[i].integrality
combined = _combine_bounds(name, user_bound, param_domain, integral)
validated_bounds.append(combined)
bounds = np.asarray(validated_bounds)
integrality = [param.integrality for param in param_info]
# guess input validation
if user_guess is None:
guess_array = None
elif isinstance(user_guess, dict):
default_guess = {param.name: np.mean(bound)
for param, bound in zip(param_info, bounds)}
unrecognized = set(user_guess) - set(default_guess)
if unrecognized:
message = ("Guesses provided for the following unrecognized "
f"parameters will be ignored: {unrecognized}")
warnings.warn(message, RuntimeWarning, stacklevel=2)
default_guess.update(user_guess)
message = ("Each element of `guess` must be a scalar "
"guess for a distribution parameter.")
try:
guess_array = np.asarray([default_guess[param.name]
for param in param_info], dtype=float)
except ValueError as e:
raise ValueError(message) from e
else:
message = ("Each element of `guess` must be a scalar "
"guess for a distribution parameter.")
try:
user_guess = np.asarray(user_guess, dtype=float)
except ValueError as e:
raise ValueError(message) from e
if user_guess.ndim != 1:
raise ValueError(message)
if user_guess.shape[0] < n_shapes:
message = (f"A `guess` sequence must contain at least {n_shapes} "
"elements: scalar guesses for the distribution shape "
f"parameters {shape_names}.")
raise ValueError(message)
if user_guess.shape[0] > n_params:
message = ("A `guess` sequence may not contain more than "
f"{n_params} elements: scalar guesses for the "
f"distribution parameters {param_names}.")
raise ValueError(message)
guess_array = np.mean(bounds, axis=1)
guess_array[:len(user_guess)] = user_guess
if guess_array is not None:
guess_rounded = guess_array.copy()
guess_rounded[integrality] = np.round(guess_rounded[integrality])
rounded = np.where(guess_rounded != guess_array)[0]
for i in rounded:
message = (f"Guess for parameter `{param_info[i].name}` "
f"rounded from {guess_array[i]} to {guess_rounded[i]}.")
warnings.warn(message, RuntimeWarning, stacklevel=2)
guess_clipped = np.clip(guess_rounded, bounds[:, 0], bounds[:, 1])
clipped = np.where(guess_clipped != guess_rounded)[0]
for i in clipped:
message = (f"Guess for parameter `{param_info[i].name}` "
f"clipped from {guess_rounded[i]} to "
f"{guess_clipped[i]}.")
warnings.warn(message, RuntimeWarning, stacklevel=2)
guess = guess_clipped
else:
guess = None
# --- Fitting --- #
def nllf(free_params, data=data): # bind data NOW
with np.errstate(invalid='ignore', divide='ignore'):
return dist._penalized_nnlf(free_params, data)
def nlpsf(free_params, data=data): # bind data NOW
with np.errstate(invalid='ignore', divide='ignore'):
return dist._penalized_nlpsf(free_params, data)
methods = {'mle': nllf, 'mse': nlpsf}
objective = methods[method.lower()]
with np.errstate(invalid='ignore', divide='ignore'):
kwds = {}
if bounds is not None:
kwds['bounds'] = bounds
if np.any(integrality):
kwds['integrality'] = integrality
if guess is not None:
kwds['x0'] = guess
res = optimizer(objective, **kwds)
return FitResult(dist, data, discrete, res)
GoodnessOfFitResult = namedtuple('GoodnessOfFitResult',
('fit_result', 'statistic', 'pvalue',
'null_distribution'))
def goodness_of_fit(dist, data, *, known_params=None, fit_params=None,
guessed_params=None, statistic='ad', n_mc_samples=9999,
random_state=None):
r"""
Perform a goodness of fit test comparing data to a distribution family.
Given a distribution family and data, perform a test of the null hypothesis
that the data were drawn from a distribution in that family. Any known
parameters of the distribution may be specified. Remaining parameters of
the distribution will be fit to the data, and the p-value of the test
is computed accordingly. Several statistics for comparing the distribution
to data are available.
Parameters
----------
dist : `scipy.stats.rv_continuous`
The object representing the distribution family under the null
hypothesis.
data : 1D array_like
Finite, uncensored data to be tested.
known_params : dict, optional
A dictionary containing name-value pairs of known distribution
parameters. Monte Carlo samples are randomly drawn from the
null-hypothesized distribution with these values of the parameters.
Before the statistic is evaluated for each Monte Carlo sample, only
remaining unknown parameters of the null-hypothesized distribution
family are fit to the samples; the known parameters are held fixed.
If all parameters of the distribution family are known, then the step
of fitting the distribution family to each sample is omitted.
fit_params : dict, optional
A dictionary containing name-value pairs of distribution parameters
that have already been fit to the data, e.g. using `scipy.stats.fit`
or the ``fit`` method of `dist`. Monte Carlo samples are drawn from the
null-hypothesized distribution with these specified values of the
parameter. On those Monte Carlo samples, however, these and all other
unknown parameters of the null-hypothesized distribution family are
fit before the statistic is evaluated.
guessed_params : dict, optional
A dictionary containing name-value pairs of distribution parameters
which have been guessed. These parameters are always considered as
free parameters and are fit both to the provided `data` as well as
to the Monte Carlo samples drawn from the null-hypothesized
distribution. The purpose of these `guessed_params` is to be used as
initial values for the numerical fitting procedure.
statistic : {"ad", "ks", "cvm", "filliben"}, optional
The statistic used to compare data to a distribution after fitting
unknown parameters of the distribution family to the data. The
Anderson-Darling ("ad") [1]_, Kolmogorov-Smirnov ("ks") [1]_,
Cramer-von Mises ("cvm") [1]_, and Filliben ("filliben") [7]_
statistics are available.
n_mc_samples : int, default: 9999
The number of Monte Carlo samples drawn from the null hypothesized
distribution to form the null distribution of the statistic. The
sample size of each is the same as the given `data`.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Pseudorandom number generator state used to generate the Monte Carlo
samples.
If `random_state` is ``None`` (default), the
`numpy.random.RandomState` singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState``
instance, then the provided instance is used.
Returns
-------
res : GoodnessOfFitResult
An object with the following attributes.
fit_result : `~scipy.stats._result_classes.FitResult`
An object representing the fit of the provided `dist` to `data`.
This object includes the values of distribution family parameters
that fully define the null-hypothesized distribution, that is,
the distribution from which Monte Carlo samples are drawn.
statistic : float
The value of the statistic comparing provided `data` to the
null-hypothesized distribution.
pvalue : float
The proportion of elements in the null distribution with
statistic values at least as extreme as the statistic value of the
provided `data`.
null_distribution : ndarray
The value of the statistic for each Monte Carlo sample
drawn from the null-hypothesized distribution.
Notes
-----
This is a generalized Monte Carlo goodness-of-fit procedure, special cases
of which correspond with various Anderson-Darling tests, Lilliefors' test,
etc. The test is described in [2]_, [3]_, and [4]_ as a parametric
bootstrap test. This is a Monte Carlo test in which parameters that
specify the distribution from which samples are drawn have been estimated
from the data. We describe the test using "Monte Carlo" rather than
"parametric bootstrap" throughout to avoid confusion with the more familiar
nonparametric bootstrap, and describe how the test is performed below.
*Traditional goodness of fit tests*
Traditionally, critical values corresponding with a fixed set of
significance levels are pre-calculated using Monte Carlo methods. Users
perform the test by calculating the value of the test statistic only for
their observed `data` and comparing this value to tabulated critical
values. This practice is not very flexible, as tables are not available for
all distributions and combinations of known and unknown parameter values.
Also, results can be inaccurate when critical values are interpolated from
limited tabulated data to correspond with the user's sample size and
fitted parameter values. To overcome these shortcomings, this function
allows the user to perform the Monte Carlo trials adapted to their
particular data.
*Algorithmic overview*
In brief, this routine executes the following steps:
1. Fit unknown parameters to the given `data`, thereby forming the
"null-hypothesized" distribution, and compute the statistic of
this pair of data and distribution.
2. Draw random samples from this null-hypothesized distribution.
3. Fit the unknown parameters to each random sample.
4. Calculate the statistic between each sample and the distribution that
has been fit to the sample.
5. Compare the value of the statistic corresponding with `data` from (1)
against the values of the statistic corresponding with the random
samples from (4). The p-value is the proportion of samples with a
statistic value greater than or equal to the statistic of the observed
data.
In more detail, the steps are as follows.
First, any unknown parameters of the distribution family specified by
`dist` are fit to the provided `data` using maximum likelihood estimation.
(One exception is the normal distribution with unknown location and scale:
we use the bias-corrected standard deviation ``np.std(data, ddof=1)`` for
the scale as recommended in [1]_.)
These values of the parameters specify a particular member of the
distribution family referred to as the "null-hypothesized distribution",
that is, the distribution from which the data were sampled under the null
hypothesis. The `statistic`, which compares data to a distribution, is
computed between `data` and the null-hypothesized distribution.
Next, many (specifically `n_mc_samples`) new samples, each containing the
same number of observations as `data`, are drawn from the
null-hypothesized distribution. All unknown parameters of the distribution
family `dist` are fit to *each resample*, and the `statistic` is computed
between each sample and its corresponding fitted distribution. These
values of the statistic form the Monte Carlo null distribution (not to be
confused with the "null-hypothesized distribution" above).
The p-value of the test is the proportion of statistic values in the Monte
Carlo null distribution that are at least as extreme as the statistic value
of the provided `data`. More precisely, the p-value is given by
.. math::
p = \frac{b + 1}
{m + 1}
where :math:`b` is the number of statistic values in the Monte Carlo null
distribution that are greater than or equal to the statistic value
calculated for `data`, and :math:`m` is the number of elements in the
Monte Carlo null distribution (`n_mc_samples`). The addition of :math:`1`
to the numerator and denominator can be thought of as including the
value of the statistic corresponding with `data` in the null distribution,
but a more formal explanation is given in [5]_.
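For example, with a hypothetical statistic value and Monte Carlo null
distribution (placeholder numbers chosen for illustration only):
>>> import numpy as np
>>> statistic = 0.35
>>> null_distribution = np.array([0.1, 0.2, 0.4])  # m = 3
>>> b = np.sum(null_distribution >= statistic)
>>> float((b + 1) / (len(null_distribution) + 1))
0.5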
*Limitations*
The test can be very slow for some distribution families because unknown
parameters of the distribution family must be fit to each of the Monte
Carlo samples, and for most distributions in SciPy, distribution fitting
is performed via numerical optimization.
*Anti-Pattern*
For this reason, it may be tempting
to treat parameters of the distribution pre-fit to `data` (by the user)
as though they were `known_params`, as specification of all parameters of
the distribution precludes the need to fit the distribution to each Monte
Carlo sample. (This is essentially how the original Kolmogorov-Smirnov
test is performed.) Although such a test can provide evidence against the
null hypothesis, the test is conservative in the sense that small p-values
will tend to (greatly) *overestimate* the probability of making a type I
error (that is, rejecting the null hypothesis although it is true), and the
power of the test is low (that is, it is less likely to reject the null
hypothesis even when the null hypothesis is false).
This is because the Monte Carlo samples are less likely to agree with the
null-hypothesized distribution as well as `data`. This tends to increase
the values of the statistic recorded in the null distribution, so that a
larger number of them exceed the value of statistic for `data`, thereby
inflating the p-value.
References
----------
.. [1] M. A. Stephens (1974). "EDF Statistics for Goodness of Fit and
Some Comparisons." Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [2] W. Stute, W. G. Manteiga, and M. P. Quindimil (1993).
"Bootstrap based goodness-of-fit-tests." Metrika 40.1: 243-256.
.. [3] C. Genest, & B Rémillard. (2008). "Validity of the parametric
bootstrap for goodness-of-fit testing in semiparametric models."
Annales de l'IHP Probabilités et statistiques. Vol. 44. No. 6.
.. [4] I. Kojadinovic and J. Yan (2012). "Goodness-of-fit testing based on
a weighted bootstrap: A fast large-sample alternative to the
parametric bootstrap." Canadian Journal of Statistics 40.3: 480-500.
.. [5] B. Phipson and G. K. Smyth (2010). "Permutation P-values Should
Never Be Zero: Calculating Exact P-values When Permutations Are
Randomly Drawn." Statistical Applications in Genetics and Molecular
Biology 9.1.
.. [6] H. W. Lilliefors (1967). "On the Kolmogorov-Smirnov test for
normality with mean and variance unknown." Journal of the American
statistical Association 62.318: 399-402.
.. [7] Filliben, James J. "The probability plot correlation coefficient
test for normality." Technometrics 17.1 (1975): 111-117.
Examples
--------
A well-known test of the null hypothesis that data were drawn from a
given distribution is the Kolmogorov-Smirnov (KS) test, available in SciPy
as `scipy.stats.ks_1samp`. Suppose we wish to test whether the following
data:
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = stats.uniform.rvs(size=75, random_state=rng)
were sampled from a normal distribution. To perform a KS test, the
empirical distribution function of the observed data will be compared
against the (theoretical) cumulative distribution function of a normal
distribution. Of course, to do this, the normal distribution under the null
hypothesis must be fully specified. This is commonly done by first fitting
the ``loc`` and ``scale`` parameters of the distribution to the observed
data, then performing the test.
>>> loc, scale = np.mean(x), np.std(x, ddof=1)
>>> cdf = stats.norm(loc, scale).cdf
>>> stats.ks_1samp(x, cdf)
KstestResult(statistic=0.1119257570456813, pvalue=0.2827756409939257)
An advantage of the KS-test is that the p-value - the probability of
obtaining a value of the test statistic under the null hypothesis as
extreme as the value obtained from the observed data - can be calculated
exactly and efficiently. `goodness_of_fit` can only approximate these
results.
>>> known_params = {'loc': loc, 'scale': scale}
>>> res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
... statistic='ks', random_state=rng)
>>> res.statistic, res.pvalue
(0.1119257570456813, 0.2788)
The statistic matches exactly, but the p-value is estimated by forming
a "Monte Carlo null distribution", that is, by explicitly drawing random
samples from `scipy.stats.norm` with the provided parameters and
calculating the statistic for each. The fraction of these statistic values
at least as extreme as ``res.statistic`` approximates the exact p-value
calculated by `scipy.stats.ks_1samp`.
However, in many cases, we would prefer to test only that the data were
sampled from one of *any* member of the normal distribution family, not
specifically from the normal distribution with the location and scale
fitted to the observed sample. In this case, Lilliefors [6]_ argued that
the KS test is far too conservative (that is, the p-value overstates
the actual probability of rejecting a true null hypothesis) and thus lacks
power - the ability to reject the null hypothesis when the null hypothesis
is actually false.
Indeed, our p-value above is approximately 0.28, which is far too large
to reject the null hypothesis at any common significance level.
Consider why this might be. Note that in the KS test above, the statistic
always compares data against the CDF of a normal distribution fitted to the
*observed data*. This tends to reduce the value of the statistic for the
observed data, but it is "unfair" when computing the statistic for other
samples, such as those we randomly draw to form the Monte Carlo null
distribution. It is easy to correct for this: whenever we compute the KS
statistic of a sample, we use the CDF of a normal distribution fitted
to *that sample*. The null distribution in this case has not been
calculated exactly and is typically approximated using Monte Carlo methods
as described above. This is where `goodness_of_fit` excels.
>>> res = stats.goodness_of_fit(stats.norm, x, statistic='ks',
... random_state=rng)
>>> res.statistic, res.pvalue
(0.1119257570456813, 0.0196)
Indeed, this p-value is much smaller, and small enough to (correctly)
reject the null hypothesis at common significance levels, including 5% and
2.5%.
However, the KS statistic is not very sensitive to all deviations from
normality. The original advantage of the KS statistic was the ability
to compute the null distribution theoretically, but a more sensitive
statistic - resulting in a higher test power - can be used now that we can
approximate the null distribution
computationally. The Anderson-Darling statistic [1]_ tends to be more
sensitive, and critical values of this statistic have been tabulated
for various significance levels and sample sizes using Monte Carlo methods.
>>> res = stats.anderson(x, 'norm')
>>> print(res.statistic)
1.2139573337497467
>>> print(res.critical_values)
[0.549 0.625 0.75 0.875 1.041]
>>> print(res.significance_level)
[15. 10. 5. 2.5 1. ]
Here, the observed value of the statistic exceeds the critical value
corresponding with a 1% significance level. This tells us that the p-value
of the observed data is less than 1%, but what is it? We could interpolate
from these (already-interpolated) values, but `goodness_of_fit` can
estimate it directly.
>>> res = stats.goodness_of_fit(stats.norm, x, statistic='ad',
... random_state=rng)
>>> res.statistic, res.pvalue
(1.2139573337497467, 0.0034)
A further advantage is that use of `goodness_of_fit` is not limited to
a particular set of distributions or conditions on which parameters
are known versus which must be estimated from data. Instead,
`goodness_of_fit` can estimate p-values relatively quickly for any
distribution with a sufficiently fast and reliable ``fit`` method. For
instance, here we perform a goodness of fit test using the Cramer-von Mises
statistic against the Rayleigh distribution with known location and unknown
scale.
>>> rng = np.random.default_rng()
>>> x = stats.chi(df=2.2, loc=0, scale=2).rvs(size=1000, random_state=rng)
>>> res = stats.goodness_of_fit(stats.rayleigh, x, statistic='cvm',
... known_params={'loc': 0}, random_state=rng)
This executes fairly quickly, but to check the reliability of the ``fit``
method, we should inspect the fit result.
>>> res.fit_result # location is as specified, and scale is reasonable
params: FitParams(loc=0.0, scale=2.1026719844231243)
success: True
message: 'The fit was performed successfully.'
>>> import matplotlib.pyplot as plt # matplotlib must be installed to plot
>>> res.fit_result.plot()
>>> plt.show()
If the distribution is not fit to the observed data as well as possible,
the test may not control the type I error rate, that is, the chance of
rejecting the null hypothesis even when it is true.
We should also look for extreme outliers in the null distribution that
may be caused by unreliable fitting. These do not necessarily invalidate
the result, but they tend to reduce the test's power.
>>> _, ax = plt.subplots()
>>> ax.hist(np.log10(res.null_distribution))
>>> ax.set_xlabel("log10 of CVM statistic under the null hypothesis")
>>> ax.set_ylabel("Frequency")
>>> ax.set_title("Histogram of the Monte Carlo null distribution")
>>> plt.show()
This plot seems reassuring.
If the ``fit`` method is working reliably, and if the distribution of the test
statistic is not particularly sensitive to the values of the fitted
parameters, then the p-value provided by `goodness_of_fit` is expected to
be a good approximation.
>>> res.statistic, res.pvalue
(0.2231991510248692, 0.0525)
"""
args = _gof_iv(dist, data, known_params, fit_params, guessed_params,
statistic, n_mc_samples, random_state)
(dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
guessed_rfd_params, statistic, n_mc_samples_int, random_state) = args
# Fit null hypothesis distribution to data
nhd_fit_fun = _get_fit_fun(dist, data, guessed_nhd_params,
fixed_nhd_params)
nhd_vals = nhd_fit_fun(data)
nhd_dist = dist(*nhd_vals)
def rvs(size):
return nhd_dist.rvs(size=size, random_state=random_state)
# Define statistic
fit_fun = _get_fit_fun(dist, data, guessed_rfd_params, fixed_rfd_params)
compare_fun = _compare_dict[statistic]
alternative = getattr(compare_fun, 'alternative', 'greater')
def statistic_fun(data, axis=-1):
# Make things simple by always working along the last axis.
data = np.moveaxis(data, axis, -1)
rfd_vals = fit_fun(data)
rfd_dist = dist(*rfd_vals)
return compare_fun(rfd_dist, data)
res = stats.monte_carlo_test(data, rvs, statistic_fun, vectorized=True,
n_resamples=n_mc_samples, axis=-1,
alternative=alternative)
opt_res = optimize.OptimizeResult()
opt_res.success = True
opt_res.message = "The fit was performed successfully."
opt_res.x = nhd_vals
# Only continuous distributions for now, hence discrete=False
# There's no fundamental limitation; it's just that we're not using
# `stats.fit`, discrete distributions don't have a `fit` method, and
# we haven't written any vectorized fit functions for a discrete
# distribution yet.
return GoodnessOfFitResult(FitResult(dist, data, False, opt_res),
res.statistic, res.pvalue,
res.null_distribution)
def _get_fit_fun(dist, data, guessed_params, fixed_params):
shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
param_names = shape_names + ['loc', 'scale']
fparam_names = ['f'+name for name in param_names]
all_fixed = not set(fparam_names).difference(fixed_params)
guessed_shapes = [guessed_params.pop(x, None)
for x in shape_names if x in guessed_params]
if all_fixed:
def fit_fun(data):
return [fixed_params[name] for name in fparam_names]
# Define statistic, including fitting distribution to data
elif dist in _fit_funs:
def fit_fun(data):
params = _fit_funs[dist](data, **fixed_params)
params = np.asarray(np.broadcast_arrays(*params))
if params.ndim > 1:
params = params[..., np.newaxis]
return params
else:
def fit_fun_1d(data):
return dist.fit(data, *guessed_shapes, **guessed_params,
**fixed_params)
def fit_fun(data):
params = np.apply_along_axis(fit_fun_1d, axis=-1, arr=data)
if params.ndim > 1:
params = params.T[..., np.newaxis]
return params
return fit_fun
# Vectorized fitting functions. These accept N-D `data`, in which each
# row (slice along the last axis) is a sample to fit, plus scalar fixed
# parameters. They return a tuple of parameter arrays, each of shape
# data.shape[:-1].
def _fit_norm(data, floc=None, fscale=None):
loc = floc
scale = fscale
if loc is None and scale is None:
loc = np.mean(data, axis=-1)
scale = np.std(data, ddof=1, axis=-1)
elif loc is None:
loc = np.mean(data, axis=-1)
elif scale is None:
scale = np.sqrt(((data - loc)**2).mean(axis=-1))
return loc, scale
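# Usage sketch (illustrative, not part of the original module): for `data`
# of shape (5, 100), _fit_norm(data) returns a pair of shape-(5,) arrays
# (loc, scale); with floc=0 fixed, the returned scale is the root mean
# square of the data about 0.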
_fit_funs = {stats.norm: _fit_norm} # type: ignore[attr-defined]
# Vectorized goodness of fit statistic functions. These accept a frozen
# distribution object and `data` in which each row (slice along last axis) is
# a sample.
def _anderson_darling(dist, data):
x = np.sort(data, axis=-1)
n = data.shape[-1]
i = np.arange(1, n+1)
Si = (2*i - 1)/n * (dist.logcdf(x) + dist.logsf(x[..., ::-1]))
S = np.sum(Si, axis=-1)
return -n - S
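# Illustrative cross-check (an assumption, not from the original source):
# for a 1-D sample with a normal fit following the conventions of
# `stats.anderson` (mean and ddof=1 standard deviation), this matches the
# scalar routine:
#   rng = np.random.default_rng(1234)
#   x = rng.normal(size=100)
#   dist = stats.norm(*_fit_norm(x))
#   np.isclose(_anderson_darling(dist, x), stats.anderson(x, 'norm').statistic)
#   # expected: True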
def _compute_dplus(cdfvals): # adapted from _stats_py before gh-17062
n = cdfvals.shape[-1]
return (np.arange(1.0, n + 1) / n - cdfvals).max(axis=-1)
def _compute_dminus(cdfvals, axis=-1):
n = cdfvals.shape[-1]
return (cdfvals - np.arange(0.0, n)/n).max(axis=-1)
def _kolmogorov_smirnov(dist, data):
x = np.sort(data, axis=-1)
cdfvals = dist.cdf(x)
Dplus = _compute_dplus(cdfvals) # always works along last axis
Dminus = _compute_dminus(cdfvals)
return np.maximum(Dplus, Dminus)
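# Illustrative cross-check (an assumption, not from the original source):
# with a fully specified distribution this reduces to the ordinary
# one-sample Kolmogorov-Smirnov statistic, e.g. for 1-D `x`,
# _kolmogorov_smirnov(stats.norm(), x) should equal
# stats.ks_1samp(x, stats.norm().cdf).statistic.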
def _corr(X, M):
# Correlation coefficient r, simplified and vectorized as we need it.
# See [7] Equation (2). Lemma 1/2 are only for distributions symmetric
# about 0.
Xm = X.mean(axis=-1, keepdims=True)
Mm = M.mean(axis=-1, keepdims=True)
num = np.sum((X - Xm) * (M - Mm), axis=-1)
den = np.sqrt(np.sum((X - Xm)**2, axis=-1) * np.sum((M - Mm)**2, axis=-1))
return num/den
def _filliben(dist, data):
# [7] Section 8 # 1
X = np.sort(data, axis=-1)
# [7] Section 8 # 2
n = data.shape[-1]
k = np.arange(1, n+1)
# Filliben used an approximation for the uniform distribution order
# statistic medians.
# m = (k - .3175)/(n + 0.365)
# m[-1] = 0.5**(1/n)
# m[0] = 1 - m[-1]
# We can just as easily use the (theoretically) exact values. See e.g.
# https://en.wikipedia.org/wiki/Order_statistic
# "Order statistics sampled from a uniform distribution"
m = stats.beta(k, n + 1 - k).median()
# [7] Section 8 # 3
M = dist.ppf(m)
# [7] Section 8 # 4
return _corr(X, M)
_filliben.alternative = 'less' # type: ignore[attr-defined]
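# Note on the order-statistic medians above (illustrative): for n = 3 they
# are the medians of Beta(k, 4 - k) for k = 1, 2, 3, i.e.
#   stats.beta([1, 2, 3], [3, 2, 1]).median()
#   # -> approximately [0.2063, 0.5, 0.7937]
# consistent with Filliben's endpoint values m[-1] = 0.5**(1/n) and
# m[0] = 1 - m[-1].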
def _cramer_von_mises(dist, data):
x = np.sort(data, axis=-1)
n = data.shape[-1]
cdfvals = dist.cdf(x)
u = (2*np.arange(1, n+1) - 1)/(2*n)
w = 1 / (12*n) + np.sum((u - cdfvals)**2, axis=-1)
return w
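# Illustrative cross-check (an assumption, not from the original source):
# for 1-D `x`, _cramer_von_mises(stats.norm(), x) should equal
# stats.cramervonmises(x, stats.norm().cdf).statistic.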
_compare_dict = {"ad": _anderson_darling, "ks": _kolmogorov_smirnov,
"cvm": _cramer_von_mises, "filliben": _filliben}
def _gof_iv(dist, data, known_params, fit_params, guessed_params, statistic,
n_mc_samples, random_state):
if not isinstance(dist, stats.rv_continuous):
message = ("`dist` must be a (non-frozen) instance of "
"`stats.rv_continuous`.")
raise TypeError(message)
data = np.asarray(data, dtype=float)
if not data.ndim == 1:
message = "`data` must be a one-dimensional array of numbers."
raise ValueError(message)
# Leave validation of these key/value pairs to the `fit` method,
# but collect these into dictionaries that will be used
known_params = known_params or dict()
fit_params = fit_params or dict()
guessed_params = guessed_params or dict()
known_params_f = {("f"+key): val for key, val in known_params.items()}
fit_params_f = {("f"+key): val for key, val in fit_params.items()}
# These are the values of the parameters of the null distribution family
# with which resamples are drawn
fixed_nhd_params = known_params_f.copy()
fixed_nhd_params.update(fit_params_f)
# These are fixed when fitting the distribution family to resamples
fixed_rfd_params = known_params_f.copy()
# These are used as guesses when fitting the distribution family to
# the original data
guessed_nhd_params = guessed_params.copy()
# These are used as guesses when fitting the distribution family to
# resamples
guessed_rfd_params = fit_params.copy()
guessed_rfd_params.update(guessed_params)
statistic = statistic.lower()
statistics = {'ad', 'ks', 'cvm', 'filliben'}
if statistic not in statistics:
message = f"`statistic` must be one of {statistics}."
raise ValueError(message)
n_mc_samples_int = int(n_mc_samples)
if n_mc_samples_int != n_mc_samples:
message = "`n_mc_samples` must be an integer."
raise TypeError(message)
random_state = check_random_state(random_state)
return (dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
guessed_rfd_params, statistic, n_mc_samples_int, random_state)
| 58,447
| 43.145015
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_ksstats.py
|
# Compute the two-sided one-sample Kolmogorov-Smirnov Prob(Dn <= d) where:
# D_n = sup_x{|F_n(x) - F(x)|},
# F_n(x) is the empirical CDF for a sample of size n {x_i: i=1,...,n},
# F(x) is the CDF of a probability distribution.
#
# Exact methods:
# Prob(D_n >= d) can be computed via a matrix algorithm of Durbin[1]
# or a recursion algorithm due to Pomeranz[2].
# Marsaglia, Tsang & Wang[3] gave a computationally efficient way to perform
# the Durbin algorithm.
# D_n >= d <==> D_n+ >= d or D_n- >= d (the one-sided K-S statistics), hence
# Prob(D_n >= d) = 2*Prob(D_n+ >= d) - Prob(D_n+ >= d and D_n- >= d).
# For d > 0.5, the latter intersection probability is 0.
#
# Approximate methods:
# For d close to 0.5, ignoring that intersection term may still give a
# reasonable approximation.
# Li-Chien[4] and Korolyuk[5] gave an asymptotic formula extending
# Kolmogorov's initial asymptotic, suitable for large d. (See
# scipy.special.kolmogorov for that asymptotic)
# Pelz-Good[6] used the functional equation for Jacobi theta functions to
# transform the Li-Chien/Korolyuk formula to produce a computational formula
# suitable for small d.
#
# Simard and L'Ecuyer[7] provided an algorithm to decide when to use each of
# the above approaches and it is that which is used here.
#
# Other approaches:
# Carvalho[8] optimizes Durbin's matrix algorithm for large values of d.
# Moscovich and Nadler[9] use FFTs to compute the convolutions.
# References:
# [1] Durbin J (1968).
# "The Probability that the Sample Distribution Function Lies Between Two
# Parallel Straight Lines."
# Annals of Mathematical Statistics, 39, 398-411.
# [2] Pomeranz J (1974).
# "Exact Cumulative Distribution of the Kolmogorov-Smirnov Statistic for
# Small Samples (Algorithm 487)."
# Communications of the ACM, 17(12), 703-704.
# [3] Marsaglia G, Tsang WW, Wang J (2003).
# "Evaluating Kolmogorov's Distribution."
# Journal of Statistical Software, 8(18), 1-4.
# [4] LI-CHIEN, C. (1956).
# "On the exact distribution of the statistics of A. N. Kolmogorov and
# their asymptotic expansion."
# Acta Matematica Sinica, 6, 55-81.
# [5] KOROLYUK, V. S. (1960).
# "Asymptotic analysis of the distribution of the maximum deviation in
# the Bernoulli scheme."
# Theor. Probability Appl., 4, 339-366.
# [6] Pelz W, Good IJ (1976).
# "Approximating the Lower Tail-areas of the Kolmogorov-Smirnov One-sample
# Statistic."
# Journal of the Royal Statistical Society, Series B, 38(2), 152-156.
# [7] Simard, R., L'Ecuyer, P. (2011)
# "Computing the Two-Sided Kolmogorov-Smirnov Distribution",
# Journal of Statistical Software, Vol 39, 11, 1-18.
# [8] Carvalho, Luis (2015)
# "An Improved Evaluation of Kolmogorov's Distribution"
# Journal of Statistical Software, Code Snippets; Vol 65(3), 1-8.
# [9] Amit Moscovich, Boaz Nadler (2017)
# "Fast calculation of boundary crossing probabilities for Poisson
# processes",
# Statistics & Probability Letters, Vol 123, 177-182.
import numpy as np
import scipy.special
import scipy.special._ufuncs as scu
from scipy._lib._finite_differences import _derivative
_E128 = 128
_EP128 = np.ldexp(np.longdouble(1), _E128)
_EM128 = np.ldexp(np.longdouble(1), -_E128)
_SQRT2PI = np.sqrt(2 * np.pi)
_LOG_2PI = np.log(2 * np.pi)
_MIN_LOG = -708
_SQRT3 = np.sqrt(3)
_PI_SQUARED = np.pi ** 2
_PI_FOUR = np.pi ** 4
_PI_SIX = np.pi ** 6
# [Lifted from _loggamma.pxd.] If B_m are the Bernoulli numbers,
# then Stirling coeffs are B_{2j}/(2j)/(2j-1) for j=8,...1.
_STIRLING_COEFFS = [-2.955065359477124183e-2, 6.4102564102564102564e-3,
-1.9175269175269175269e-3, 8.4175084175084175084e-4,
-5.952380952380952381e-4, 7.9365079365079365079e-4,
-2.7777777777777777778e-3, 8.3333333333333333333e-2]
def _log_nfactorial_div_n_pow_n(n):
# Computes n! / n**n
# = (n-1)! / n**(n-1)
# Uses Stirling's approximation, but removes n*log(n) up-front to
# avoid subtractive cancellation.
# = log(n)/2 - n + log(sqrt(2pi)) + sum B_{2j}/(2j)/(2j-1)/n**(2j-1)
rn = 1.0/n
return np.log(n)/2 - n + _LOG_2PI/2 + rn * np.polyval(_STIRLING_COEFFS, rn/n)
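# Sanity sketch (illustrative, not part of the original module): the
# Stirling form should agree with a direct log-gamma evaluation, e.g.
#   from scipy.special import gammaln
#   n = 50
#   np.isclose(_log_nfactorial_div_n_pow_n(n), gammaln(n + 1) - n*np.log(n))
#   # expected: True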
def _clip_prob(p):
"""clips a probability to range 0<=p<=1."""
return np.clip(p, 0.0, 1.0)
def _select_and_clip_prob(cdfprob, sfprob, cdf=True):
"""Selects either the CDF or SF, and then clips to range 0<=p<=1."""
p = np.where(cdf, cdfprob, sfprob)
return _clip_prob(p)
def _kolmogn_DMTW(n, d, cdf=True):
r"""Computes the Kolmogorov CDF: Pr(D_n <= d) using the MTW approach to
the Durbin matrix algorithm.
Durbin (1968); Marsaglia, Tsang, Wang (2003). [1], [3].
"""
# Write d = (k-h)/n, where k is positive integer and 0 <= h < 1
# Generate initial matrix H of size m*m where m=(2k-1)
# Compute k-th row of (n!/n^n) * H^n, scaling intermediate results.
# Requires memory O(m^2) and computation O(m^2 log(n)).
# Most suitable for small m.
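# Worked example of the decomposition (illustrative): n=10, d=0.26 gives
# nd=2.6, so k=ceil(2.6)=3, h=3-2.6=0.4, and the matrix size is
# m = 2*3 - 1 = 5.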
if d >= 1.0:
return _select_and_clip_prob(1.0, 0.0, cdf)
nd = n * d
if nd <= 0.5:
return _select_and_clip_prob(0.0, 1.0, cdf)
k = int(np.ceil(nd))
h = k - nd
m = 2 * k - 1
H = np.zeros([m, m])
# Initialize: v is first column (and last row) of H
# v[j] = (1 - h^(j+1))/(j+1)! (except for v[-1])
# w[j] = 1/(j)!
# q = k-th row of H (actually i!/n^i*H^i)
intm = np.arange(1, m + 1)
v = 1.0 - h ** intm
w = np.empty(m)
fac = 1.0
for j in intm:
w[j - 1] = fac
fac /= j # This might underflow. Isn't a problem.
v[j - 1] *= fac
tt = max(2 * h - 1.0, 0)**m - 2*h**m
v[-1] = (1.0 + tt) * fac
for i in range(1, m):
H[i - 1:, i] = w[:m - i + 1]
H[:, 0] = v
H[-1, :] = np.flip(v, axis=0)
Hpwr = np.eye(np.shape(H)[0]) # Holds intermediate powers of H
nn = n
expnt = 0 # Scaling of Hpwr
Hexpnt = 0 # Scaling of H
while nn > 0:
if nn % 2:
Hpwr = np.matmul(Hpwr, H)
expnt += Hexpnt
H = np.matmul(H, H)
Hexpnt *= 2
# Scale as needed.
if np.abs(H[k - 1, k - 1]) > _EP128:
H /= _EP128
Hexpnt += _E128
nn = nn // 2
p = Hpwr[k - 1, k - 1]
# Multiply by n!/n^n
for i in range(1, n + 1):
p = i * p / n
if np.abs(p) < _EM128:
p *= _EP128
expnt -= _E128
# unscale
if expnt != 0:
p = np.ldexp(p, expnt)
return _select_and_clip_prob(p, 1.0-p, cdf)
def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf):
"""Compute the endpoints of the interval for row i."""
if i == 0:
j1, j2 = -ll - ceilf - 1, ll + ceilf - 1
else:
# i + 1 = 2*ip1div2 + ip1mod2
ip1div2, ip1mod2 = divmod(i + 1, 2)
if ip1mod2 == 0: # i is odd
if ip1div2 == n + 1:
j1, j2 = n - ll - ceilf - 1, n + ll + ceilf - 1
else:
j1, j2 = ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1
else:
j1, j2 = ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1
return max(j1 + 2, 0), min(j2, n)
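# Illustrative evaluation (not part of the original module):
#   _pomeranz_compute_j1j2(0, 5, 2, 1, 0) -> (0, 2)
# i.e. only entries 0..2 of row i=0 can be non-zero.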
def _kolmogn_Pomeranz(n, x, cdf=True):
r"""Computes Pr(D_n <= d) using the Pomeranz recursion algorithm.
Pomeranz (1974) [2]
"""
# V is n*(2n+2) matrix.
# Each row is convolution of the previous row and probabilities from a
# Poisson distribution.
# Desired CDF probability is n! V[n-1, 2n+1] (final entry in final row).
# Only two rows are needed at any given stage:
# - Call them V0 and V1.
# - Swap each iteration
# Only a few (contiguous) entries in each row can be non-zero.
# - Keep track of start and end (j1 and j2 below)
# - V0s and V1s track the start in the two rows
# Scale intermediate results as needed.
# Only a few different Poisson distributions can occur
t = n * x
ll = int(np.floor(t))
f = 1.0 * (t - ll) # fractional part of t
g = min(f, 1.0 - f)
ceilf = (1 if f > 0 else 0)
roundf = (1 if f > 0.5 else 0)
npwrs = 2 * (ll + 1) # Maximum number of powers needed in convolutions
gpower = np.empty(npwrs) # gpower = (g/n)^m/m!
twogpower = np.empty(npwrs) # twogpower = (2g/n)^m/m!
onem2gpower = np.empty(npwrs) # onem2gpower = ((1-2g)/n)^m/m!
# gpower etc are *almost* Poisson probs, just missing normalizing factor.
gpower[0] = 1.0
twogpower[0] = 1.0
onem2gpower[0] = 1.0
expnt = 0
g_over_n, two_g_over_n, one_minus_two_g_over_n = g/n, 2*g/n, (1 - 2*g)/n
for m in range(1, npwrs):
gpower[m] = gpower[m - 1] * g_over_n / m
twogpower[m] = twogpower[m - 1] * two_g_over_n / m
onem2gpower[m] = onem2gpower[m - 1] * one_minus_two_g_over_n / m
V0 = np.zeros([npwrs])
V1 = np.zeros([npwrs])
V1[0] = 1 # first row
V0s, V1s = 0, 0 # start indices of the two rows
j1, j2 = _pomeranz_compute_j1j2(0, n, ll, ceilf, roundf)
for i in range(1, 2 * n + 2):
# Preserve j1, V1, V1s, V0s from last iteration
k1 = j1
V0, V1 = V1, V0
V0s, V1s = V1s, V0s
V1.fill(0.0)
j1, j2 = _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf)
if i == 1 or i == 2 * n + 1:
pwrs = gpower
else:
pwrs = (twogpower if i % 2 else onem2gpower)
ln2 = j2 - k1 + 1
if ln2 > 0:
conv = np.convolve(V0[k1 - V0s:k1 - V0s + ln2], pwrs[:ln2])
conv_start = j1 - k1 # First index to use from conv
conv_len = j2 - j1 + 1 # Number of entries to use from conv
V1[:conv_len] = conv[conv_start:conv_start + conv_len]
# Scale to avoid underflow.
if 0 < np.max(V1) < _EM128:
V1 *= _EP128
expnt -= _E128
V1s = V0s + j1 - k1
# multiply by n!
ans = V1[n - V1s]
for m in range(1, n + 1):
if np.abs(ans) > _EP128:
ans *= _EM128
expnt += _E128
ans *= m
# Undo any intermediate scaling
if expnt != 0:
ans = np.ldexp(ans, expnt)
ans = _select_and_clip_prob(ans, 1.0 - ans, cdf)
return ans
def _kolmogn_PelzGood(n, x, cdf=True):
"""Computes the Pelz-Good approximation to Prob(Dn <= x) with 0<=x<=1.
Start with Li-Chien, Korolyuk approximation:
Prob(Dn <= x) ~ K0(z) + K1(z)/sqrt(n) + K2(z)/n + K3(z)/n**1.5
where z = x*sqrt(n).
Transform each K_(z) using Jacobi theta functions into a form suitable
for small z.
Pelz-Good (1976). [6]
"""
if x <= 0.0:
return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
if x >= 1.0:
return _select_and_clip_prob(1.0, 0.0, cdf=cdf)
z = np.sqrt(n) * x
zsquared, zthree, zfour, zsix = z**2, z**3, z**4, z**6
qlog = -_PI_SQUARED / 8 / zsquared
if qlog < _MIN_LOG: # z ~ 0.041743441416853426
return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
q = np.exp(qlog)
# Coefficients of terms in the sums for K1, K2 and K3
k1a = -zsquared
k1b = _PI_SQUARED / 4
k2a = 6 * zsix + 2 * zfour
k2b = (2 * zfour - 5 * zsquared) * _PI_SQUARED / 4
k2c = _PI_FOUR * (1 - 2 * zsquared) / 16
k3d = _PI_SIX * (5 - 30 * zsquared) / 64
k3c = _PI_FOUR * (-60 * zsquared + 212 * zfour) / 16
k3b = _PI_SQUARED * (135 * zfour - 96 * zsix) / 4
k3a = -30 * zsix - 90 * z**8
K0to3 = np.zeros(4)
# Use a Horner scheme to evaluate sum c_i q^(i^2)
# Reduces to a sum over odd integers.
maxk = int(np.ceil(16 * z / np.pi))
for k in range(maxk, 0, -1):
m = 2 * k - 1
msquared, mfour, msix = m**2, m**4, m**6
qpower = np.power(q, 8 * k)
coeffs = np.array([1.0,
k1a + k1b*msquared,
k2a + k2b*msquared + k2c*mfour,
k3a + k3b*msquared + k3c*mfour + k3d*msix])
K0to3 *= qpower
K0to3 += coeffs
K0to3 *= q
K0to3 *= _SQRT2PI
# z**10 > 0 as z > 0.04
K0to3 /= np.array([z, 6 * zfour, 72 * z**7, 6480 * z**10])
# Now do the other sum over the other terms, all integers k
# K_2: (pi^2 k^2) q^(k^2),
# K_3: (3pi^2 k^2 z^2 - pi^4 k^4)*q^(k^2)
# Don't expect much subtractive cancellation so use direct calculation
q = np.exp(-_PI_SQUARED / 2 / zsquared)
ks = np.arange(maxk, 0, -1)
ksquared = ks ** 2
sqrt3z = _SQRT3 * z
kspi = np.pi * ks
qpwers = q ** ksquared
k2extra = np.sum(ksquared * qpwers)
k2extra *= _PI_SQUARED * _SQRT2PI/(-36 * zthree)
K0to3[2] += k2extra
k3extra = np.sum((sqrt3z + kspi) * (sqrt3z - kspi) * ksquared * qpwers)
k3extra *= _PI_SQUARED * _SQRT2PI/(216 * zsix)
K0to3[3] += k3extra
powers_of_n = np.power(n * 1.0, np.arange(len(K0to3)) / 2.0)
K0to3 /= powers_of_n
if not cdf:
K0to3 *= -1
K0to3[0] += 1
Ksum = sum(K0to3)
return Ksum
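# One way to gauge the Pelz-Good approximation error (illustrative, not
# part of the original module) is to compare against the exact small-n
# routines, e.g.
#   for x in (0.05, 0.1, 0.2):
#       print(_kolmogn_PelzGood(100, x) - _kolmogn_DMTW(100, x))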
def _kolmogn(n, x, cdf=True):
"""Computes the CDF (or SF) for the two-sided Kolmogorov-Smirnov statistic.
x must be of type float, n of type integer.
Simard & L'Ecuyer (2011) [7].
"""
if np.isnan(n):
return n # Keep the same type of nan
if int(n) != n or n <= 0:
return np.nan
if x >= 1.0:
return _select_and_clip_prob(1.0, 0.0, cdf=cdf)
if x <= 0.0:
return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
t = n * x
if t <= 1.0: # Ruben-Gambino: 1/2n <= x <= 1/n
if t <= 0.5:
return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
if n <= 140:
prob = np.prod(np.arange(1, n+1) * (1.0/n) * (2*t - 1))
else:
prob = np.exp(_log_nfactorial_div_n_pow_n(n) + n * np.log(2*t-1))
return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
if t >= n - 1: # Ruben-Gambino
prob = 2 * (1.0 - x)**n
return _select_and_clip_prob(1 - prob, prob, cdf=cdf)
if x >= 0.5: # Exact: 2 * smirnov
prob = 2 * scipy.special.smirnov(n, x)
return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf)
nxsquared = t * x
if n <= 140:
if nxsquared <= 0.754693:
prob = _kolmogn_DMTW(n, x, cdf=True)
return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
if nxsquared <= 4:
prob = _kolmogn_Pomeranz(n, x, cdf=True)
return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
# Now use Miller approximation of 2*smirnov
prob = 2 * scipy.special.smirnov(n, x)
return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf)
# Split CDF and SF as they have different cutoffs on nxsquared.
if not cdf:
if nxsquared >= 370.0:
return 0.0
if nxsquared >= 2.2:
prob = 2 * scipy.special.smirnov(n, x)
return _clip_prob(prob)
# Fall through and compute the SF as 1.0-CDF
if nxsquared >= 18.0:
cdfprob = 1.0
elif n <= 100000 and n * x**1.5 <= 1.4:
cdfprob = _kolmogn_DMTW(n, x, cdf=True)
else:
cdfprob = _kolmogn_PelzGood(n, x, cdf=True)
return _select_and_clip_prob(cdfprob, 1.0 - cdfprob, cdf=cdf)
def _kolmogn_p(n, x):
"""Computes the PDF for the two-sided Kolmogorov-Smirnov statistic.
x must be of type float, n of type integer.
"""
if np.isnan(n):
return n # Keep the same type of nan
if int(n) != n or n <= 0:
return np.nan
if x >= 1.0 or x <= 0:
return 0
t = n * x
if t <= 1.0:
# Ruben-Gambino: n!/n^n * (2t-1)^n -> 2 n!/n^n * n^2 * (2t-1)^(n-1)
if t <= 0.5:
return 0.0
if n <= 140:
prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1))
else:
prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n-1) * np.log(2 * t - 1))
return prd * 2 * n**2
if t >= n - 1:
# Ruben-Gambino : 1-2(1-x)**n -> 2n*(1-x)**(n-1)
return 2 * (1.0 - x) ** (n-1) * n
if x >= 0.5:
return 2 * scipy.stats.ksone.pdf(x, n)
# Just take a small delta.
    # Ideally x +/- delta would stay within [i/n, (i+1)/n] for some integer i,
    # as the CDF is a piecewise polynomial of degree n.
    # It has knots at 1/n, 2/n, ..., (n-1)/n
    # and is not a C-infinity function at the knots.
delta = x / 2.0**16
delta = min(delta, x - 1.0/n)
delta = min(delta, 0.5 - x)
def _kk(_x):
return kolmogn(n, _x)
return _derivative(_kk, x, dx=delta, order=5)
def _kolmogni(n, p, q):
"""Computes the PPF/ISF of kolmogn.
    n of type integer, n >= 1
p is the CDF, q the SF, p+q=1
"""
if np.isnan(n):
return n # Keep the same type of nan
if int(n) != n or n <= 0:
return np.nan
if p <= 0:
return 1.0/n
if q <= 0:
return 1.0
delta = np.exp((np.log(p) - scipy.special.loggamma(n+1))/n)
if delta <= 1.0/n:
return (delta + 1.0 / n) / 2
x = -np.expm1(np.log(q/2.0)/n)
if x >= 1 - 1.0/n:
return x
x1 = scu._kolmogci(p)/np.sqrt(n)
x1 = min(x1, 1.0 - 1.0/n)
def _f(x):
return _kolmogn(n, x) - p
return scipy.optimize.brentq(_f, 1.0/n, x1, xtol=1e-14)
def kolmogn(n, x, cdf=True):
"""Computes the CDF for the two-sided Kolmogorov-Smirnov distribution.
The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x),
for a sample of size n drawn from a distribution with CDF F(t), where
    D_n = sup_t |F_n(t) - F(t)|, and
F_n(t) is the Empirical Cumulative Distribution Function of the sample.
Parameters
----------
n : integer, array_like
the number of samples
x : float, array_like
The K-S statistic, float between 0 and 1
cdf : bool, optional
        whether to compute the CDF (default=True) or the SF.
Returns
-------
cdf : ndarray
        CDF (or SF if cdf is False) at the specified locations.
        The return value has the shape of the result of broadcasting n and x.
"""
it = np.nditer([n, x, cdf, None],
op_dtypes=[None, np.float64, np.bool_, np.float64])
for _n, _x, _cdf, z in it:
if np.isnan(_n):
z[...] = _n
continue
if int(_n) != _n:
raise ValueError(f'n is not integral: {_n}')
z[...] = _kolmogn(int(_n), _x, cdf=_cdf)
result = it.operands[-1]
return result
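# Illustrative sanity check (a sketch, not part of the original module):
# for large n, Pr(D_n <= y/sqrt(n)) approaches the asymptotic Kolmogorov
# limit 1 - scipy.special.kolmogorov(y).
#
#     import numpy as np
#     import scipy.special
#     n, y = 10_000, 1.0
#     finite_n = kolmogn(n, y / np.sqrt(n), cdf=True)
#     asymptotic = 1.0 - scipy.special.kolmogorov(y)
#     assert np.isclose(finite_n, asymptotic, atol=1e-2)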
def kolmognp(n, x):
"""Computes the PDF for the two-sided Kolmogorov-Smirnov distribution.
Parameters
----------
n : integer, array_like
the number of samples
x : float, array_like
The K-S statistic, float between 0 and 1
Returns
-------
pdf : ndarray
The PDF at the specified locations
        The return value has the shape of the result of broadcasting n and x.
"""
it = np.nditer([n, x, None])
for _n, _x, z in it:
if np.isnan(_n):
z[...] = _n
continue
if int(_n) != _n:
raise ValueError(f'n is not integral: {_n}')
z[...] = _kolmogn_p(int(_n), _x)
result = it.operands[-1]
return result
def kolmogni(n, q, cdf=True):
"""Computes the PPF(or ISF) for the two-sided Kolmogorov-Smirnov distribution.
Parameters
----------
n : integer, array_like
the number of samples
q : float, array_like
Probabilities, float between 0 and 1
cdf : bool, optional
        whether to compute the PPF (default=True) or the ISF.
Returns
-------
ppf : ndarray
        PPF (or ISF if cdf is False) at the specified locations.
        The return value has the shape of the result of broadcasting n and q.
"""
it = np.nditer([n, q, cdf, None])
for _n, _q, _cdf, z in it:
if np.isnan(_n):
z[...] = _n
continue
if int(_n) != _n:
raise ValueError(f'n is not integral: {_n}')
_pcdf, _psf = (_q, 1-_q) if _cdf else (1-_q, _q)
z[...] = _kolmogni(int(_n), _pcdf, _psf)
result = it.operands[-1]
return result
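# Illustrative round-trip check (a sketch, not part of the original module):
# `kolmogni` inverts `kolmogn`, so ppf(cdf(x)) should recover x up to the
# root-finding tolerance used in `_kolmogni`.
#
#     p = kolmogn(100, 0.1, cdf=True)
#     x = kolmogni(100, p, cdf=True)
#     assert abs(float(x) - 0.1) < 1e-8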
| 20100 | 32.445923 | 84 | py |
scipy | scipy-main/scipy/stats/_discrete_distns.py |
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from functools import partial
from scipy import special
from scipy.special import entr, logsumexp, betaln, gammaln as gamln, zeta
from scipy._lib._util import _lazywhere, rng_integers
from scipy.interpolate import interp1d
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (rv_discrete, get_distribution_names,
_check_shape, _ShapeInfo)
import scipy.stats._boost as _boost
from ._biasedurn import (_PyFishersNCHypergeometric,
_PyWalleniusNCHypergeometric,
_PyStochasticLib3)
def _isintegral(x):
return x == np.round(x)
class binom_gen(rv_discrete):
r"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is:
.. math::
f(k) = \binom{n}{k} p^k (1-p)^{n-k}
for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1`
`binom` takes :math:`n` and :math:`p` as shape parameters,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
%(after_notes)s
%(example)s
See Also
--------
hypergeom, nbinom, nhypergeom
"""
def _shape_info(self):
return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("p", False, (0, 1), (True, True))]
def _rvs(self, n, p, size=None, random_state=None):
return random_state.binomial(n, p, size)
def _argcheck(self, n, p):
return (n >= 0) & _isintegral(n) & (p >= 0) & (p <= 1)
def _get_support(self, n, p):
return self.a, n
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
# binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
return _boost._binom_pdf(x, n, p)
def _cdf(self, x, n, p):
k = floor(x)
return _boost._binom_cdf(k, n, p)
def _sf(self, x, n, p):
k = floor(x)
return _boost._binom_sf(k, n, p)
def _isf(self, x, n, p):
return _boost._binom_isf(x, n, p)
def _ppf(self, q, n, p):
return _boost._binom_ppf(q, n, p)
def _stats(self, n, p, moments='mv'):
mu = _boost._binom_mean(n, p)
var = _boost._binom_variance(n, p)
g1, g2 = None, None
if 's' in moments:
g1 = _boost._binom_skewness(n, p)
if 'k' in moments:
g2 = _boost._binom_kurtosis_excess(n, p)
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
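# Illustrative check (a sketch, not part of the original module): the
# Boost-backed pmf agrees with the textbook formula from the docstring.
#
#     from scipy.special import comb
#     n, p, k = 10, 0.3, 4
#     assert np.isclose(binom.pmf(k, n, p),
#                       comb(n, k) * p**k * (1 - p)**(n - k))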
class bernoulli_gen(binom_gen):
r"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is:
.. math::
f(k) = \begin{cases}1-p &\text{if } k = 0\\
p &\text{if } k = 1\end{cases}
for :math:`k` in :math:`\{0, 1\}`, :math:`0 \leq p \leq 1`
`bernoulli` takes :math:`p` as shape parameter,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("p", False, (0, 1), (True, True))]
def _rvs(self, p, size=None, random_state=None):
return binom_gen._rvs(self, 1, p, size=size, random_state=random_state)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _get_support(self, p):
        # Overrides binom_gen._get_support.
return self.a, self.b
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
# bernoulli.pmf(k) = 1-p if k = 0
# = p if k = 1
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _isf(self, x, p):
return binom._isf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class betabinom_gen(rv_discrete):
r"""A beta-binomial discrete random variable.
%(before_notes)s
Notes
-----
The beta-binomial distribution is a binomial distribution with a
probability of success `p` that follows a beta distribution.
The probability mass function for `betabinom` is:
.. math::
f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)}
for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`,
:math:`b > 0`, where :math:`B(a, b)` is the beta function.
`betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.
References
----------
.. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
%(after_notes)s
.. versionadded:: 1.4.0
See Also
--------
beta, binom
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("a", False, (0, np.inf), (False, False)),
_ShapeInfo("b", False, (0, np.inf), (False, False))]
def _rvs(self, n, a, b, size=None, random_state=None):
p = random_state.beta(a, b, size)
return random_state.binomial(n, p, size)
def _get_support(self, n, a, b):
return 0, n
def _argcheck(self, n, a, b):
return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0)
def _logpmf(self, x, n, a, b):
k = floor(x)
combiln = -log(n + 1) - betaln(n - k + 1, k + 1)
return combiln + betaln(k + a, n - k + b) - betaln(a, b)
def _pmf(self, x, n, a, b):
return exp(self._logpmf(x, n, a, b))
def _stats(self, n, a, b, moments='mv'):
e_p = a / (a + b)
e_q = 1 - e_p
mu = n * e_p
var = n * (a + b + n) * e_p * e_q / (a + b + 1)
g1, g2 = None, None
if 's' in moments:
g1 = 1.0 / sqrt(var)
g1 *= (a + b + 2 * n) * (b - a)
g1 /= (a + b + 2) * (a + b)
if 'k' in moments:
g2 = (a + b).astype(e_p.dtype)
g2 *= (a + b - 1 + 6 * n)
g2 += 3 * a * b * (n - 2)
g2 += 6 * n ** 2
g2 -= 3 * e_p * b * n * (6 - n)
g2 -= 18 * e_p * e_q * n ** 2
g2 *= (a + b) ** 2 * (1 + a + b)
g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n))
g2 -= 3
return mu, var, g1, g2
betabinom = betabinom_gen(name='betabinom')
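# Illustrative check (a sketch, not part of the original module): with a
# uniform prior on p (a = b = 1), the beta-binomial pmf is uniform on
# {0, ..., n}.
#
#     n = 6
#     k = np.arange(n + 1)
#     assert np.allclose(betabinom.pmf(k, n, 1, 1), 1.0 / (n + 1))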
class nbinom_gen(rv_discrete):
r"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
    The negative binomial distribution describes a sequence of i.i.d. Bernoulli
    trials, repeated until a predefined, non-random number of successes occurs.
The probability mass function of the number of failures for `nbinom` is:
.. math::
f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k
for :math:`k \ge 0`, :math:`0 < p \leq 1`
`nbinom` takes :math:`n` and :math:`p` as shape parameters where :math:`n`
is the number of successes, :math:`p` is the probability of a single
success, and :math:`1-p` is the probability of a single failure.
Another common parameterization of the negative binomial distribution is
in terms of the mean number of failures :math:`\mu` to achieve :math:`n`
successes. The mean :math:`\mu` is related to the probability of success
as
.. math::
p = \frac{n}{n + \mu}
The number of successes :math:`n` may also be specified in terms of a
"dispersion", "heterogeneity", or "aggregation" parameter :math:`\alpha`,
which relates the mean :math:`\mu` to the variance :math:`\sigma^2`,
e.g. :math:`\sigma^2 = \mu + \alpha \mu^2`. Regardless of the convention
used for :math:`\alpha`,
.. math::
p &= \frac{\mu}{\sigma^2} \\
n &= \frac{\mu^2}{\sigma^2 - \mu}
%(after_notes)s
%(example)s
See Also
--------
hypergeom, binom, nhypergeom
"""
def _shape_info(self):
return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("p", False, (0, 1), (True, True))]
def _rvs(self, n, p, size=None, random_state=None):
return random_state.negative_binomial(n, p, size)
def _argcheck(self, n, p):
return (n > 0) & (p > 0) & (p <= 1)
def _pmf(self, x, n, p):
# nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
return _boost._nbinom_pdf(x, n, p)
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return _boost._nbinom_cdf(k, n, p)
def _logcdf(self, x, n, p):
k = floor(x)
cdf = self._cdf(k, n, p)
cond = cdf > 0.5
def f1(k, n, p):
return np.log1p(-special.betainc(k + 1, n, 1 - p))
# do calc in place
logcdf = cdf
with np.errstate(divide='ignore'):
logcdf[cond] = f1(k[cond], n[cond], p[cond])
logcdf[~cond] = np.log(cdf[~cond])
return logcdf
def _sf(self, x, n, p):
k = floor(x)
return _boost._nbinom_sf(k, n, p)
def _isf(self, x, n, p):
with np.errstate(over='ignore'): # see gh-17432
return _boost._nbinom_isf(x, n, p)
def _ppf(self, q, n, p):
with np.errstate(over='ignore'): # see gh-17432
return _boost._nbinom_ppf(q, n, p)
def _stats(self, n, p):
return (
_boost._nbinom_mean(n, p),
_boost._nbinom_variance(n, p),
_boost._nbinom_skewness(n, p),
_boost._nbinom_kurtosis_excess(n, p),
)
nbinom = nbinom_gen(name='nbinom')
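# Illustrative check (a sketch, not part of the original module): the mean
# parameterization described in the docstring, p = n / (n + mu), recovers
# the mean number of failures mu.
#
#     n, mu = 5, 10.0
#     p = n / (n + mu)
#     assert np.isclose(nbinom.mean(n, p), mu)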
class geom_gen(rv_discrete):
r"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is:
.. math::
f(k) = (1-p)^{k-1} p
for :math:`k \ge 1`, :math:`0 < p \leq 1`
`geom` takes :math:`p` as shape parameter,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
%(after_notes)s
See Also
--------
planck
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("p", False, (0, 1), (True, True))]
def _rvs(self, p, size=None, random_state=None):
return random_state.geometric(p, size=size)
def _argcheck(self, p):
return (p <= 1) & (p > 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log1p(-q) / log1p(-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
def _entropy(self, p):
return -np.log(p) - np.log1p(-p) * (1.0-p) / p
geom = geom_gen(a=1, name='geom', longname="A geometric")
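# Illustrative check (a sketch, not part of the original module): the
# survival function Pr(X > k) of the geometric distribution is (1-p)**k
# for integer k.
#
#     p, k = 0.3, 4
#     assert np.isclose(geom.sf(k, p), (1 - p)**k)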
class hypergeom_gen(rv_discrete):
r"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
    `M` is the total number of objects, `n` is the total number of Type I objects.
The random variate represents the number of Type I objects in `N` drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}
{\binom{M}{N}}
for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
coefficients are defined as,
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
%(after_notes)s
Examples
--------
>>> import numpy as np
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. For example, to obtain the cumulative distribution
    function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
See Also
--------
nhypergeom, binom, nbinom
"""
def _shape_info(self):
return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("N", True, (0, np.inf), (True, False))]
def _rvs(self, M, n, N, size=None, random_state=None):
return random_state.hypergeometric(n, M-n, N, size=size)
def _get_support(self, M, n, N):
return np.maximum(N-(M-n), 0), np.minimum(n, N)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
cond &= _isintegral(M) & _isintegral(n) & _isintegral(N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) -
betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) -
betaln(tot+1, 1))
return result
def _pmf(self, k, M, n, N):
return _boost._hypergeom_pdf(k, n, N, M)
def _cdf(self, k, M, n, N):
return _boost._hypergeom_cdf(k, n, N, M)
def _stats(self, M, n, N):
M, n, N = 1. * M, 1. * n, 1. * N
m = M - n
# Boost kurtosis_excess doesn't return the same as the value
# computed here.
g2 = M * (M + 1) - 6. * N * (M - N) - 6. * n * m
g2 *= (M - 1) * M * M
g2 += 6. * n * N * (M - N) * m * (5. * M - 6)
g2 /= n * N * (M - N) * m * (M - 2.) * (M - 3.)
return (
_boost._hypergeom_mean(n, N, M),
_boost._hypergeom_variance(n, N, M),
_boost._hypergeom_skewness(n, N, M),
g2,
)
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
return _boost._hypergeom_sf(k, n, N, M)
def _logsf(self, k, M, n, N):
res = []
for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-cdf)
res.append(log1p(-exp(self.logcdf(quant, tot, good, draw))))
else:
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
def _logcdf(self, k, M, n, N):
res = []
for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-sf)
res.append(log1p(-exp(self.logsf(quant, tot, good, draw))))
else:
# Integration over probability mass function using logsumexp
k2 = np.arange(0, quant + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
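# Illustrative check (a sketch, not part of the original module): the
# Boost-backed pmf agrees with the binomial-coefficient formula from the
# docstring.
#
#     from scipy.special import comb
#     M, n, N, k = 20, 7, 12, 3
#     expected = comb(n, k) * comb(M - n, N - k) / comb(M, N)
#     assert np.isclose(hypergeom.pmf(k, M, n, N), expected)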
class nhypergeom_gen(rv_discrete):
r"""A negative hypergeometric discrete random variable.
    Consider a box containing :math:`M` balls: :math:`n` red and
:math:`M-n` blue. We randomly sample balls from the box, one
at a time and *without* replacement, until we have picked :math:`r`
blue balls. `nhypergeom` is the distribution of the number of
red balls :math:`k` we have picked.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `r`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: f(k; M, n, r) = \frac{{{k+r-1}\choose{k}}{{M-r-k}\choose{n-k}}}
{{M \choose n}}
for :math:`k \in [0, n]`, :math:`n \in [0, M]`, :math:`r \in [0, M-n]`,
and the binomial coefficient is:
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
It is equivalent to observing :math:`k` successes in :math:`k+r-1`
    samples, with the :math:`(k+r)`-th sample being a failure. The former
can be modelled as a hypergeometric distribution. The probability
of the latter is simply the number of failures remaining
:math:`M-n-(r-1)` divided by the size of the remaining population
:math:`M-(k+r-1)`. This relationship can be shown as:
.. math:: NHG(k;M,n,r) = HG(k;M,n,k+r-1)\frac{(M-n-(r-1))}{(M-(k+r-1))}
where :math:`NHG` is probability mass function (PMF) of the
negative hypergeometric distribution and :math:`HG` is the
PMF of the hypergeometric distribution.
%(after_notes)s
Examples
--------
>>> import numpy as np
>>> from scipy.stats import nhypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs.
Then if we want to know the probability of finding a given number
of dogs (successes) in a sample with exactly 12 animals that
aren't dogs (failures), we can initialize a frozen distribution
and plot the probability mass function:
>>> M, n, r = [20, 7, 12]
>>> rv = nhypergeom(M, n, r)
>>> x = np.arange(0, n+2)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group with given 12 failures')
>>> ax.set_ylabel('nhypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `nhypergeom`
    methods directly. For example, to obtain the probability mass
    function, use:
>>> prb = nhypergeom.pmf(x, M, n, r)
And to generate random numbers:
>>> R = nhypergeom.rvs(M, n, r, size=10)
To verify the relationship between `hypergeom` and `nhypergeom`, use:
>>> from scipy.stats import hypergeom, nhypergeom
>>> M, n, r = 45, 13, 8
>>> k = 6
>>> nhypergeom.pmf(k, M, n, r)
0.06180776620271643
>>> hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1))
0.06180776620271644
See Also
--------
hypergeom, binom, nbinom
References
----------
.. [1] Negative Hypergeometric Distribution on Wikipedia
https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution
.. [2] Negative Hypergeometric Distribution from
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Negativehypergeometric.pdf
"""
def _shape_info(self):
return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("r", True, (0, np.inf), (True, False))]
def _get_support(self, M, n, r):
return 0, n
def _argcheck(self, M, n, r):
cond = (n >= 0) & (n <= M) & (r >= 0) & (r <= M-n)
cond &= _isintegral(M) & _isintegral(n) & _isintegral(r)
return cond
def _rvs(self, M, n, r, size=None, random_state=None):
@_vectorize_rvs_over_shapes
def _rvs1(M, n, r, size, random_state):
# invert cdf by calculating all values in support, scalar M, n, r
a, b = self.support(M, n, r)
ks = np.arange(a, b+1)
cdf = self.cdf(ks, M, n, r)
ppf = interp1d(cdf, ks, kind='next', fill_value='extrapolate')
rvs = ppf(random_state.uniform(size=size)).astype(int)
if size is None:
return rvs.item()
return rvs
return _rvs1(M, n, r, size=size, random_state=random_state)
def _logpmf(self, k, M, n, r):
cond = ((r == 0) & (k == 0))
result = _lazywhere(~cond, (k, M, n, r),
lambda k, M, n, r:
(-betaln(k+1, r) + betaln(k+r, 1) -
betaln(n-k+1, M-r-n+1) + betaln(M-r-k+1, 1) +
betaln(n+1, M-n+1) - betaln(M+1, 1)),
fillvalue=0.0)
return result
def _pmf(self, k, M, n, r):
# same as the following but numerically more precise
# return comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n)
return exp(self._logpmf(k, M, n, r))
def _stats(self, M, n, r):
# Promote the datatype to at least float
# mu = rn / (M-n+1)
M, n, r = 1.*M, 1.*n, 1.*r
mu = r*n / (M-n+1)
var = r*(M+1)*n / ((M-n+1)*(M-n+2)) * (1 - r / (M-n+1))
# The skew and kurtosis are mathematically
# intractable so return `None`. See [2]_.
g1, g2 = None, None
return mu, var, g1, g2
nhypergeom = nhypergeom_gen(name='nhypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
r"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is:
.. math::
f(k) = - \frac{p^k}{k \log(1-p)}
for :math:`k \ge 1`, :math:`0 < p < 1`
`logser` takes :math:`p` as shape parameter,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("p", False, (0, 1), (True, True))]
def _rvs(self, p, size=None, random_state=None):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return random_state.logseries(p, size=size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
# logser.pmf(k) = - p**k / (k*log(1-p))
return -np.power(p, k) * 1.0 / k / special.log1p(-p)
def _stats(self, p):
r = special.log1p(-p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
r"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is:
.. math::
f(k) = \exp(-\mu) \frac{\mu^k}{k!}
for :math:`k \ge 0`.
`poisson` takes :math:`\mu \geq 0` as shape parameter.
When :math:`\mu = 0`, the ``pmf`` method
returns ``1.0`` at quantile :math:`k = 0`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("mu", False, (0, np.inf), (True, False))]
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu, size=None, random_state=None):
return random_state.poisson(mu, size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
# poisson.pmf(k) = exp(-mu) * mu**k / k!
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
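# Illustrative check (a sketch, not part of the original module): the
# mu = 0 edge case noted in the docstring puts all probability mass at
# k = 0.
#
#     assert poisson.pmf(0, 0) == 1.0
#     assert poisson.pmf(1, 0) == 0.0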
class planck_gen(rv_discrete):
r"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is:
.. math::
f(k) = (1-\exp(-\lambda)) \exp(-\lambda k)
for :math:`k \ge 0` and :math:`\lambda > 0`.
`planck` takes :math:`\lambda` as shape parameter. The Planck distribution
can be written as a geometric distribution (`geom`) with
:math:`p = 1 - \exp(-\lambda)` shifted by ``loc = -1``.
%(after_notes)s
See Also
--------
geom
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("lambda", False, (0, np.inf), (False, False))]
def _argcheck(self, lambda_):
return lambda_ > 0
def _pmf(self, k, lambda_):
return -expm1(-lambda_)*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return -expm1(-lambda_*(k+1))
def _sf(self, x, lambda_):
return exp(self._logsf(x, lambda_))
def _logsf(self, x, lambda_):
k = floor(x)
return -lambda_*(k+1)
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(*(self._get_support(lambda_)))
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _rvs(self, lambda_, size=None, random_state=None):
# use relation to geometric distribution for sampling
p = -expm1(-lambda_)
return random_state.geometric(p, size=size) - 1.0
def _stats(self, lambda_):
mu = 1/expm1(lambda_)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
C = -expm1(-lambda_)
return lambda_*exp(-lambda_)/C - log(C)
planck = planck_gen(a=0, name='planck', longname='A discrete exponential ')
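# Illustrative check (a sketch, not part of the original module): as the
# docstring notes, planck is a geometric distribution with
# p = 1 - exp(-lambda) shifted by loc = -1.
#
#     lam, k = 0.7, 3
#     p = -np.expm1(-lam)
#     assert np.isclose(planck.pmf(k, lam), geom.pmf(k + 1, p))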
class boltzmann_gen(rv_discrete):
r"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is:
.. math::
f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N))
for :math:`k = 0,..., N-1`.
`boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("lambda_", False, (0, np.inf), (False, False)),
_ShapeInfo("N", True, (0, np.inf), (False, False))]
def _argcheck(self, lambda_, N):
return (lambda_ > 0) & (N > 0) & _isintegral(N)
def _get_support(self, lambda_, N):
return self.a, N - 1
def _pmf(self, k, lambda_, N):
# boltzmann.pmf(k) =
        # (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann', a=0,
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
r"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is:
.. math::
f(k) = \frac{1}{\texttt{high} - \texttt{low}}
for :math:`k \in \{\texttt{low}, \dots, \texttt{high} - 1\}`.
`randint` takes :math:`\texttt{low}` and :math:`\texttt{high}` as shape
parameters.
%(after_notes)s
Examples
--------
>>> import numpy as np
>>> from scipy.stats import randint
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
>>> low, high = 7, 31
>>> mean, var, skew, kurt = randint.stats(low, high, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(low - 5, high + 5)
>>> ax.plot(x, randint.pmf(x, low, high), 'bo', ms=8, label='randint pmf')
>>> ax.vlines(x, 0, randint.pmf(x, low, high), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function) to
fix the shape and location. This returns a "frozen" RV object holding the
given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = randint(low, high)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-',
... lw=1, label='frozen pmf')
>>> ax.legend(loc='lower center')
>>> plt.show()
Check the relationship between the cumulative distribution function
(``cdf``) and its inverse, the percent point function (``ppf``):
>>> q = np.arange(low, high)
>>> p = randint.cdf(q, low, high)
>>> np.allclose(q, randint.ppf(p, low, high))
True
Generate random numbers:
>>> r = randint.rvs(low, high, size=1000)
"""
def _shape_info(self):
return [_ShapeInfo("low", True, (-np.inf, np.inf), (False, False)),
_ShapeInfo("high", True, (-np.inf, np.inf), (False, False))]
def _argcheck(self, low, high):
return (high > low) & _isintegral(low) & _isintegral(high)
def _get_support(self, low, high):
return low, high-1
def _pmf(self, k, low, high):
# randint.pmf(k) = 1./(high - low)
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high, size=None, random_state=None):
"""An array of *size* random integers >= ``low`` and < ``high``."""
if np.asarray(low).size == 1 and np.asarray(high).size == 1:
# no need to vectorize in that case
return rng_integers(random_state, low, high, size=size)
if size is not None:
# NumPy's RandomState.randint() doesn't broadcast its arguments.
# Use `broadcast_to()` to extend the shapes of low and high
# up to size. Then we can use the numpy.vectorize'd
# randint without needing to pass it a `size` argument.
low = np.broadcast_to(low, size)
high = np.broadcast_to(high, size)
randint = np.vectorize(partial(rng_integers, random_state),
otypes=[np.int_])
return randint(low, high)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
r"""A Zipf (Zeta) discrete random variable.
%(before_notes)s
See Also
--------
zipfian
Notes
-----
The probability mass function for `zipf` is:
.. math::
f(k, a) = \frac{1}{\zeta(a) k^a}
for :math:`k \ge 1`, :math:`a > 1`.
`zipf` takes :math:`a > 1` as shape parameter. :math:`\zeta` is the
Riemann zeta function (`scipy.special.zeta`)
The Zipf distribution is also known as the zeta distribution, which is
a special case of the Zipfian distribution (`zipfian`).
%(after_notes)s
References
----------
.. [1] "Zeta Distribution", Wikipedia,
https://en.wikipedia.org/wiki/Zeta_distribution
%(example)s
Confirm that `zipf` is the large `n` limit of `zipfian`.
>>> import numpy as np
>>> from scipy.stats import zipfian
>>> k = np.arange(11)
>>> np.allclose(zipf.pmf(k, a), zipfian.pmf(k, a, n=10000000))
True
"""
def _shape_info(self):
return [_ShapeInfo("a", False, (1, np.inf), (False, False))]
def _rvs(self, a, size=None, random_state=None):
return random_state.zipf(a, size=size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
# zipf.pmf(k, a) = 1/(zeta(a) * k**a)
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
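# Illustrative check (a sketch, not part of the original module): the pmf
# matches 1 / (zeta(a) * k**a) from the docstring.
#
#     a, k = 2.5, 3
#     assert np.isclose(zipf.pmf(k, a), 1.0 / (zeta(a) * k**a))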
def _gen_harmonic_gt1(n, a):
"""Generalized harmonic number, a > 1"""
# See https://en.wikipedia.org/wiki/Harmonic_number; search for "hurwitz"
return zeta(a, 1) - zeta(a, n+1)
def _gen_harmonic_leq1(n, a):
"""Generalized harmonic number, a <= 1"""
if not np.size(n):
return n
n_max = np.max(n) # loop starts at maximum of all n
out = np.zeros_like(a, dtype=float)
# add terms of harmonic series; starting from smallest to avoid roundoff
for i in np.arange(n_max, 0, -1, dtype=float):
mask = i <= n # don't add terms after nth
out[mask] += 1/i**a[mask]
return out
def _gen_harmonic(n, a):
"""Generalized harmonic number"""
n, a = np.broadcast_arrays(n, a)
return _lazywhere(a > 1, (n, a),
f=_gen_harmonic_gt1, f2=_gen_harmonic_leq1)
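# Illustrative check (a sketch, not part of the original module): the
# a <= 1 branch agrees with a direct partial sum of 1/k**a.
#
#     n, a = np.array([10]), np.array([0.5])
#     direct = sum(1.0 / k**0.5 for k in range(1, 11))
#     assert np.allclose(_gen_harmonic(n, a), direct)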
class zipfian_gen(rv_discrete):
r"""A Zipfian discrete random variable.
%(before_notes)s
See Also
--------
zipf
Notes
-----
The probability mass function for `zipfian` is:
.. math::
f(k, a, n) = \frac{1}{H_{n,a} k^a}
for :math:`k \in \{1, 2, \dots, n-1, n\}`, :math:`a \ge 0`,
:math:`n \in \{1, 2, 3, \dots\}`.
`zipfian` takes :math:`a` and :math:`n` as shape parameters.
:math:`H_{n,a}` is the :math:`n`:sup:`th` generalized harmonic
number of order :math:`a`.
The Zipfian distribution reduces to the Zipf (zeta) distribution as
:math:`n \rightarrow \infty`.
%(after_notes)s
References
----------
.. [1] "Zipf's Law", Wikipedia, https://en.wikipedia.org/wiki/Zipf's_law
.. [2] Larry Leemis, "Zipf Distribution", Univariate Distribution
Relationships. http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
%(example)s
Confirm that `zipfian` reduces to `zipf` for large `n`, `a > 1`.
>>> import numpy as np
>>> from scipy.stats import zipf
>>> k = np.arange(11)
>>> np.allclose(zipfian.pmf(k, a=3.5, n=10000000), zipf.pmf(k, a=3.5))
True
"""
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (True, False)),
_ShapeInfo("n", True, (0, np.inf), (False, False))]
def _argcheck(self, a, n):
# we need np.asarray here because moment (maybe others) don't convert
return (a >= 0) & (n > 0) & (n == np.asarray(n, dtype=int))
def _get_support(self, a, n):
return 1, n
def _pmf(self, k, a, n):
return 1.0 / _gen_harmonic(n, a) / k**a
def _cdf(self, k, a, n):
return _gen_harmonic(k, a) / _gen_harmonic(n, a)
def _sf(self, k, a, n):
        k = k + 1  # to match SciPy convention
# see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
return ((k**a*(_gen_harmonic(n, a) - _gen_harmonic(k, a)) + 1)
/ (k**a*_gen_harmonic(n, a)))
def _stats(self, a, n):
        # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
Hna = _gen_harmonic(n, a)
Hna1 = _gen_harmonic(n, a-1)
Hna2 = _gen_harmonic(n, a-2)
Hna3 = _gen_harmonic(n, a-3)
Hna4 = _gen_harmonic(n, a-4)
mu1 = Hna1/Hna
mu2n = (Hna2*Hna - Hna1**2)
mu2d = Hna**2
mu2 = mu2n / mu2d
g1 = (Hna3/Hna - 3*Hna1*Hna2/Hna**2 + 2*Hna1**3/Hna**3)/mu2**(3/2)
g2 = (Hna**3*Hna4 - 4*Hna**2*Hna1*Hna3 + 6*Hna*Hna1**2*Hna2
- 3*Hna1**4) / mu2n**2
g2 -= 3
return mu1, mu2, g1, g2
zipfian = zipfian_gen(a=1, name='zipfian', longname='A Zipfian')
class dlaplace_gen(rv_discrete):
r"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is:
.. math::
f(k) = \tanh(a/2) \exp(-a |k|)
for integers :math:`k` and :math:`a > 0`.
`dlaplace` takes :math:`a` as shape parameter.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
def _pmf(self, k, a):
# dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
def f(k, a):
return 1.0 - exp(-a * k) / (exp(a) + 1)
def f2(k, a):
return exp(a * (k + 1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)),
log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
def _rvs(self, a, size=None, random_state=None):
# The discrete Laplace is equivalent to the two-sided geometric
# distribution with PMF:
# f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k)
# Reference:
# https://www.sciencedirect.com/science/
# article/abs/pii/S0378375804003519
# Furthermore, the two-sided geometric distribution is
# equivalent to the difference between two iid geometric
# distributions.
# Reference (page 179):
# https://pdfs.semanticscholar.org/61b3/
# b99f466815808fd0d03f5d2791eea8b541a1.pdf
# Thus, we can leverage the following:
# 1) alpha = e^-a
# 2) probability_of_success = 1 - alpha (Bernoulli trial)
probOfSuccess = -np.expm1(-np.asarray(a))
x = random_state.geometric(probOfSuccess, size=size)
y = random_state.geometric(probOfSuccess, size=size)
return x - y
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
r"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with
expected values :math:`\lambda_1` and :math:`\lambda_2`. Then,
:math:`k_1 - k_2` follows a Skellam distribution with parameters
:math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and
:math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where
:math:`\rho` is the correlation coefficient between :math:`k_1` and
:math:`k_2`. If the two Poisson-distributed r.v. are independent then
:math:`\rho = 0`.
Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive.
For details see: https://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("mu1", False, (0, np.inf), (False, False)),
_ShapeInfo("mu2", False, (0, np.inf), (False, False))]
def _rvs(self, mu1, mu2, size=None, random_state=None):
n = size
return (random_state.poisson(mu1, n) -
random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
with np.errstate(over='ignore'): # see gh-17432
px = np.where(x < 0,
_boost._ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_boost._ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
with np.errstate(over='ignore'): # see gh-17432
px = np.where(x < 0,
_boost._ncx2_cdf(2*mu2, -2*x, 2*mu1),
1 - _boost._ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
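# Illustrative check (a sketch, not part of the original module): the
# difference of two independent Poisson variates has mean mu1 - mu2 and
# variance mu1 + mu2.
#
#     mu1, mu2 = 3.0, 1.5
#     m, v = skellam.stats(mu1, mu2, moments='mv')
#     assert np.isclose(m, mu1 - mu2) and np.isclose(v, mu1 + mu2)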
class yulesimon_gen(rv_discrete):
r"""A Yule-Simon discrete random variable.
%(before_notes)s
Notes
-----
    The probability mass function for `yulesimon` is:
.. math::
f(k) = \alpha B(k, \alpha+1)
for :math:`k=1,2,3,...`, where :math:`\alpha>0`.
Here :math:`B` refers to the `scipy.special.beta` function.
The sampling of random variates is based on pg 553, Section 6.3 of [1]_.
Our notation maps to the referenced logic via :math:`\alpha=a-1`.
For details see the wikipedia entry [2]_.
References
----------
.. [1] Devroye, Luc. "Non-uniform Random Variate Generation",
(1986) Springer, New York.
.. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("alpha", False, (0, np.inf), (False, False))]
def _rvs(self, alpha, size=None, random_state=None):
E1 = random_state.standard_exponential(size)
E2 = random_state.standard_exponential(size)
ans = ceil(-E1 / log1p(-exp(-E2 / alpha)))
return ans
def _pmf(self, x, alpha):
return alpha * special.beta(x, alpha + 1)
def _argcheck(self, alpha):
return (alpha > 0)
def _logpmf(self, x, alpha):
return log(alpha) + special.betaln(x, alpha + 1)
def _cdf(self, x, alpha):
return 1 - x * special.beta(x, alpha + 1)
def _sf(self, x, alpha):
return x * special.beta(x, alpha + 1)
def _logsf(self, x, alpha):
return log(x) + special.betaln(x, alpha + 1)
def _stats(self, alpha):
mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1))
mu2 = np.where(alpha > 2,
alpha**2 / ((alpha - 2.0) * (alpha - 1)**2),
np.inf)
mu2 = np.where(alpha <= 1, np.nan, mu2)
g1 = np.where(alpha > 3,
sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)),
np.inf)
g1 = np.where(alpha <= 2, np.nan, g1)
g2 = np.where(alpha > 4,
alpha + 3 + ((alpha**3 - 49 * alpha - 22) /
(alpha * (alpha - 4) * (alpha - 3))),
np.inf)
g2 = np.where(alpha <= 2, np.nan, g2)
return mu, mu2, g1, g2
yulesimon = yulesimon_gen(name='yulesimon', a=1)
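# Illustrative check (a sketch, not part of the original module): the pmf
# matches alpha * B(k, alpha + 1) from the docstring.
#
#     alpha, k = 2.0, 3
#     assert np.isclose(yulesimon.pmf(k, alpha),
#                       alpha * special.beta(k, alpha + 1))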
def _vectorize_rvs_over_shapes(_rvs1):
"""Decorator that vectorizes _rvs method to work on ndarray shapes"""
# _rvs1 must be a _function_ that accepts _scalar_ args as positional
# arguments, `size` and `random_state` as keyword arguments.
# _rvs1 must return a random variate array with shape `size`. If `size` is
# None, _rvs1 must return a scalar.
# When applied to _rvs1, this decorator broadcasts ndarray args
# and loops over them, calling _rvs1 for each set of scalar args.
# For usage example, see _nchypergeom_gen
def _rvs(*args, size, random_state):
_rvs1_size, _rvs1_indices = _check_shape(args[0].shape, size)
size = np.array(size)
_rvs1_size = np.array(_rvs1_size)
_rvs1_indices = np.array(_rvs1_indices)
if np.all(_rvs1_indices): # all args are scalars
return _rvs1(*args, size, random_state)
out = np.empty(size)
# out.shape can mix dimensions associated with arg_shape and _rvs1_size
# Sort them to arg_shape + _rvs1_size for easy indexing of dimensions
# corresponding with the different sets of scalar args
j0 = np.arange(out.ndim)
j1 = np.hstack((j0[~_rvs1_indices], j0[_rvs1_indices]))
out = np.moveaxis(out, j1, j0)
for i in np.ndindex(*size[~_rvs1_indices]):
# arg can be squeezed because singleton dimensions will be
# associated with _rvs1_size, not arg_shape per _check_shape
out[i] = _rvs1(*[np.squeeze(arg)[i] for arg in args],
_rvs1_size, random_state)
return np.moveaxis(out, j0, j1) # move axes back before returning
return _rvs
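# Illustrative sketch of intended usage (the kernel below is hypothetical,
# not part of the module; see _nchypergeom_gen._rvs for a real example):
# the decorator broadcasts ndarray shape parameters and calls the scalar
# kernel once per parameter combination.
#
#     @_vectorize_rvs_over_shapes
#     def _rvs1(mu, size, random_state):
#         # scalar mu; must return an array of shape `size`
#         return random_state.poisson(mu, size)
#
#     rvs = _rvs1(np.array([1.0, 5.0]), size=(3, 2),
#                 random_state=np.random.RandomState(0))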
class _nchypergeom_gen(rv_discrete):
r"""A noncentral hypergeometric discrete random variable.
For subclassing by nchypergeom_fisher_gen and nchypergeom_wallenius_gen.
"""
rvs_name = None
dist = None
def _shape_info(self):
return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("N", True, (0, np.inf), (True, False)),
_ShapeInfo("odds", False, (0, np.inf), (False, False))]
def _get_support(self, M, n, N, odds):
N, m1, n = M, n, N # follow Wikipedia notation
m2 = N - m1
x_min = np.maximum(0, n - m2)
x_max = np.minimum(n, m1)
return x_min, x_max
def _argcheck(self, M, n, N, odds):
        M, n = np.asarray(M), np.asarray(n)
N, odds = np.asarray(N), np.asarray(odds)
cond1 = (M.astype(int) == M) & (M >= 0)
cond2 = (n.astype(int) == n) & (n >= 0)
cond3 = (N.astype(int) == N) & (N >= 0)
cond4 = odds > 0
cond5 = N <= M
cond6 = n <= M
return cond1 & cond2 & cond3 & cond4 & cond5 & cond6
def _rvs(self, M, n, N, odds, size=None, random_state=None):
@_vectorize_rvs_over_shapes
def _rvs1(M, n, N, odds, size, random_state):
length = np.prod(size)
urn = _PyStochasticLib3()
rv_gen = getattr(urn, self.rvs_name)
rvs = rv_gen(N, n, M, odds, length, random_state)
rvs = rvs.reshape(size)
return rvs
return _rvs1(M, n, N, odds, size=size, random_state=random_state)
def _pmf(self, x, M, n, N, odds):
x, M, n, N, odds = np.broadcast_arrays(x, M, n, N, odds)
if x.size == 0: # np.vectorize doesn't work with zero size input
return np.empty_like(x)
@np.vectorize
def _pmf1(x, M, n, N, odds):
urn = self.dist(N, n, M, odds, 1e-12)
return urn.probability(x)
return _pmf1(x, M, n, N, odds)
def _stats(self, M, n, N, odds, moments):
@np.vectorize
def _moments1(M, n, N, odds):
urn = self.dist(N, n, M, odds, 1e-12)
return urn.moments()
m, v = (_moments1(M, n, N, odds) if ("m" in moments or "v" in moments)
else (None, None))
s, k = None, None
return m, v, s, k
class nchypergeom_fisher_gen(_nchypergeom_gen):
r"""A Fisher's noncentral hypergeometric discrete random variable.
Fisher's noncentral hypergeometric distribution models drawing objects of
two types from a bin. `M` is the total number of objects, `n` is the
number of Type I objects, and `odds` is the odds ratio: the odds of
selecting a Type I object rather than a Type II object when there is only
one object of each type.
The random variate represents the number of Type I objects drawn if we
take a handful of objects from the bin at once and find out afterwards
that we took `N` objects.
%(before_notes)s
See Also
--------
nchypergeom_wallenius, hypergeom, nhypergeom
Notes
-----
Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
with parameters `N`, `n`, and `M` (respectively) as defined above.
The probability mass function is defined as
.. math::
p(x; M, n, N, \omega) =
\frac{\binom{n}{x}\binom{M - n}{N-x}\omega^x}{P_0},
for
:math:`x \in [x_l, x_u]`,
:math:`M \in {\mathbb N}`,
:math:`n \in [0, M]`,
:math:`N \in [0, M]`,
:math:`\omega > 0`,
where
:math:`x_l = \max(0, N - (M - n))`,
:math:`x_u = \min(N, n)`,
.. math::
P_0 = \sum_{y=x_l}^{x_u} \binom{n}{y}\binom{M - n}{N-y}\omega^y,
and the binomial coefficients are defined as
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
`nchypergeom_fisher` uses the BiasedUrn package by Agner Fog with
permission for it to be distributed under SciPy's license.
The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
universally accepted; they are chosen for consistency with `hypergeom`.
Note that Fisher's noncentral hypergeometric distribution is distinct
from Wallenius' noncentral hypergeometric distribution, which models
drawing a pre-determined `N` objects from a bin one by one.
When the odds ratio is unity, however, both distributions reduce to the
ordinary hypergeometric distribution.
%(after_notes)s
References
----------
.. [1] Agner Fog, "Biased Urn Theory".
https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf
.. [2] "Fisher's noncentral hypergeometric distribution", Wikipedia,
https://en.wikipedia.org/wiki/Fisher's_noncentral_hypergeometric_distribution
%(example)s
"""
rvs_name = "rvs_fisher"
dist = _PyFishersNCHypergeometric
nchypergeom_fisher = nchypergeom_fisher_gen(
name='nchypergeom_fisher',
longname="A Fisher's noncentral hypergeometric")
class nchypergeom_wallenius_gen(_nchypergeom_gen):
r"""A Wallenius' noncentral hypergeometric discrete random variable.
Wallenius' noncentral hypergeometric distribution models drawing objects of
two types from a bin. `M` is the total number of objects, `n` is the
number of Type I objects, and `odds` is the odds ratio: the odds of
selecting a Type I object rather than a Type II object when there is only
one object of each type.
The random variate represents the number of Type I objects drawn if we
draw a pre-determined `N` objects from a bin one by one.
%(before_notes)s
See Also
--------
nchypergeom_fisher, hypergeom, nhypergeom
Notes
-----
Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
with parameters `N`, `n`, and `M` (respectively) as defined above.
The probability mass function is defined as
.. math::
p(x; N, n, M) = \binom{n}{x} \binom{M - n}{N-x}
\int_0^1 \left(1-t^{\omega/D}\right)^x\left(1-t^{1/D}\right)^{N-x} dt
for
:math:`x \in [x_l, x_u]`,
:math:`M \in {\mathbb N}`,
:math:`n \in [0, M]`,
:math:`N \in [0, M]`,
:math:`\omega > 0`,
where
:math:`x_l = \max(0, N - (M - n))`,
:math:`x_u = \min(N, n)`,
.. math::
D = \omega(n - x) + ((M - n)-(N-x)),
and the binomial coefficients are defined as
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
`nchypergeom_wallenius` uses the BiasedUrn package by Agner Fog with
permission for it to be distributed under SciPy's license.
The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
universally accepted; they are chosen for consistency with `hypergeom`.
Note that Wallenius' noncentral hypergeometric distribution is distinct
from Fisher's noncentral hypergeometric distribution, which models
    taking a handful of objects from the bin at once and finding out
    afterwards that `N` objects were taken.
When the odds ratio is unity, however, both distributions reduce to the
ordinary hypergeometric distribution.
%(after_notes)s
References
----------
.. [1] Agner Fog, "Biased Urn Theory".
https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf
.. [2] "Wallenius' noncentral hypergeometric distribution", Wikipedia,
https://en.wikipedia.org/wiki/Wallenius'_noncentral_hypergeometric_distribution
%(example)s
"""
rvs_name = "rvs_wallenius"
dist = _PyWalleniusNCHypergeometric
nchypergeom_wallenius = nchypergeom_wallenius_gen(
name='nchypergeom_wallenius',
longname="A Wallenius' noncentral hypergeometric")
# Collect names of classes and objects in this module.
pairs = list(globals().copy().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| 55962 | 29.136241 | 90 | py |
scipy | scipy-main/scipy/stats/contingency.py |
"""
Contingency table functions (:mod:`scipy.stats.contingency`)
============================================================
Functions for creating and analyzing contingency tables.
.. currentmodule:: scipy.stats.contingency
.. autosummary::
:toctree: generated/
chi2_contingency
relative_risk
odds_ratio
crosstab
association
expected_freq
margins
"""
from functools import reduce
import math
import numpy as np
from ._stats_py import power_divergence
from ._relative_risk import relative_risk
from ._crosstab import crosstab
from ._odds_ratio import odds_ratio
from scipy._lib._bunch import _make_tuple_bunch
__all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
'association', 'relative_risk', 'odds_ratio']
def margins(a):
"""Return a list of the marginal sums of the array `a`.
Parameters
----------
a : ndarray
The array for which to compute the marginal sums.
Returns
-------
margsums : list of ndarrays
A list of length `a.ndim`. `margsums[k]` is the result
of summing `a` over all axes except `k`; it has the same
number of dimensions as `a`, but the length of each axis
except axis `k` will be 1.
Examples
--------
>>> import numpy as np
>>> from scipy.stats.contingency import margins
>>> a = np.arange(12).reshape(2, 6)
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11]])
>>> m0, m1 = margins(a)
>>> m0
array([[15],
[51]])
>>> m1
array([[ 6, 8, 10, 12, 14, 16]])
>>> b = np.arange(24).reshape(2,3,4)
>>> m0, m1, m2 = margins(b)
>>> m0
array([[[ 66]],
[[210]]])
>>> m1
array([[[ 60],
[ 92],
[124]]])
>>> m2
array([[[60, 66, 72, 78]]])
"""
margsums = []
ranged = list(range(a.ndim))
for k in ranged:
marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k])
margsums.append(marg)
return margsums
def expected_freq(observed):
"""
Compute the expected frequencies from a contingency table.
Given an n-dimensional contingency table of observed frequencies,
compute the expected frequencies for the table based on the marginal
sums under the assumption that the groups associated with each
dimension are independent.
Parameters
----------
observed : array_like
The table of observed frequencies. (While this function can handle
a 1-D array, that case is trivial. Generally `observed` is at
least 2-D.)
Returns
-------
expected : ndarray of float64
The expected frequencies, based on the marginal sums of the table.
Same shape as `observed`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats.contingency import expected_freq
>>> observed = np.array([[10, 10, 20],[20, 20, 20]])
>>> expected_freq(observed)
array([[ 12., 12., 16.],
[ 18., 18., 24.]])
"""
# Typically `observed` is an integer array. If `observed` has a large
# number of dimensions or holds large values, some of the following
# computations may overflow, so we first switch to floating point.
observed = np.asarray(observed, dtype=np.float64)
# Create a list of the marginal sums.
margsums = margins(observed)
# Create the array of expected frequencies. The shapes of the
# marginal sums returned by apply_over_axes() are just what we
# need for broadcasting in the following product.
d = observed.ndim
expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1)
return expected
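# Illustrative check (a sketch, not part of the original module): for a
# 2-D table, the expected frequencies are the outer product of the row and
# column margins divided by the grand total.
#
#     obs = np.array([[10, 20], [30, 40]])
#     row = obs.sum(axis=1, keepdims=True)
#     col = obs.sum(axis=0, keepdims=True)
#     assert np.allclose(expected_freq(obs), row * col / obs.sum())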
Chi2ContingencyResult = _make_tuple_bunch(
'Chi2ContingencyResult',
['statistic', 'pvalue', 'dof', 'expected_freq'], []
)
def chi2_contingency(observed, correction=True, lambda_=None):
"""Chi-square test of independence of variables in a contingency table.
This function computes the chi-square statistic and p-value for the
hypothesis test of independence of the observed frequencies in the
contingency table [1]_ `observed`. The expected frequencies are computed
based on the marginal sums under the assumption of independence; see
`scipy.stats.contingency.expected_freq`. The number of degrees of
freedom is (expressed using numpy functions and attributes)::
dof = observed.size - sum(observed.shape) + observed.ndim - 1
Parameters
----------
observed : array_like
The contingency table. The table contains the observed frequencies
(i.e. number of occurrences) in each category. In the two-dimensional
case, the table is often described as an "R x C table".
correction : bool, optional
If True, *and* the degrees of freedom is 1, apply Yates' correction
for continuity. The effect of the correction is to adjust each
observed value by 0.5 towards the corresponding expected value.
lambda_ : float or str, optional
By default, the statistic computed in this test is Pearson's
chi-squared statistic [2]_. `lambda_` allows a statistic from the
Cressie-Read power divergence family [3]_ to be used instead. See
`scipy.stats.power_divergence` for details.
Returns
-------
res : Chi2ContingencyResult
An object containing attributes:
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
dof : int
The degrees of freedom.
expected_freq : ndarray, same shape as `observed`
The expected frequencies, based on the marginal sums of the table.
See Also
--------
scipy.stats.contingency.expected_freq
scipy.stats.fisher_exact
scipy.stats.chisquare
scipy.stats.power_divergence
scipy.stats.barnard_exact
scipy.stats.boschloo_exact
Notes
-----
An often quoted guideline for the validity of this calculation is that
the test should be used only if the observed and expected frequencies
in each cell are at least 5.
This is a test for the independence of different categories of a
population. The test is only meaningful when the dimension of
`observed` is two or more. Applying the test to a one-dimensional
table will always result in `expected` equal to `observed` and a
chi-square statistic equal to 0.
This function does not handle masked arrays, because the calculation
does not make sense with missing values.
Like `scipy.stats.chisquare`, this function computes a chi-square
statistic; the convenience this function provides is to figure out the
expected frequencies and degrees of freedom from the given contingency
    table. If these were already known, and if Yates' correction were not
    required, one could use `scipy.stats.chisquare`. That is, if one calls::
        res = chi2_contingency(obs, correction=False)
    then, with ``ex = scipy.stats.contingency.expected_freq(obs)``, the
    following is true::
        (res.statistic, res.pvalue) == stats.chisquare(obs.ravel(),
                                                       f_exp=ex.ravel(),
                                                       ddof=obs.size - 1 - dof)
The `lambda_` argument was added in version 0.13.0 of scipy.
References
----------
.. [1] "Contingency table",
https://en.wikipedia.org/wiki/Contingency_table
.. [2] "Pearson's chi-squared test",
https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
.. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
.. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of
Cardiovascular Events in Women and Men: A Sex-Specific
Meta-analysis of Randomized Controlled Trials."
JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006.
Examples
--------
In [4]_, the use of aspirin to prevent cardiovascular events in women
and men was investigated. The study notably concluded:
...aspirin therapy reduced the risk of a composite of
cardiovascular events due to its effect on reducing the risk of
ischemic stroke in women [...]
The article lists studies of various cardiovascular events. Let's
    focus on the ischemic stroke in women.
The following table summarizes the results of the experiment in which
participants took aspirin or a placebo on a regular basis for several
years. Cases of ischemic stroke were recorded::
                          Aspirin   Control/Placebo
        Ischemic stroke       176               230
        No stroke           21035             21018
Is there evidence that the aspirin reduces the risk of ischemic stroke?
We begin by formulating a null hypothesis :math:`H_0`:
The effect of aspirin is equivalent to that of placebo.
Let's assess the plausibility of this hypothesis with
a chi-square test.
>>> import numpy as np
>>> from scipy.stats import chi2_contingency
>>> table = np.array([[176, 230], [21035, 21018]])
>>> res = chi2_contingency(table)
>>> res.statistic
6.892569132546561
>>> res.pvalue
0.008655478161175739
Using a significance level of 5%, we would reject the null hypothesis in
favor of the alternative hypothesis: "the effect of aspirin
is not equivalent to the effect of placebo".
Because `scipy.stats.contingency.chi2_contingency` performs a two-sided
test, the alternative hypothesis does not indicate the direction of the
effect. We can use `stats.contingency.odds_ratio` to support the
conclusion that aspirin *reduces* the risk of ischemic stroke.
Below are further examples showing how larger contingency tables can be
tested.
A two-way example (2 x 3):
>>> obs = np.array([[10, 10, 20], [20, 20, 20]])
>>> res = chi2_contingency(obs)
>>> res.statistic
2.7777777777777777
>>> res.pvalue
0.24935220877729619
>>> res.dof
2
>>> res.expected_freq
array([[ 12., 12., 16.],
[ 18., 18., 24.]])
Perform the test using the log-likelihood ratio (i.e. the "G-test")
instead of Pearson's chi-squared statistic.
>>> res = chi2_contingency(obs, lambda_="log-likelihood")
>>> res.statistic
2.7688587616781319
>>> res.pvalue
0.25046668010954165
A four-way example (2 x 2 x 2 x 2):
>>> obs = np.array(
... [[[[12, 17],
... [11, 16]],
... [[11, 12],
... [15, 16]]],
... [[[23, 15],
... [30, 22]],
... [[14, 17],
... [15, 16]]]])
>>> res = chi2_contingency(obs)
>>> res.statistic
8.7584514426741897
>>> res.pvalue
0.64417725029295503
"""
observed = np.asarray(observed)
if np.any(observed < 0):
raise ValueError("All values in `observed` must be nonnegative.")
if observed.size == 0:
raise ValueError("No data; `observed` has size 0.")
expected = expected_freq(observed)
if np.any(expected == 0):
# Include one of the positions where expected is zero in
# the exception message.
zeropos = list(zip(*np.nonzero(expected == 0)))[0]
raise ValueError("The internally computed table of expected "
"frequencies has a zero element at {}.".format(zeropos))
# The degrees of freedom
dof = expected.size - sum(expected.shape) + expected.ndim - 1
if dof == 0:
# Degenerate case; this occurs when `observed` is 1D (or, more
# generally, when it has only one nontrivial dimension). In this
# case, we also have observed == expected, so chi2 is 0.
chi2 = 0.0
p = 1.0
else:
if dof == 1 and correction:
# Adjust `observed` according to Yates' correction for continuity.
# Magnitude of correction no bigger than difference; see gh-13875
diff = expected - observed
direction = np.sign(diff)
magnitude = np.minimum(0.5, np.abs(diff))
observed = observed + magnitude * direction
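            # (E.g. expected 173.5 vs. observed 176 yields an adjusted value
            # of 175.5, while a |diff| below 0.5 shrinks observed all the way
            # to expected.)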
chi2, p = power_divergence(observed, expected,
ddof=observed.size - 1 - dof, axis=None,
lambda_=lambda_)
return Chi2ContingencyResult(chi2, p, dof, expected)
def association(observed, method="cramer", correction=False, lambda_=None):
"""Calculates degree of association between two nominal variables.
The function provides the option for computing one of three measures of
association between two nominal variables from the data given in a 2d
contingency table: Tschuprow's T, Pearson's Contingency Coefficient
and Cramer's V.
Parameters
----------
observed : array-like
The array of observed values
method : {"cramer", "tschuprow", "pearson"} (default = "cramer")
The association test statistic.
correction : bool, optional
Inherited from `scipy.stats.contingency.chi2_contingency()`
lambda_ : float or str, optional
Inherited from `scipy.stats.contingency.chi2_contingency()`
Returns
-------
statistic : float
Value of the test statistic
Notes
-----
    Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient all
    measure the degree to which two nominal or ordinal variables are related,
    or the level of their association. This differs from correlation, although
    the two are often mistakenly considered equivalent. Correlation measures
    in what way two variables are related, whereas association measures how
    strongly related the variables are. As such, association does not presume
    one dependent and one independent variable; it is instead closely tied to
    a test of independence. A value of 1.0 indicates perfect association, and
    0.0 means the variables have no association.
    Both Cramer's V and Tschuprow's T are extensions of the phi coefficient.
    Moreover, due to the close relationship between Cramer's V and
    Tschuprow's T, the returned values can often be similar or even
    equivalent. They are likely to diverge more as the array shape diverges
    from 2x2.
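    With :math:`\phi^2 = \chi^2 / N` for an :math:`r \times c` table of
    :math:`N` observations, the statistics are computed here as
    :math:`\sqrt{\phi^2 / \min(r - 1, c - 1)}` (Cramer's V),
    :math:`\sqrt{\phi^2 / \sqrt{(r - 1)(c - 1)}}` (Tschuprow's T), and
    :math:`\sqrt{\phi^2 / (1 + \phi^2)}` (Pearson's contingency coefficient).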
References
----------
.. [1] "Tschuprow's T",
https://en.wikipedia.org/wiki/Tschuprow's_T
.. [2] Tschuprow, A. A. (1939)
Principles of the Mathematical Theory of Correlation;
translated by M. Kantorowitsch. W. Hodge & Co.
.. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V
.. [4] "Nominal Association: Phi and Cramer's V",
http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html
.. [5] Gingrich, Paul, "Association Between Variables",
http://uregina.ca/~gingrich/ch11a.pdf
Examples
--------
An example with a 4x2 contingency table:
>>> import numpy as np
>>> from scipy.stats.contingency import association
>>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
Pearson's contingency coefficient
>>> association(obs4x2, method="pearson")
0.18303298140595667
Cramer's V
>>> association(obs4x2, method="cramer")
0.18617813077483678
Tschuprow's T
>>> association(obs4x2, method="tschuprow")
0.14146478765062995
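    As a quick sanity check, a diagonal table is perfectly associated:
    >>> association(np.array([[10, 0], [0, 10]]), method="cramer")
    1.0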
"""
arr = np.asarray(observed)
if not np.issubdtype(arr.dtype, np.integer):
raise ValueError("`observed` must be an integer array.")
    if arr.ndim != 2:
        raise ValueError("`observed` must be a 2d array.")
chi2_stat = chi2_contingency(arr, correction=correction,
lambda_=lambda_)
phi2 = chi2_stat.statistic / arr.sum()
n_rows, n_cols = arr.shape
if method == "cramer":
value = phi2 / min(n_cols - 1, n_rows - 1)
elif method == "tschuprow":
value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1))
elif method == 'pearson':
value = phi2 / (1 + phi2)
else:
raise ValueError("Invalid argument value: 'method' argument must "
"be 'cramer', 'tschuprow', or 'pearson'")
return math.sqrt(value)
| 16,283
| 33.720682
| 86
|
py
|
scipy
|
scipy-main/scipy/stats/_resampling.py
|
from __future__ import annotations
import warnings
import numpy as np
from itertools import combinations, permutations, product
from collections.abc import Sequence
import inspect
from scipy._lib._util import check_random_state, _rename_parameter
from scipy.special import ndtr, ndtri, comb, factorial
from scipy._lib._util import rng_integers
from dataclasses import dataclass
from ._common import ConfidenceInterval
from ._axis_nan_policy import _broadcast_concatenate, _broadcast_arrays
from ._warnings_errors import DegenerateDataWarning
__all__ = ['bootstrap', 'monte_carlo_test', 'permutation_test']
def _vectorize_statistic(statistic):
"""Vectorize an n-sample statistic"""
# This is a little cleaner than np.nditer at the expense of some data
# copying: concatenate samples together, then use np.apply_along_axis
def stat_nd(*data, axis=0):
lengths = [sample.shape[axis] for sample in data]
split_indices = np.cumsum(lengths)[:-1]
z = _broadcast_concatenate(data, axis)
# move working axis to position 0 so that new dimensions in the output
# of `statistic` are _prepended_. ("This axis is removed, and replaced
# with new dimensions...")
z = np.moveaxis(z, axis, 0)
def stat_1d(z):
data = np.split(z, split_indices)
return statistic(*data)
return np.apply_along_axis(stat_1d, 0, z)[()]
return stat_nd
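# Informal usage sketch (illustrative, not executed): with
#     stat = _vectorize_statistic(lambda x, y: np.mean(x) - np.mean(y))
#     x, y = np.zeros((3, 5)), np.ones((3, 4))
# `stat(x, y, axis=-1)` applies the 1-d statistic to each of the three pairs
# of slices, giving array([-1., -1., -1.]).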
def _jackknife_resample(sample, batch=None):
"""Jackknife resample the sample. Only one-sample stats for now."""
n = sample.shape[-1]
batch_nominal = batch or n
for k in range(0, n, batch_nominal):
# col_start:col_end are the observations to remove
batch_actual = min(batch_nominal, n-k)
# jackknife - each row leaves out one observation
j = np.ones((batch_actual, n), dtype=bool)
np.fill_diagonal(j[:, k:k+batch_actual], False)
i = np.arange(n)
i = np.broadcast_to(i, (batch_actual, n))
i = i[j].reshape((batch_actual, n-1))
resamples = sample[..., i]
yield resamples
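# Informal illustration: for sample = np.array([1, 2, 3]), the generator
# yields a single batch [[2, 3], [1, 3], [1, 2]] -- row k omits observation k.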
def _bootstrap_resample(sample, n_resamples=None, random_state=None):
"""Bootstrap resample the sample."""
n = sample.shape[-1]
# bootstrap - each row is a random resample of original observations
i = rng_integers(random_state, 0, n, (n_resamples, n))
resamples = sample[..., i]
return resamples
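# Shape note: the result has shape sample.shape[:-1] + (n_resamples, n); an
# axis of independent resamples is inserted before the observation axis.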
def _percentile_of_score(a, score, axis):
"""Vectorized, simplified `scipy.stats.percentileofscore`.
Uses logic of the 'mean' value of percentileofscore's kind parameter.
Unlike `stats.percentileofscore`, the percentile returned is a fraction
in [0, 1].
"""
B = a.shape[axis]
return ((a < score).sum(axis=axis) + (a <= score).sum(axis=axis)) / (2 * B)
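# Informal check: for a = [0, 1, 2, 3] and score = 2 along the last axis,
# the result is ((a < 2).sum() + (a <= 2).sum()) / (2 * 4) = (2 + 3) / 8
# = 0.625.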
def _percentile_along_axis(theta_hat_b, alpha):
"""`np.percentile` with different percentile for each slice."""
# the difference between _percentile_along_axis and np.percentile is that
# np.percentile gets _all_ the qs for each axis slice, whereas
# _percentile_along_axis gets the q corresponding with each axis slice
shape = theta_hat_b.shape[:-1]
alpha = np.broadcast_to(alpha, shape)
percentiles = np.zeros_like(alpha, dtype=np.float64)
for indices, alpha_i in np.ndenumerate(alpha):
if np.isnan(alpha_i):
# e.g. when bootstrap distribution has only one unique element
msg = (
"The BCa confidence interval cannot be calculated."
" This problem is known to occur when the distribution"
" is degenerate or the statistic is np.min."
)
warnings.warn(DegenerateDataWarning(msg))
percentiles[indices] = np.nan
else:
theta_hat_b_i = theta_hat_b[indices]
percentiles[indices] = np.percentile(theta_hat_b_i, alpha_i)
return percentiles[()] # return scalar instead of 0d array
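# Informal illustration: for theta_hat_b of shape (2, B) and
# alpha = [2.5, 97.5], element i of the result is
# np.percentile(theta_hat_b[i], alpha[i]).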
def _bca_interval(data, statistic, axis, alpha, theta_hat_b, batch):
"""Bias-corrected and accelerated interval."""
# closely follows [1] 14.3 and 15.4 (Eq. 15.36)
# calculate z0_hat
theta_hat = np.asarray(statistic(*data, axis=axis))[..., None]
percentile = _percentile_of_score(theta_hat_b, theta_hat, axis=-1)
z0_hat = ndtri(percentile)
# calculate a_hat
theta_hat_ji = [] # j is for sample of data, i is for jackknife resample
for j, sample in enumerate(data):
# _jackknife_resample will add an axis prior to the last axis that
# corresponds with the different jackknife resamples. Do the same for
# each sample of the data to ensure broadcastability. We need to
# create a copy of the list containing the samples anyway, so do this
# in the loop to simplify the code. This is not the bottleneck...
samples = [np.expand_dims(sample, -2) for sample in data]
theta_hat_i = []
for jackknife_sample in _jackknife_resample(sample, batch):
samples[j] = jackknife_sample
broadcasted = _broadcast_arrays(samples, axis=-1)
theta_hat_i.append(statistic(*broadcasted, axis=-1))
theta_hat_ji.append(theta_hat_i)
theta_hat_ji = [np.concatenate(theta_hat_i, axis=-1)
for theta_hat_i in theta_hat_ji]
n_j = [theta_hat_i.shape[-1] for theta_hat_i in theta_hat_ji]
theta_hat_j_dot = [theta_hat_i.mean(axis=-1, keepdims=True)
for theta_hat_i in theta_hat_ji]
U_ji = [(n - 1) * (theta_hat_dot - theta_hat_i)
for theta_hat_dot, theta_hat_i, n
in zip(theta_hat_j_dot, theta_hat_ji, n_j)]
nums = [(U_i**3).sum(axis=-1)/n**3 for U_i, n in zip(U_ji, n_j)]
dens = [(U_i**2).sum(axis=-1)/n**2 for U_i, n in zip(U_ji, n_j)]
a_hat = 1/6 * sum(nums) / sum(dens)**(3/2)
# calculate alpha_1, alpha_2
z_alpha = ndtri(alpha)
z_1alpha = -z_alpha
num1 = z0_hat + z_alpha
alpha_1 = ndtr(z0_hat + num1/(1 - a_hat*num1))
num2 = z0_hat + z_1alpha
alpha_2 = ndtr(z0_hat + num2/(1 - a_hat*num2))
return alpha_1, alpha_2, a_hat # return a_hat for testing
def _bootstrap_iv(data, statistic, vectorized, paired, axis, confidence_level,
alternative, n_resamples, batch, method, bootstrap_result,
random_state):
"""Input validation and standardization for `bootstrap`."""
if vectorized not in {True, False, None}:
raise ValueError("`vectorized` must be `True`, `False`, or `None`.")
if vectorized is None:
vectorized = 'axis' in inspect.signature(statistic).parameters
if not vectorized:
statistic = _vectorize_statistic(statistic)
axis_int = int(axis)
if axis != axis_int:
raise ValueError("`axis` must be an integer.")
n_samples = 0
try:
n_samples = len(data)
except TypeError:
raise ValueError("`data` must be a sequence of samples.")
if n_samples == 0:
raise ValueError("`data` must contain at least one sample.")
data_iv = []
for sample in data:
sample = np.atleast_1d(sample)
if sample.shape[axis_int] <= 1:
raise ValueError("each sample in `data` must contain two or more "
"observations along `axis`.")
sample = np.moveaxis(sample, axis_int, -1)
data_iv.append(sample)
if paired not in {True, False}:
raise ValueError("`paired` must be `True` or `False`.")
if paired:
n = data_iv[0].shape[-1]
for sample in data_iv[1:]:
if sample.shape[-1] != n:
message = ("When `paired is True`, all samples must have the "
"same length along `axis`")
raise ValueError(message)
# to generate the bootstrap distribution for paired-sample statistics,
# resample the indices of the observations
def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic):
data = [sample[..., i] for sample in data]
return unpaired_statistic(*data, axis=axis)
data_iv = [np.arange(n)]
confidence_level_float = float(confidence_level)
alternative = alternative.lower()
alternatives = {'two-sided', 'less', 'greater'}
if alternative not in alternatives:
raise ValueError(f"`alternative` must be one of {alternatives}")
n_resamples_int = int(n_resamples)
if n_resamples != n_resamples_int or n_resamples_int < 0:
raise ValueError("`n_resamples` must be a non-negative integer.")
if batch is None:
batch_iv = batch
else:
batch_iv = int(batch)
if batch != batch_iv or batch_iv <= 0:
raise ValueError("`batch` must be a positive integer or None.")
methods = {'percentile', 'basic', 'bca'}
method = method.lower()
if method not in methods:
raise ValueError(f"`method` must be in {methods}")
    message = "`bootstrap_result` must have attribute `bootstrap_distribution`"
if (bootstrap_result is not None
and not hasattr(bootstrap_result, "bootstrap_distribution")):
raise ValueError(message)
message = ("Either `bootstrap_result.bootstrap_distribution.size` or "
"`n_resamples` must be positive.")
if ((not bootstrap_result or
not bootstrap_result.bootstrap_distribution.size)
and n_resamples_int == 0):
raise ValueError(message)
random_state = check_random_state(random_state)
return (data_iv, statistic, vectorized, paired, axis_int,
confidence_level_float, alternative, n_resamples_int, batch_iv,
method, bootstrap_result, random_state)
@dataclass
class BootstrapResult:
"""Result object returned by `scipy.stats.bootstrap`.
Attributes
----------
confidence_interval : ConfidenceInterval
The bootstrap confidence interval as an instance of
`collections.namedtuple` with attributes `low` and `high`.
bootstrap_distribution : ndarray
The bootstrap distribution, that is, the value of `statistic` for
each resample. The last dimension corresponds with the resamples
(e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``).
standard_error : float or ndarray
The bootstrap standard error, that is, the sample standard
deviation of the bootstrap distribution.
"""
confidence_interval: ConfidenceInterval
bootstrap_distribution: np.ndarray
standard_error: float | np.ndarray
def bootstrap(data, statistic, *, n_resamples=9999, batch=None,
vectorized=None, paired=False, axis=0, confidence_level=0.95,
alternative='two-sided', method='BCa', bootstrap_result=None,
random_state=None):
r"""
Compute a two-sided bootstrap confidence interval of a statistic.
When `method` is ``'percentile'`` and `alternative` is ``'two-sided'``,
a bootstrap confidence interval is computed according to the following
procedure.
1. Resample the data: for each sample in `data` and for each of
`n_resamples`, take a random sample of the original sample
(with replacement) of the same size as the original sample.
2. Compute the bootstrap distribution of the statistic: for each set of
resamples, compute the test statistic.
3. Determine the confidence interval: find the interval of the bootstrap
distribution that is
- symmetric about the median and
- contains `confidence_level` of the resampled statistic values.
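    Concretely, as implemented below, the two-sided ``'percentile'`` interval
    endpoints are the ``alpha`` and ``1 - alpha`` quantiles of the bootstrap
    distribution, where ``alpha = (1 - confidence_level) / 2``.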
While the ``'percentile'`` method is the most intuitive, it is rarely
used in practice. Two more common methods are available, ``'basic'``
('reverse percentile') and ``'BCa'`` ('bias-corrected and accelerated');
they differ in how step 3 is performed.
If the samples in `data` are taken at random from their respective
distributions :math:`n` times, the confidence interval returned by
`bootstrap` will contain the true value of the statistic for those
distributions approximately `confidence_level`:math:`\, \times \, n` times.
Parameters
----------
data : sequence of array-like
Each element of data is a sample from an underlying distribution.
statistic : callable
Statistic for which the confidence interval is to be calculated.
`statistic` must be a callable that accepts ``len(data)`` samples
as separate arguments and returns the resulting statistic.
If `vectorized` is set ``True``,
`statistic` must also accept a keyword argument `axis` and be
vectorized to compute the statistic along the provided `axis`.
n_resamples : int, default: ``9999``
The number of resamples performed to form the bootstrap distribution
of the statistic.
batch : int, optional
The number of resamples to process in each vectorized call to
`statistic`. Memory usage is O(`batch`*``n``), where ``n`` is the
sample size. Default is ``None``, in which case ``batch = n_resamples``
(or ``batch = max(n_resamples, n)`` for ``method='BCa'``).
vectorized : bool, optional
If `vectorized` is set ``False``, `statistic` will not be passed
keyword argument `axis` and is expected to calculate the statistic
only for 1D samples. If ``True``, `statistic` will be passed keyword
argument `axis` and is expected to calculate the statistic along `axis`
when passed an ND sample array. If ``None`` (default), `vectorized`
will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of
a vectorized statistic typically reduces computation time.
paired : bool, default: ``False``
Whether the statistic treats corresponding elements of the samples
in `data` as paired.
axis : int, default: ``0``
The axis of the samples in `data` along which the `statistic` is
calculated.
confidence_level : float, default: ``0.95``
The confidence level of the confidence interval.
alternative : {'two-sided', 'less', 'greater'}, default: ``'two-sided'``
Choose ``'two-sided'`` (default) for a two-sided confidence interval,
``'less'`` for a one-sided confidence interval with the lower bound
at ``-np.inf``, and ``'greater'`` for a one-sided confidence interval
with the upper bound at ``np.inf``. The other bound of the one-sided
confidence intervals is the same as that of a two-sided confidence
interval with `confidence_level` twice as far from 1.0; e.g. the upper
bound of a 95% ``'less'`` confidence interval is the same as the upper
bound of a 90% ``'two-sided'`` confidence interval.
method : {'percentile', 'basic', 'bca'}, default: ``'BCa'``
Whether to return the 'percentile' bootstrap confidence interval
(``'percentile'``), the 'basic' (AKA 'reverse') bootstrap confidence
interval (``'basic'``), or the bias-corrected and accelerated bootstrap
confidence interval (``'BCa'``).
bootstrap_result : BootstrapResult, optional
Provide the result object returned by a previous call to `bootstrap`
to include the previous bootstrap distribution in the new bootstrap
distribution. This can be used, for example, to change
`confidence_level`, change `method`, or see the effect of performing
additional resampling without repeating computations.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Pseudorandom number generator state used to generate resamples.
If `random_state` is ``None`` (or `np.random`), the
`numpy.random.RandomState` singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState``
instance then that instance is used.
Returns
-------
res : BootstrapResult
An object with attributes:
confidence_interval : ConfidenceInterval
The bootstrap confidence interval as an instance of
`collections.namedtuple` with attributes `low` and `high`.
bootstrap_distribution : ndarray
The bootstrap distribution, that is, the value of `statistic` for
each resample. The last dimension corresponds with the resamples
(e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``).
standard_error : float or ndarray
The bootstrap standard error, that is, the sample standard
deviation of the bootstrap distribution.
Warns
-----
`~scipy.stats.DegenerateDataWarning`
Generated when ``method='BCa'`` and the bootstrap distribution is
degenerate (e.g. all elements are identical).
Notes
-----
Elements of the confidence interval may be NaN for ``method='BCa'`` if
the bootstrap distribution is degenerate (e.g. all elements are identical).
In this case, consider using another `method` or inspecting `data` for
indications that other analysis may be more appropriate (e.g. all
observations are identical).
References
----------
.. [1] B. Efron and R. J. Tibshirani, An Introduction to the Bootstrap,
Chapman & Hall/CRC, Boca Raton, FL, USA (1993)
.. [2] Nathaniel E. Helwig, "Bootstrap Confidence Intervals",
http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf
.. [3] Bootstrapping (statistics), Wikipedia,
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Examples
--------
Suppose we have sampled data from an unknown distribution.
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> from scipy.stats import norm
>>> dist = norm(loc=2, scale=4) # our "unknown" distribution
>>> data = dist.rvs(size=100, random_state=rng)
We are interested in the standard deviation of the distribution.
>>> std_true = dist.std() # the true value of the statistic
>>> print(std_true)
4.0
>>> std_sample = np.std(data) # the sample statistic
>>> print(std_sample)
3.9460644295563863
The bootstrap is used to approximate the variability we would expect if we
were to repeatedly sample from the unknown distribution and calculate the
statistic of the sample each time. It does this by repeatedly resampling
values *from the original sample* with replacement and calculating the
statistic of each resample. This results in a "bootstrap distribution" of
the statistic.
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import bootstrap
>>> data = (data,) # samples must be in a sequence
>>> res = bootstrap(data, np.std, confidence_level=0.9,
... random_state=rng)
>>> fig, ax = plt.subplots()
>>> ax.hist(res.bootstrap_distribution, bins=25)
>>> ax.set_title('Bootstrap Distribution')
>>> ax.set_xlabel('statistic value')
>>> ax.set_ylabel('frequency')
>>> plt.show()
The standard error quantifies this variability. It is calculated as the
standard deviation of the bootstrap distribution.
>>> res.standard_error
0.24427002125829136
>>> res.standard_error == np.std(res.bootstrap_distribution, ddof=1)
True
The bootstrap distribution of the statistic is often approximately normal
with scale equal to the standard error.
>>> x = np.linspace(3, 5)
>>> pdf = norm.pdf(x, loc=std_sample, scale=res.standard_error)
>>> fig, ax = plt.subplots()
>>> ax.hist(res.bootstrap_distribution, bins=25, density=True)
>>> ax.plot(x, pdf)
>>> ax.set_title('Normal Approximation of the Bootstrap Distribution')
>>> ax.set_xlabel('statistic value')
>>> ax.set_ylabel('pdf')
>>> plt.show()
This suggests that we could construct a 90% confidence interval on the
statistic based on quantiles of this normal distribution.
>>> norm.interval(0.9, loc=std_sample, scale=res.standard_error)
(3.5442759991341726, 4.3478528599786)
    Due to the central limit theorem, this normal approximation is accurate for a
variety of statistics and distributions underlying the samples; however,
the approximation is not reliable in all cases. Because `bootstrap` is
designed to work with arbitrary underlying distributions and statistics,
it uses more advanced techniques to generate an accurate confidence
interval.
>>> print(res.confidence_interval)
ConfidenceInterval(low=3.57655333533867, high=4.382043696342881)
If we sample from the original distribution 1000 times and form a bootstrap
confidence interval for each sample, the confidence interval
contains the true value of the statistic approximately 90% of the time.
>>> n_trials = 1000
>>> ci_contains_true_std = 0
>>> for i in range(n_trials):
... data = (dist.rvs(size=100, random_state=rng),)
... ci = bootstrap(data, np.std, confidence_level=0.9, n_resamples=1000,
... random_state=rng).confidence_interval
... if ci[0] < std_true < ci[1]:
... ci_contains_true_std += 1
>>> print(ci_contains_true_std)
875
Rather than writing a loop, we can also determine the confidence intervals
for all 1000 samples at once.
>>> data = (dist.rvs(size=(n_trials, 100), random_state=rng),)
>>> res = bootstrap(data, np.std, axis=-1, confidence_level=0.9,
... n_resamples=1000, random_state=rng)
>>> ci_l, ci_u = res.confidence_interval
Here, `ci_l` and `ci_u` contain the confidence interval for each of the
``n_trials = 1000`` samples.
>>> print(ci_l[995:])
[3.77729695 3.75090233 3.45829131 3.34078217 3.48072829]
>>> print(ci_u[995:])
[4.88316666 4.86924034 4.32032996 4.2822427 4.59360598]
And again, approximately 90% contain the true value, ``std_true = 4``.
>>> print(np.sum((ci_l < std_true) & (std_true < ci_u)))
900
`bootstrap` can also be used to estimate confidence intervals of
multi-sample statistics, including those calculated by hypothesis
    tests. `scipy.stats.mood` performs Mood's test for equal scale parameters,
and it returns two outputs: a statistic, and a p-value. To get a
confidence interval for the test statistic, we first wrap
`scipy.stats.mood` in a function that accepts two sample arguments,
accepts an `axis` keyword argument, and returns only the statistic.
>>> from scipy.stats import mood
    >>> def my_statistic(sample1, sample2, axis):
    ...     statistic, _ = mood(sample1, sample2, axis=axis)
    ...     return statistic
    Here, we use the 'basic' method with the default 95% confidence level.
>>> sample1 = norm.rvs(scale=1, size=100, random_state=rng)
>>> sample2 = norm.rvs(scale=2, size=100, random_state=rng)
>>> data = (sample1, sample2)
>>> res = bootstrap(data, my_statistic, method='basic', random_state=rng)
>>> print(mood(sample1, sample2)[0]) # element 0 is the statistic
-5.521109549096542
>>> print(res.confidence_interval)
ConfidenceInterval(low=-7.255994487314675, high=-4.016202624747605)
The bootstrap estimate of the standard error is also available.
>>> print(res.standard_error)
0.8344963846318795
Paired-sample statistics work, too. For example, consider the Pearson
correlation coefficient.
>>> from scipy.stats import pearsonr
>>> n = 100
>>> x = np.linspace(0, 10, n)
>>> y = x + rng.uniform(size=n)
>>> print(pearsonr(x, y)[0]) # element 0 is the statistic
0.9962357936065914
We wrap `pearsonr` so that it returns only the statistic.
>>> def my_statistic(x, y):
... return pearsonr(x, y)[0]
We call `bootstrap` using ``paired=True``.
Also, since ``my_statistic`` isn't vectorized to calculate the statistic
along a given axis, we pass in ``vectorized=False``.
>>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
... random_state=rng)
>>> print(res.confidence_interval)
ConfidenceInterval(low=0.9950085825848624, high=0.9971212407917498)
The result object can be passed back into `bootstrap` to perform additional
resampling:
>>> len(res.bootstrap_distribution)
9999
>>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
... n_resamples=1001, random_state=rng,
... bootstrap_result=res)
>>> len(res.bootstrap_distribution)
11000
or to change the confidence interval options:
>>> res2 = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
... n_resamples=0, random_state=rng, bootstrap_result=res,
... method='percentile', confidence_level=0.9)
>>> np.testing.assert_equal(res2.bootstrap_distribution,
... res.bootstrap_distribution)
    >>> res2.confidence_interval
ConfidenceInterval(low=0.9950035351407804, high=0.9971170323404578)
without repeating computation of the original bootstrap distribution.
"""
# Input validation
args = _bootstrap_iv(data, statistic, vectorized, paired, axis,
confidence_level, alternative, n_resamples, batch,
method, bootstrap_result, random_state)
(data, statistic, vectorized, paired, axis, confidence_level,
alternative, n_resamples, batch, method, bootstrap_result,
random_state) = args
theta_hat_b = ([] if bootstrap_result is None
else [bootstrap_result.bootstrap_distribution])
batch_nominal = batch or n_resamples or 1
for k in range(0, n_resamples, batch_nominal):
batch_actual = min(batch_nominal, n_resamples-k)
# Generate resamples
resampled_data = []
for sample in data:
resample = _bootstrap_resample(sample, n_resamples=batch_actual,
random_state=random_state)
resampled_data.append(resample)
# Compute bootstrap distribution of statistic
theta_hat_b.append(statistic(*resampled_data, axis=-1))
theta_hat_b = np.concatenate(theta_hat_b, axis=-1)
# Calculate percentile interval
alpha = ((1 - confidence_level)/2 if alternative == 'two-sided'
else (1 - confidence_level))
if method == 'bca':
interval = _bca_interval(data, statistic, axis=-1, alpha=alpha,
theta_hat_b=theta_hat_b, batch=batch)[:2]
percentile_fun = _percentile_along_axis
else:
interval = alpha, 1-alpha
def percentile_fun(a, q):
return np.percentile(a=a, q=q, axis=-1)
# Calculate confidence interval of statistic
ci_l = percentile_fun(theta_hat_b, interval[0]*100)
ci_u = percentile_fun(theta_hat_b, interval[1]*100)
if method == 'basic': # see [3]
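        # Reflect the percentile interval about the observed statistic; note
        # that this also swaps the roles of the lower and upper endpoints.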
theta_hat = statistic(*data, axis=-1)
ci_l, ci_u = 2*theta_hat - ci_u, 2*theta_hat - ci_l
if alternative == 'less':
ci_l = np.full_like(ci_l, -np.inf)
elif alternative == 'greater':
ci_u = np.full_like(ci_u, np.inf)
return BootstrapResult(confidence_interval=ConfidenceInterval(ci_l, ci_u),
bootstrap_distribution=theta_hat_b,
standard_error=np.std(theta_hat_b, ddof=1, axis=-1))
def _monte_carlo_test_iv(data, rvs, statistic, vectorized, n_resamples,
batch, alternative, axis):
"""Input validation for `monte_carlo_test`."""
axis_int = int(axis)
if axis != axis_int:
raise ValueError("`axis` must be an integer.")
if vectorized not in {True, False, None}:
raise ValueError("`vectorized` must be `True`, `False`, or `None`.")
if not isinstance(rvs, Sequence):
rvs = (rvs,)
data = (data,)
for rvs_i in rvs:
if not callable(rvs_i):
raise TypeError("`rvs` must be callable or sequence of callables.")
if not len(rvs) == len(data):
message = "If `rvs` is a sequence, `len(rvs)` must equal `len(data)`."
raise ValueError(message)
if not callable(statistic):
raise TypeError("`statistic` must be callable.")
if vectorized is None:
vectorized = 'axis' in inspect.signature(statistic).parameters
if not vectorized:
statistic_vectorized = _vectorize_statistic(statistic)
else:
statistic_vectorized = statistic
data = _broadcast_arrays(data, axis)
data_iv = []
for sample in data:
sample = np.atleast_1d(sample)
sample = np.moveaxis(sample, axis_int, -1)
data_iv.append(sample)
n_resamples_int = int(n_resamples)
if n_resamples != n_resamples_int or n_resamples_int <= 0:
raise ValueError("`n_resamples` must be a positive integer.")
if batch is None:
batch_iv = batch
else:
batch_iv = int(batch)
if batch != batch_iv or batch_iv <= 0:
raise ValueError("`batch` must be a positive integer or None.")
alternatives = {'two-sided', 'greater', 'less'}
alternative = alternative.lower()
if alternative not in alternatives:
raise ValueError(f"`alternative` must be in {alternatives}")
return (data_iv, rvs, statistic_vectorized, vectorized, n_resamples_int,
batch_iv, alternative, axis_int)
@dataclass
class MonteCarloTestResult:
"""Result object returned by `scipy.stats.monte_carlo_test`.
Attributes
----------
statistic : float or ndarray
The observed test statistic of the sample.
pvalue : float or ndarray
The p-value for the given alternative.
null_distribution : ndarray
The values of the test statistic generated under the null
hypothesis.
"""
statistic: float | np.ndarray
pvalue: float | np.ndarray
null_distribution: np.ndarray
@_rename_parameter('sample', 'data')
def monte_carlo_test(data, rvs, statistic, *, vectorized=None,
n_resamples=9999, batch=None, alternative="two-sided",
axis=0):
r"""Perform a Monte Carlo hypothesis test.
`data` contains a sample or a sequence of one or more samples. `rvs`
specifies the distribution(s) of the sample(s) in `data` under the null
hypothesis. The value of `statistic` for the given `data` is compared
against a Monte Carlo null distribution: the value of the statistic for
each of `n_resamples` sets of samples generated using `rvs`. This gives
the p-value, the probability of observing such an extreme value of the
test statistic under the null hypothesis.
Parameters
----------
data : array-like or sequence of array-like
An array or sequence of arrays of observations.
rvs : callable or tuple of callables
A callable or sequence of callables that generates random variates
under the null hypothesis. Each element of `rvs` must be a callable
that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and
returns an N-d array sample of that shape. If `rvs` is a sequence, the
number of callables in `rvs` must match the number of samples in
`data`, i.e. ``len(rvs) == len(data)``. If `rvs` is a single callable,
`data` is treated as a single sample.
statistic : callable
Statistic for which the p-value of the hypothesis test is to be
calculated. `statistic` must be a callable that accepts a sample
(e.g. ``statistic(sample)``) or ``len(rvs)`` separate samples (e.g.
        ``statistic(sample1, sample2)`` if `rvs` contains two callables and
`data` contains two samples) and returns the resulting statistic.
If `vectorized` is set ``True``, `statistic` must also accept a keyword
argument `axis` and be vectorized to compute the statistic along the
provided `axis` of the samples in `data`.
vectorized : bool, optional
If `vectorized` is set ``False``, `statistic` will not be passed
keyword argument `axis` and is expected to calculate the statistic
only for 1D samples. If ``True``, `statistic` will be passed keyword
argument `axis` and is expected to calculate the statistic along `axis`
when passed ND sample arrays. If ``None`` (default), `vectorized`
will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of
a vectorized statistic typically reduces computation time.
n_resamples : int, default: 9999
Number of samples drawn from each of the callables of `rvs`.
        Equivalently, the number of statistic values under the null hypothesis
used as the Monte Carlo null distribution.
batch : int, optional
The number of Monte Carlo samples to process in each call to
        `statistic`. Memory usage is O(`batch`*``sample.shape[axis]``). Default
is ``None``, in which case `batch` equals `n_resamples`.
alternative : {'two-sided', 'less', 'greater'}
The alternative hypothesis for which the p-value is calculated.
For each alternative, the p-value is defined as follows.
- ``'greater'`` : the percentage of the null distribution that is
greater than or equal to the observed value of the test statistic.
- ``'less'`` : the percentage of the null distribution that is
less than or equal to the observed value of the test statistic.
- ``'two-sided'`` : twice the smaller of the p-values above.
axis : int, default: 0
The axis of `data` (or each sample within `data`) over which to
calculate the statistic.
Returns
-------
res : MonteCarloTestResult
An object with attributes:
statistic : float or ndarray
The test statistic of the observed `data`.
pvalue : float or ndarray
The p-value for the given alternative.
null_distribution : ndarray
The values of the test statistic generated under the null
hypothesis.
References
----------
.. [1] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
Statistical Applications in Genetics and Molecular Biology 9.1 (2010).
Examples
--------
Suppose we wish to test whether a small sample has been drawn from a normal
distribution. We decide that we will use the skew of the sample as a
test statistic, and we will consider a p-value of 0.05 to be statistically
significant.
>>> import numpy as np
>>> from scipy import stats
>>> def statistic(x, axis):
... return stats.skew(x, axis)
After collecting our data, we calculate the observed value of the test
statistic.
>>> rng = np.random.default_rng()
>>> x = stats.skewnorm.rvs(a=1, size=50, random_state=rng)
>>> statistic(x, axis=0)
0.12457412450240658
To determine the probability of observing such an extreme value of the
skewness by chance if the sample were drawn from the normal distribution,
we can perform a Monte Carlo hypothesis test. The test will draw many
samples at random from their normal distribution, calculate the skewness
of each sample, and compare our original skewness against this
distribution to determine an approximate p-value.
>>> from scipy.stats import monte_carlo_test
>>> # because our statistic is vectorized, we pass `vectorized=True`
>>> rvs = lambda size: stats.norm.rvs(size=size, random_state=rng)
>>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)
>>> print(res.statistic)
0.12457412450240658
>>> print(res.pvalue)
0.7012
The probability of obtaining a test statistic less than or equal to the
observed value under the null hypothesis is ~70%. This is greater than
our chosen threshold of 5%, so we cannot consider this to be significant
evidence against the null hypothesis.
Note that this p-value essentially matches that of
`scipy.stats.skewtest`, which relies on an asymptotic distribution of a
test statistic based on the sample skewness.
>>> stats.skewtest(x).pvalue
0.6892046027110614
This asymptotic approximation is not valid for small sample sizes, but
`monte_carlo_test` can be used with samples of any size.
>>> x = stats.skewnorm.rvs(a=1, size=7, random_state=rng)
>>> # stats.skewtest(x) would produce an error due to small sample
>>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)
The Monte Carlo distribution of the test statistic is provided for
further investigation.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.hist(res.null_distribution, bins=50)
>>> ax.set_title("Monte Carlo distribution of test statistic")
>>> ax.set_xlabel("Value of Statistic")
>>> ax.set_ylabel("Frequency")
>>> plt.show()
"""
args = _monte_carlo_test_iv(data, rvs, statistic, vectorized,
n_resamples, batch, alternative, axis)
(data, rvs, statistic, vectorized,
n_resamples, batch, alternative, axis) = args
# Some statistics return plain floats; ensure they're at least np.float64
observed = np.asarray(statistic(*data, axis=-1))[()]
n_observations = [sample.shape[-1] for sample in data]
batch_nominal = batch or n_resamples
null_distribution = []
for k in range(0, n_resamples, batch_nominal):
batch_actual = min(batch_nominal, n_resamples - k)
resamples = [rvs_i(size=(batch_actual, n_observations_i))
for rvs_i, n_observations_i in zip(rvs, n_observations)]
null_distribution.append(statistic(*resamples, axis=-1))
null_distribution = np.concatenate(null_distribution)
null_distribution = null_distribution.reshape([-1] + [1]*observed.ndim)
def less(null_distribution, observed):
cmps = null_distribution <= observed
pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1) # see [1]
return pvalues
def greater(null_distribution, observed):
cmps = null_distribution >= observed
pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1) # see [1]
return pvalues
def two_sided(null_distribution, observed):
pvalues_less = less(null_distribution, observed)
pvalues_greater = greater(null_distribution, observed)
pvalues = np.minimum(pvalues_less, pvalues_greater) * 2
return pvalues
compare = {"less": less,
"greater": greater,
"two-sided": two_sided}
pvalues = compare[alternative](null_distribution, observed)
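    # Doubling the one-sided p-value in `two_sided` can exceed 1; clip to
    # keep the result in [0, 1].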
pvalues = np.clip(pvalues, 0, 1)
return MonteCarloTestResult(observed, pvalues, null_distribution)
@dataclass
class PermutationTestResult:
"""Result object returned by `scipy.stats.permutation_test`.
Attributes
----------
statistic : float or ndarray
The observed test statistic of the data.
pvalue : float or ndarray
The p-value for the given alternative.
null_distribution : ndarray
The values of the test statistic generated under the null
hypothesis.
"""
statistic: float | np.ndarray
pvalue: float | np.ndarray
null_distribution: np.ndarray
def _all_partitions_concatenated(ns):
"""
Generate all partitions of indices of groups of given sizes, concatenated
`ns` is an iterable of ints.
"""
def all_partitions(z, n):
for c in combinations(z, n):
x0 = set(c)
x1 = z - x0
yield [x0, x1]
def all_partitions_n(z, ns):
if len(ns) == 0:
yield [z]
return
for c in all_partitions(z, ns[0]):
for d in all_partitions_n(c[1], ns[1:]):
yield c[0:1] + d
z = set(range(np.sum(ns)))
for partitioning in all_partitions_n(z, ns[:]):
x = np.concatenate([list(partition)
for partition in partitioning]).astype(int)
yield x
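# Informal illustration: for ns = (2, 1) there are comb(3, 2) = 3 partitions,
# and each yielded array lists the two indices of the first group followed by
# the remaining index, e.g. [0, 1, 2] (order within a group is not guaranteed,
# since the groups are built from sets).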
def _batch_generator(iterable, batch):
"""A generator that yields batches of elements from an iterable"""
iterator = iter(iterable)
if batch <= 0:
raise ValueError("`batch` must be positive.")
z = [item for i, item in zip(range(batch), iterator)]
while z: # we don't want StopIteration without yielding an empty list
yield z
z = [item for i, item in zip(range(batch), iterator)]
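# Informal illustration: list(_batch_generator(range(5), batch=2))
# -> [[0, 1], [2, 3], [4]]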
def _pairings_permutations_gen(n_permutations, n_samples, n_obs_sample, batch,
random_state):
# Returns a generator that yields arrays of size
# `(batch, n_samples, n_obs_sample)`.
# Each row is an independent permutation of indices 0 to `n_obs_sample`.
batch = min(batch, n_permutations)
if hasattr(random_state, 'permuted'):
def batched_perm_generator():
indices = np.arange(n_obs_sample)
indices = np.tile(indices, (batch, n_samples, 1))
for k in range(0, n_permutations, batch):
batch_actual = min(batch, n_permutations-k)
# Don't permute in place, otherwise results depend on `batch`
permuted_indices = random_state.permuted(indices, axis=-1)
yield permuted_indices[:batch_actual]
else: # RandomState and early Generators don't have `permuted`
def batched_perm_generator():
for k in range(0, n_permutations, batch):
batch_actual = min(batch, n_permutations-k)
size = (batch_actual, n_samples, n_obs_sample)
x = random_state.random(size=size)
yield np.argsort(x, axis=-1)[:batch_actual]
return batched_perm_generator()
def _calculate_null_both(data, statistic, n_permutations, batch,
random_state=None):
"""
Calculate null distribution for independent sample tests.
"""
n_samples = len(data)
# compute number of permutations
# (distinct partitions of data into samples of these sizes)
n_obs_i = [sample.shape[-1] for sample in data] # observations per sample
n_obs_ic = np.cumsum(n_obs_i)
n_obs = n_obs_ic[-1] # total number of observations
n_max = np.prod([comb(n_obs_ic[i], n_obs_ic[i-1])
for i in range(n_samples-1, 0, -1)])
# perm_generator is an iterator that produces permutations of indices
# from 0 to n_obs. We'll concatenate the samples, use these indices to
# permute the data, then split the samples apart again.
if n_permutations >= n_max:
exact_test = True
n_permutations = n_max
perm_generator = _all_partitions_concatenated(n_obs_i)
else:
exact_test = False
# Neither RandomState.permutation nor Generator.permutation
# can permute axis-slices independently. If this feature is
# added in the future, batches of the desired size should be
# generated in a single call.
perm_generator = (random_state.permutation(n_obs)
for i in range(n_permutations))
batch = batch or int(n_permutations)
null_distribution = []
# First, concatenate all the samples. In batches, permute samples with
# indices produced by the `perm_generator`, split them into new samples of
# the original sizes, compute the statistic for each batch, and add these
# statistic values to the null distribution.
data = np.concatenate(data, axis=-1)
for indices in _batch_generator(perm_generator, batch=batch):
indices = np.array(indices)
# `indices` is 2D: each row is a permutation of the indices.
# We use it to index `data` along its last axis, which corresponds
# with observations.
# After indexing, the second to last axis of `data_batch` corresponds
# with permutations, and the last axis corresponds with observations.
data_batch = data[..., indices]
# Move the permutation axis to the front: we'll concatenate a list
# of batched statistic values along this zeroth axis to form the
# null distribution.
data_batch = np.moveaxis(data_batch, -2, 0)
data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1)
null_distribution.append(statistic(*data_batch, axis=-1))
null_distribution = np.concatenate(null_distribution, axis=0)
return null_distribution, n_permutations, exact_test
def _calculate_null_pairings(data, statistic, n_permutations, batch,
random_state=None):
"""
Calculate null distribution for association tests.
"""
n_samples = len(data)
# compute number of permutations (factorial(n) permutations of each sample)
n_obs_sample = data[0].shape[-1] # observations per sample; same for each
n_max = factorial(n_obs_sample)**n_samples
# `perm_generator` is an iterator that produces a list of permutations of
# indices from 0 to n_obs_sample, one for each sample.
if n_permutations >= n_max:
exact_test = True
n_permutations = n_max
batch = batch or int(n_permutations)
# cartesian product of the sets of all permutations of indices
perm_generator = product(*(permutations(range(n_obs_sample))
for i in range(n_samples)))
batched_perm_generator = _batch_generator(perm_generator, batch=batch)
else:
exact_test = False
batch = batch or int(n_permutations)
# Separate random permutations of indices for each sample.
# Again, it would be nice if RandomState/Generator.permutation
# could permute each axis-slice separately.
args = n_permutations, n_samples, n_obs_sample, batch, random_state
batched_perm_generator = _pairings_permutations_gen(*args)
null_distribution = []
for indices in batched_perm_generator:
indices = np.array(indices)
# `indices` is 3D: the zeroth axis is for permutations, the next is
# for samples, and the last is for observations. Swap the first two
# to make the zeroth axis correspond with samples, as it does for
# `data`.
indices = np.swapaxes(indices, 0, 1)
# When we're done, `data_batch` will be a list of length `n_samples`.
# Each element will be a batch of random permutations of one sample.
# The zeroth axis of each batch will correspond with permutations,
# and the last will correspond with observations. (This makes it
# easy to pass into `statistic`.)
data_batch = [None]*n_samples
for i in range(n_samples):
data_batch[i] = data[i][..., indices[i]]
data_batch[i] = np.moveaxis(data_batch[i], -2, 0)
null_distribution.append(statistic(*data_batch, axis=-1))
null_distribution = np.concatenate(null_distribution, axis=0)
return null_distribution, n_permutations, exact_test
def _calculate_null_samples(data, statistic, n_permutations, batch,
random_state=None):
"""
Calculate null distribution for paired-sample tests.
"""
n_samples = len(data)
# By convention, the meaning of the "samples" permutations type for
# data with only one sample is to flip the sign of the observations.
# Achieve this by adding a second sample - the negative of the original.
if n_samples == 1:
data = [data[0], -data[0]]
# The "samples" permutation strategy is the same as the "pairings"
# strategy except the roles of samples and observations are flipped.
# So swap these axes, then we'll use the function for the "pairings"
# strategy to do all the work!
data = np.swapaxes(data, 0, -1)
# (Of course, the user's statistic doesn't know what we've done here,
# so we need to pass it what it's expecting.)
def statistic_wrapped(*data, axis):
data = np.swapaxes(data, 0, -1)
if n_samples == 1:
data = data[0:1]
return statistic(*data, axis=axis)
return _calculate_null_pairings(data, statistic_wrapped, n_permutations,
batch, random_state)
def _permutation_test_iv(data, statistic, permutation_type, vectorized,
n_resamples, batch, alternative, axis, random_state):
"""Input validation for `permutation_test`."""
axis_int = int(axis)
if axis != axis_int:
raise ValueError("`axis` must be an integer.")
permutation_types = {'samples', 'pairings', 'independent'}
permutation_type = permutation_type.lower()
if permutation_type not in permutation_types:
raise ValueError(f"`permutation_type` must be in {permutation_types}.")
if vectorized not in {True, False, None}:
raise ValueError("`vectorized` must be `True`, `False`, or `None`.")
if vectorized is None:
vectorized = 'axis' in inspect.signature(statistic).parameters
if not vectorized:
statistic = _vectorize_statistic(statistic)
message = "`data` must be a tuple containing at least two samples"
try:
if len(data) < 2 and permutation_type == 'independent':
raise ValueError(message)
except TypeError:
raise TypeError(message)
data = _broadcast_arrays(data, axis)
data_iv = []
for sample in data:
sample = np.atleast_1d(sample)
if sample.shape[axis] <= 1:
raise ValueError("each sample in `data` must contain two or more "
"observations along `axis`.")
sample = np.moveaxis(sample, axis_int, -1)
data_iv.append(sample)
n_resamples_int = (int(n_resamples) if not np.isinf(n_resamples)
else np.inf)
if n_resamples != n_resamples_int or n_resamples_int <= 0:
raise ValueError("`n_resamples` must be a positive integer.")
if batch is None:
batch_iv = batch
else:
batch_iv = int(batch)
if batch != batch_iv or batch_iv <= 0:
raise ValueError("`batch` must be a positive integer or None.")
alternatives = {'two-sided', 'greater', 'less'}
alternative = alternative.lower()
if alternative not in alternatives:
raise ValueError(f"`alternative` must be in {alternatives}")
random_state = check_random_state(random_state)
return (data_iv, statistic, permutation_type, vectorized, n_resamples_int,
batch_iv, alternative, axis_int, random_state)
def permutation_test(data, statistic, *, permutation_type='independent',
vectorized=None, n_resamples=9999, batch=None,
alternative="two-sided", axis=0, random_state=None):
r"""
Performs a permutation test of a given statistic on provided data.
For independent sample statistics, the null hypothesis is that the data are
randomly sampled from the same distribution.
    For paired sample statistics, two null hypotheses can be tested:
that the data are paired at random or that the data are assigned to samples
at random.
Parameters
----------
data : iterable of array-like
Contains the samples, each of which is an array of observations.
Dimensions of sample arrays must be compatible for broadcasting except
along `axis`.
statistic : callable
Statistic for which the p-value of the hypothesis test is to be
calculated. `statistic` must be a callable that accepts samples
as separate arguments (e.g. ``statistic(*data)``) and returns the
resulting statistic.
If `vectorized` is set ``True``, `statistic` must also accept a keyword
argument `axis` and be vectorized to compute the statistic along the
provided `axis` of the sample arrays.
permutation_type : {'independent', 'samples', 'pairings'}, optional
The type of permutations to be performed, in accordance with the
null hypothesis. The first two permutation types are for paired sample
statistics, in which all samples contain the same number of
observations and observations with corresponding indices along `axis`
are considered to be paired; the third is for independent sample
statistics.
- ``'samples'`` : observations are assigned to different samples
but remain paired with the same observations from other samples.
This permutation type is appropriate for paired sample hypothesis
tests such as the Wilcoxon signed-rank test and the paired t-test.
- ``'pairings'`` : observations are paired with different observations,
but they remain within the same sample. This permutation type is
appropriate for association/correlation tests with statistics such
as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's
:math:`r`.
- ``'independent'`` (default) : observations are assigned to different
samples. Samples may contain different numbers of observations. This
permutation type is appropriate for independent sample hypothesis
tests such as the Mann-Whitney :math:`U` test and the independent
sample t-test.
Please see the Notes section below for more detailed descriptions
of the permutation types.
vectorized : bool, optional
If `vectorized` is set ``False``, `statistic` will not be passed
keyword argument `axis` and is expected to calculate the statistic
only for 1D samples. If ``True``, `statistic` will be passed keyword
argument `axis` and is expected to calculate the statistic along `axis`
when passed an ND sample array. If ``None`` (default), `vectorized`
will be set ``True`` if ``axis`` is a parameter of `statistic`. Use
of a vectorized statistic typically reduces computation time.
n_resamples : int or np.inf, default: 9999
Number of random permutations (resamples) used to approximate the null
distribution. If greater than or equal to the number of distinct
permutations, the exact null distribution will be computed.
Note that the number of distinct permutations grows very rapidly with
the sizes of samples, so exact tests are feasible only for very small
data sets.
batch : int, optional
The number of permutations to process in each call to `statistic`.
Memory usage is O(`batch`*``n``), where ``n`` is the total size
of all samples, regardless of the value of `vectorized`. Default is
``None``, in which case ``batch`` is the number of permutations.
alternative : {'two-sided', 'less', 'greater'}, optional
The alternative hypothesis for which the p-value is calculated.
For each alternative, the p-value is defined for exact tests as
follows.
- ``'greater'`` : the percentage of the null distribution that is
greater than or equal to the observed value of the test statistic.
- ``'less'`` : the percentage of the null distribution that is
less than or equal to the observed value of the test statistic.
- ``'two-sided'`` (default) : twice the smaller of the p-values above.
Note that p-values for randomized tests are calculated according to the
conservative (over-estimated) approximation suggested in [2]_ and [3]_
rather than the unbiased estimator suggested in [4]_. That is, when
calculating the proportion of the randomized null distribution that is
as extreme as the observed value of the test statistic, the values in
the numerator and denominator are both increased by one. An
interpretation of this adjustment is that the observed value of the
test statistic is always included as an element of the randomized
null distribution.
The convention used for two-sided p-values is not universal;
the observed test statistic and null distribution are returned in
case a different definition is preferred.
axis : int, default: 0
The axis of the (broadcasted) samples over which to calculate the
statistic. If samples have a different number of dimensions,
singleton dimensions are prepended to samples with fewer dimensions
before `axis` is considered.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Pseudorandom number generator state used to generate permutations.
If `random_state` is ``None`` (default), the
`numpy.random.RandomState` singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState``
instance then that instance is used.
Returns
-------
res : PermutationTestResult
An object with attributes:
statistic : float or ndarray
The observed test statistic of the data.
pvalue : float or ndarray
The p-value for the given alternative.
null_distribution : ndarray
The values of the test statistic generated under the null
hypothesis.
Notes
-----
The three types of permutation tests supported by this function are
described below.
**Unpaired statistics** (``permutation_type='independent'``):
The null hypothesis associated with this permutation type is that all
observations are sampled from the same underlying distribution and that
they have been assigned to one of the samples at random.
Suppose ``data`` contains two samples; e.g. ``a, b = data``.
When ``1 < n_resamples < binom(n, k)``, where
* ``k`` is the number of observations in ``a``,
* ``n`` is the total number of observations in ``a`` and ``b``, and
* ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
the data are pooled (concatenated), randomly assigned to either the first
or second sample, and the statistic is calculated. This process is
performed repeatedly, `n_resamples` times, generating a distribution of the
statistic under the null hypothesis. The statistic of the original
data is compared to this distribution to determine the p-value.
When ``n_resamples >= binom(n, k)``, an exact test is performed: the data
are *partitioned* between the samples in each distinct way exactly once,
and the exact null distribution is formed.
Note that for a given partitioning of the data between the samples,
only one ordering/permutation of the data *within* each sample is
considered. For statistics that do not depend on the order of the data
within samples, this dramatically reduces computational cost without
affecting the shape of the null distribution (because the frequency/count
of each value is affected by the same factor).
For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this
permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``.
Because only one ordering/permutation of the data *within* each sample
is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]``
and ``y = [a4, a3, b1]`` would *not* be considered distinct from the
example above.
``permutation_type='independent'`` does not support one-sample statistics,
but it can be applied to statistics with more than two samples. In this
case, if ``n`` is an array of the number of observations within each
sample, the number of distinct partitions is::
np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)])
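For example, with two samples of sizes 4 and 3, the formula above gives
``binom(7, 3) = 35`` distinct partitions. A quick check of this count
(using `scipy.special.binom`):
>>> import numpy as np
>>> from scipy.special import binom
>>> n = [4, 3]
>>> int(np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]))
35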
**Paired statistics, permute pairings** (``permutation_type='pairings'``):
The null hypothesis associated with this permutation type is that
observations within each sample are drawn from the same underlying
distribution and that pairings with elements of other samples are
assigned at random.
Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we
wish to consider all possible pairings of elements of ``a`` with elements
of a second sample, ``b``. Let ``n`` be the number of observations in
``a``, which must also equal the number of observations in ``b``.
When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are
randomly permuted. The user-supplied statistic accepts one data argument,
say ``a_perm``, and calculates the statistic considering ``a_perm`` and
``b``. This process is performed repeatedly, `n_resamples` times,
generating a distribution of the statistic under the null hypothesis.
The statistic of the original data is compared to this distribution to
determine the p-value.
When ``n_resamples >= factorial(n)``, an exact test is performed:
``a`` is permuted in each distinct way exactly once. Therefore, the
`statistic` is computed for each unique pairing of samples between ``a``
and ``b`` exactly once.
For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left
in its original order.
``permutation_type='pairings'`` supports ``data`` containing any number
of samples, each of which must contain the same number of observations.
All samples provided in ``data`` are permuted *independently*. Therefore,
if ``m`` is the number of samples and ``n`` is the number of observations
within each sample, then the number of permutations in an exact test is::
factorial(n)**m
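For instance, with ``m = 2`` samples of ``n = 3`` observations each, an
exact test evaluates ``factorial(3)**2 = 36`` permutations:
>>> from math import factorial
>>> factorial(3)**2
36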
Note that if a two-sample statistic, for example, does not inherently
depend on the order in which observations are provided - only on the
*pairings* of observations - then only one of the two samples should be
provided in ``data``. This dramatically reduces computational cost without
affecting the shape of the null distribution (because the frequency/count
of each value is affected by the same factor).
**Paired statistics, permute samples** (``permutation_type='samples'``):
The null hypothesis associated with this permutation type is that
observations within each pair are drawn from the same underlying
distribution and that the sample to which they are assigned is random.
Suppose ``data`` contains two samples; e.g. ``a, b = data``.
Let ``n`` be the number of observations in ``a``, which must also equal
the number of observations in ``b``.
When ``1 < n_resamples < 2**n``, the elements of ``a`` and ``b`` are
randomly swapped between samples (maintaining their pairings) and the
statistic is calculated. This process is performed repeatedly,
`n_resamples` times, generating a distribution of the statistic under the
null hypothesis. The statistic of the original data is compared to this
distribution to determine the p-value.
When ``n_resamples >= 2**n``, an exact test is performed: the observations
are assigned to the two samples in each distinct way (while maintaining
pairings) exactly once.
For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``.
``permutation_type='samples'`` supports ``data`` containing any number
of samples, each of which must contain the same number of observations.
If ``data`` contains more than one sample, paired observations within
``data`` are exchanged between samples *independently*. Therefore, if ``m``
is the number of samples and ``n`` is the number of observations within
each sample, then the number of permutations in an exact test is::
factorial(m)**n
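For instance, with ``m = 2`` samples of ``n = 3`` paired observations,
this is ``factorial(2)**3 = 2**3 = 8`` exchanges, consistent with the
``2**n`` count given above:
>>> from math import factorial
>>> factorial(2)**3
8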
Several paired-sample statistical tests, such as the Wilcoxon signed rank
test and paired-sample t-test, can be performed considering only the
*difference* between two paired elements. Accordingly, if ``data`` contains
only one sample, then the null distribution is formed by independently
changing the *sign* of each observation.
.. warning::
The p-value is calculated by counting the elements of the null
distribution that are as extreme or more extreme than the observed
value of the statistic. Due to the use of finite precision arithmetic,
some statistic functions return numerically distinct values when the
theoretical values would be exactly equal. In some cases, this could
lead to a large error in the calculated p-value. `permutation_test`
guards against this by considering elements in the null distribution
that are "close" (within a factor of ``1+1e-14``) to the observed
value of the test statistic as equal to the observed value of the
test statistic. However, the user is advised to inspect the null
distribution to assess whether this method of comparison is
appropriate, and if not, calculate the p-value manually. See example
below.
References
----------
.. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951).
.. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
Statistical Applications in Genetics and Molecular Biology 9.1 (2010).
.. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference".
Statistical Science (2004).
.. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap
(1993).
Examples
--------
Suppose we wish to test whether two samples are drawn from the same
distribution. Assume that the underlying distributions are unknown to us,
and that before observing the data, we hypothesized that the mean of the
first sample would be less than that of the second sample. We decide that
we will use the difference between the sample means as a test statistic,
and we will consider a p-value of 0.05 to be statistically significant.
For efficiency, we write the function defining the test statistic in a
vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the
statistic will be calculated for each axis-slice along `axis`.
>>> import numpy as np
>>> def statistic(x, y, axis):
... return np.mean(x, axis=axis) - np.mean(y, axis=axis)
After collecting our data, we calculate the observed value of the test
statistic.
>>> from scipy.stats import norm
>>> rng = np.random.default_rng()
>>> x = norm.rvs(size=5, random_state=rng)
>>> y = norm.rvs(size=6, loc = 3, random_state=rng)
>>> statistic(x, y, 0)
-3.5411688580987266
Indeed, the test statistic is negative, suggesting that the true mean of
the distribution underlying ``x`` is less than that of the distribution
underlying ``y``. To determine the probability of this occurring by chance
if the two samples were drawn from the same distribution, we perform
a permutation test.
>>> from scipy.stats import permutation_test
>>> # because our statistic is vectorized, we pass `vectorized=True`
>>> # `n_resamples=np.inf` indicates that an exact test is to be performed
>>> res = permutation_test((x, y), statistic, vectorized=True,
... n_resamples=np.inf, alternative='less')
>>> print(res.statistic)
-3.5411688580987266
>>> print(res.pvalue)
0.004329004329004329
The probability of obtaining a test statistic less than or equal to the
observed value under the null hypothesis is 0.4329%. This is less than our
chosen threshold of 5%, so we consider this to be significant evidence
against the null hypothesis in favor of the alternative.
Because the size of the samples above was small, `permutation_test` could
perform an exact test. For larger samples, we resort to a randomized
permutation test.
>>> x = norm.rvs(size=100, random_state=rng)
>>> y = norm.rvs(size=120, loc=0.3, random_state=rng)
>>> res = permutation_test((x, y), statistic, n_resamples=100000,
... vectorized=True, alternative='less',
... random_state=rng)
>>> print(res.statistic)
-0.5230459671240913
>>> print(res.pvalue)
0.00016999830001699983
The approximate probability of obtaining a test statistic less than or
equal to the observed value under the null hypothesis is 0.017%. This is
again less than our chosen threshold of 5%, so again we have significant
evidence to reject the null hypothesis in favor of the alternative.
For large samples and number of permutations, the result is comparable to
that of the corresponding asymptotic test, the independent sample t-test.
>>> from scipy.stats import ttest_ind
>>> res_asymptotic = ttest_ind(x, y, alternative='less')
>>> print(res_asymptotic.pvalue)
0.00012688101537979522
The permutation distribution of the test statistic is provided for
further investigation.
>>> import matplotlib.pyplot as plt
>>> plt.hist(res.null_distribution, bins=50)
>>> plt.title("Permutation distribution of test statistic")
>>> plt.xlabel("Value of Statistic")
>>> plt.ylabel("Frequency")
>>> plt.show()
Inspection of the null distribution is essential if the statistic suffers
from inaccuracy due to limited machine precision. Consider the following
case:
>>> from scipy.stats import pearsonr
>>> x = [1, 2, 4, 3]
>>> y = [2, 4, 6, 8]
>>> def statistic(x, y):
... return pearsonr(x, y).statistic
>>> res = permutation_test((x, y), statistic, vectorized=False,
... permutation_type='pairings',
... alternative='greater')
>>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution
In this case, some elements of the null distribution differ from the
observed value of the correlation coefficient ``r`` due to numerical noise.
We manually inspect the elements of the null distribution that are nearly
the same as the observed value of the test statistic.
>>> r
0.8
>>> unique = np.unique(null)
>>> unique
array([-1. , -0.8, -0.8, -0.6, -0.4, -0.2, -0.2, 0. , 0.2, 0.2, 0.4,
0.6, 0.8, 0.8, 1. ]) # may vary
>>> unique[np.isclose(r, unique)].tolist()
[0.7999999999999999, 0.8]
If `permutation_test` were to perform the comparison naively, the
elements of the null distribution with value ``0.7999999999999999`` would
not be considered as extreme or more extreme as the observed value of the
statistic, so the calculated p-value would be too small.
>>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null)
>>> incorrect_pvalue
0.1111111111111111 # may vary
Instead, `permutation_test` treats elements of the null distribution that
are within ``max(1e-14, abs(r)*1e-14)`` of the observed value of the
statistic ``r`` to be equal to ``r``.
>>> correct_pvalue = np.count_nonzero(null >= r - 1e-14) / len(null)
>>> correct_pvalue
0.16666666666666666
>>> res.pvalue == correct_pvalue
True
This method of comparison is expected to be accurate in most practical
situations, but the user is advised to assess this by inspecting the
elements of the null distribution that are close to the observed value
of the statistic. Also, consider the use of statistics that can be
calculated using exact arithmetic (e.g. integer statistics).
"""
args = _permutation_test_iv(data, statistic, permutation_type, vectorized,
n_resamples, batch, alternative, axis,
random_state)
(data, statistic, permutation_type, vectorized, n_resamples, batch,
alternative, axis, random_state) = args
observed = statistic(*data, axis=-1)
null_calculators = {"pairings": _calculate_null_pairings,
"samples": _calculate_null_samples,
"independent": _calculate_null_both}
null_calculator_args = (data, statistic, n_resamples,
batch, random_state)
calculate_null = null_calculators[permutation_type]
null_distribution, n_resamples, exact_test = (
calculate_null(*null_calculator_args))
# See References [2] and [3]
adjustment = 0 if exact_test else 1
# relative tolerance for detecting numerically distinct but
# theoretically equal values in the null distribution
eps = 1e-14
gamma = np.maximum(eps, np.abs(eps * observed))
def less(null_distribution, observed):
cmps = null_distribution <= observed + gamma
pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment)
return pvalues
def greater(null_distribution, observed):
cmps = null_distribution >= observed - gamma
pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment)
return pvalues
def two_sided(null_distribution, observed):
pvalues_less = less(null_distribution, observed)
pvalues_greater = greater(null_distribution, observed)
pvalues = np.minimum(pvalues_less, pvalues_greater) * 2
return pvalues
compare = {"less": less,
"greater": greater,
"two-sided": two_sided}
pvalues = compare[alternative](null_distribution, observed)
pvalues = np.clip(pvalues, 0, 1)
return PermutationTestResult(observed, pvalues, null_distribution)
@dataclass
class ResamplingMethod:
"""Configuration information for a statistical resampling method.
Instances of this class can be passed into the `method` parameter of some
hypothesis test functions to perform a resampling or Monte Carlo version
of the hypothesis test.
Attributes
----------
n_resamples : int
The number of resamples to perform or Monte Carlo samples to draw.
batch : int, optional
The number of resamples to process in each vectorized call to
the statistic. Batch sizes >>1 tend to be faster when the statistic
is vectorized, but memory usage scales linearly with the batch size.
Default is ``None``, which processes all resamples in a single batch.
"""
n_resamples: int = 9999
batch: int = None # type: ignore[assignment]
@dataclass
class MonteCarloMethod(ResamplingMethod):
"""Configuration information for a Monte Carlo hypothesis test.
Instances of this class can be passed into the `method` parameter of some
hypothesis test functions to perform a Monte Carlo version of the
hypothesis tests.
Attributes
----------
n_resamples : int, optional
The number of Monte Carlo samples to draw. Default is 9999.
batch : int, optional
The number of Monte Carlo samples to process in each vectorized call to
the statistic. Batch sizes >>1 tend to be faster when the statistic
is vectorized, but memory usage scales linearly with the batch size.
Default is ``None``, which processes all samples in a single batch.
rvs : callable or tuple of callables, optional
A callable or sequence of callables that generates random variates
under the null hypothesis. Each element of `rvs` must be a callable
that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and
returns an N-d array sample of that shape. If `rvs` is a sequence, the
number of callables in `rvs` must match the number of samples passed
to the hypothesis test in which the `MonteCarloMethod` is used. Default
is ``None``, in which case the hypothesis test function chooses values
to match the standard version of the hypothesis test. For example,
the null hypothesis of `scipy.stats.pearsonr` is typically that the
samples are drawn from the standard normal distribution, so
``rvs = (rng.normal, rng.normal)`` where
``rng = np.random.default_rng()``.
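Examples
--------
A minimal usage sketch, assuming the hypothesis test (here
`scipy.stats.pearsonr`) accepts a ``method`` argument:
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> method = stats.MonteCarloMethod(n_resamples=999,
...                                 rvs=(rng.normal, rng.normal))
>>> x, y = rng.normal(size=10), rng.normal(size=10)
>>> res = stats.pearsonr(x, y, method=method)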
"""
rvs: object = None
def _asdict(self):
# `dataclasses.asdict` deepcopies; we don't want that.
return dict(n_resamples=self.n_resamples, batch=self.batch,
rvs=self.rvs)
@dataclass
class PermutationMethod(ResamplingMethod):
"""Configuration information for a permutation hypothesis test.
Instances of this class can be passed into the `method` parameter of some
hypothesis test functions to perform a permutation version of the
hypothesis tests.
Attributes
----------
n_resamples : int, optional
The number of resamples to perform. Default is 9999.
batch : int, optional
The number of resamples to process in each vectorized call to
the statistic. Batch sizes >>1 tend to be faster when the statistic
is vectorized, but memory usage scales linearly with the batch size.
Default is ``None``, which processes all resamples in a single batch.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Pseudorandom number generator state used to generate resamples.
If `random_state` is already a ``Generator`` or ``RandomState``
instance, then that instance is used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is ``None`` (default), the
`numpy.random.RandomState` singleton is used.
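Examples
--------
A minimal usage sketch, assuming the hypothesis test (here
`scipy.stats.pearsonr`) accepts a ``method`` argument:
>>> from scipy import stats
>>> method = stats.PermutationMethod(n_resamples=999, random_state=42)
>>> res = stats.pearsonr([1, 2, 3, 4], [1, 3, 2, 4], method=method)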
"""
random_state: object = None
def _asdict(self):
# `dataclasses.asdict` deepcopies; we don't want that.
return dict(n_resamples=self.n_resamples, batch=self.batch,
random_state=self.random_state)
@dataclass
class BootstrapMethod(ResamplingMethod):
"""Configuration information for a bootstrap confidence interval.
Instances of this class can be passed into the `method` parameter of some
confidence interval methods to generate a bootstrap confidence interval.
Attributes
----------
n_resamples : int, optional
The number of resamples to perform. Default is 9999.
batch : int, optional
The number of resamples to process in each vectorized call to
the statistic. Batch sizes >>1 tend to be faster when the statistic
is vectorized, but memory usage scales linearly with the batch size.
Default is ``None``, which processes all resamples in a single batch.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Pseudorandom number generator state used to generate resamples.
If `random_state` is already a ``Generator`` or ``RandomState``
instance, then that instance is used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is ``None`` (default), the
`numpy.random.RandomState` singleton is used.
method : {'bca', 'percentile', 'basic'}
Whether to use the 'percentile' bootstrap ('percentile'), the 'basic'
(AKA 'reverse') bootstrap ('basic'), or the bias-corrected and
accelerated bootstrap ('BCa', default).
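Examples
--------
A minimal usage sketch, assuming the result object (here from
`scipy.stats.pearsonr`) exposes a ``confidence_interval`` method that
accepts ``method``:
>>> from scipy import stats
>>> res = stats.pearsonr([1, 2, 3, 4, 5], [1, 3, 2, 4, 5])
>>> method = stats.BootstrapMethod(n_resamples=999, random_state=42)
>>> ci = res.confidence_interval(method=method)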
"""
random_state: object = None
method: str = 'BCa'
def _asdict(self):
# `dataclasses.asdict` deepcopies; we don't want that.
return dict(n_resamples=self.n_resamples, batch=self.batch,
random_state=self.random_state, method=self.method)
| 80,044
| 42.314394
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/_boost/setup.py
|
import pathlib
def pre_build_hook(build_ext, ext):
from scipy._build_utils.compiler_helper import get_cxx_std_flag
std_flag = get_cxx_std_flag(build_ext._cxx_compiler)
if std_flag is not None:
ext.extra_compile_args.append(std_flag)
def configuration(parent_package='', top_path=None):
from scipy._lib._boost_utils import _boost_dir
from scipy._build_utils import import_file
from numpy.distutils.misc_util import Configuration
import numpy as np
config = Configuration('_boost', parent_package, top_path)
DEFINES = [
('BOOST_MATH_STANDALONE', '1'),
# return nan instead of throwing
('BOOST_MATH_DOMAIN_ERROR_POLICY', 'ignore_error'),
('BOOST_MATH_EVALUATION_ERROR_POLICY', 'user_error'),
('BOOST_MATH_OVERFLOW_ERROR_POLICY', 'user_error'),
('BOOST_MATH_PROMOTE_DOUBLE_POLICY', 'false')
]
INCLUDES = [
'include/',
'src/',
np.get_include(),
_boost_dir(),
]
# generate the PXD and PYX wrappers
boost_dir = pathlib.Path(__file__).parent
src_dir = boost_dir / 'src'
_klass_mapper = import_file(boost_dir / 'include', '_info')._klass_mapper
for s in _klass_mapper.values():
ext = config.add_extension(
f'{s.scipy_name}_ufunc',
sources=[f'{src_dir}/{s.scipy_name}_ufunc.cxx'],
include_dirs=INCLUDES,
define_macros=DEFINES,
language='c++',
depends=[
'include/func_defs.hpp',
'include/Templated_PyUFunc.hpp',
],
)
# Add c++11/14 support:
ext._pre_build_hook = pre_build_hook
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 1,818
| 30.362069
| 77
|
py
|
scipy
|
scipy-main/scipy/stats/_boost/__init__.py
|
from scipy.stats._boost.beta_ufunc import (
_beta_pdf, _beta_cdf, _beta_sf, _beta_ppf,
_beta_isf, _beta_mean, _beta_variance,
_beta_skewness, _beta_kurtosis_excess,
)
from scipy.stats._boost.binom_ufunc import (
_binom_pdf, _binom_cdf, _binom_sf, _binom_ppf,
_binom_isf, _binom_mean, _binom_variance,
_binom_skewness, _binom_kurtosis_excess,
)
from scipy.stats._boost.nbinom_ufunc import (
_nbinom_pdf, _nbinom_cdf, _nbinom_sf, _nbinom_ppf,
_nbinom_isf, _nbinom_mean, _nbinom_variance,
_nbinom_skewness, _nbinom_kurtosis_excess,
)
from scipy.stats._boost.hypergeom_ufunc import (
_hypergeom_pdf, _hypergeom_cdf, _hypergeom_sf, _hypergeom_ppf,
_hypergeom_isf, _hypergeom_mean, _hypergeom_variance,
_hypergeom_skewness, _hypergeom_kurtosis_excess,
)
from scipy.stats._boost.ncf_ufunc import (
_ncf_pdf, _ncf_cdf, _ncf_sf, _ncf_ppf,
_ncf_isf, _ncf_mean, _ncf_variance,
_ncf_skewness, _ncf_kurtosis_excess,
)
from scipy.stats._boost.ncx2_ufunc import (
_ncx2_pdf, _ncx2_cdf, _ncx2_sf, _ncx2_ppf,
_ncx2_isf, _ncx2_mean, _ncx2_variance,
_ncx2_skewness, _ncx2_kurtosis_excess,
)
from scipy.stats._boost.nct_ufunc import (
_nct_pdf, _nct_cdf, _nct_sf, _nct_ppf,
_nct_isf, _nct_mean, _nct_variance,
_nct_skewness, _nct_kurtosis_excess,
)
from scipy.stats._boost.skewnorm_ufunc import (
_skewnorm_pdf, _skewnorm_cdf, _skewnorm_sf, _skewnorm_ppf,
_skewnorm_isf, _skewnorm_mean, _skewnorm_variance,
_skewnorm_skewness, _skewnorm_kurtosis_excess,
)
from scipy.stats._boost.invgauss_ufunc import (
_invgauss_pdf, _invgauss_cdf, _invgauss_sf, _invgauss_ppf,
_invgauss_isf, _invgauss_mean, _invgauss_variance,
_invgauss_skewness, _invgauss_kurtosis_excess,
)
| 1,759
| 31.592593
| 66
|
py
|
scipy
|
scipy-main/scipy/stats/_boost/include/gen_func_defs_pxd.py
|
'''Generate func_defs.pxd'''
import pathlib
def _gen_func_defs_pxd(outfile, x_funcs, no_x_funcs, max_num_inputs=4):
'''
Cython does not support template parameter packs, so to keep it
from freaking out, we'll manually produce all the different template
expansions we need to call in the cython wrappers.
'''
contents = ('# This file was generated by stats/_boost/include/'
'_gen_func_defs_pxd.py\n')
contents += '# All modifications to this file will be overwritten.\n'
hdr = str((pathlib.Path(__file__).parent / "func_defs.hpp").as_posix())
contents += f'cdef extern from "{hdr}" namespace "" nogil:\n'
for ii in range(1, max_num_inputs+1):
template_args = ', '.join(f'T{jj} arg{jj}' for jj in range(1, ii+1))
template_types = ', '.join(f'T{jj}' for jj in range(1, ii+1))
# for all the different "overloads", we need to produce a
# distinct Cython reference;
# assumes that all number template types are the same,
# i.e. RealType == T1 == T2 == etc
for func in x_funcs:
fname = f'boost_{func}'
tmpl = f'Dist, RealType, {template_types}'
contents += (f' RealType {fname}{ii} "{fname}" '
f'[{tmpl}](RealType x, {template_args})\n')
for func in no_x_funcs:
fname = f'boost_{func}'
tmpl = f'Dist, RealType, {template_types}'
contents += (f' RealType {fname}{ii} "{fname}" '
f'[{tmpl}]({template_args})\n')
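# Illustrative sketch: for func == 'pdf' and ii == 2, the generated line is
#   RealType boost_pdf2 "boost_pdf" [Dist, RealType, T1, T2](RealType x, T1 arg1, T2 arg2)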
# patch for boost::math::beta_distribution to handle x = 0 when a < 1
# and x = 1 when beta < 1
contents += (' RealType boost_pdf_beta2 "boost_pdf_beta" '
'[Dist, RealType, T1, T2](RealType x, T1 a, T2 b)')
with open(outfile, 'w') as fp:
fp.write(contents)
| 1,859
| 40.333333
| 76
|
py
|
scipy
|
scipy-main/scipy/stats/_boost/include/code_gen.py
|
'''Generate Cython PYX wrappers for Boost stats distributions.'''
from typing import NamedTuple
from warnings import warn
from textwrap import dedent
from shutil import copyfile
import pathlib
import argparse
from gen_func_defs_pxd import ( # type: ignore
_gen_func_defs_pxd)
from _info import ( # type: ignore
_x_funcs, _no_x_funcs, _klass_mapper)
class _MethodDef(NamedTuple):
ufunc_name: str
num_inputs: int
boost_func_name: str
def _ufunc_gen(scipy_dist: str, types: list, ctor_args: tuple,
filename: str, boost_dist: str, x_funcs: list,
no_x_funcs: list, distutils_build: bool):
'''
We need methods defined for each rv_continuous/_discrete internal method:
i.e.: _pdf, _cdf, etc.
Some of these methods take constructor arguments and 1 extra argument,
e.g.: _pdf(x, *ctor_args), _ppf(q, *ctor_args)
while some of the methods take only constructor arguments:
e.g.: _stats(*ctor_args)
'''
num_ctor_args = len(ctor_args)
methods = [_MethodDef(
ufunc_name=f'_{scipy_dist}_{x_func}',
num_inputs=num_ctor_args+1, # +1 for the x argument
# PDF for the beta distribution has a custom wrapper:
boost_func_name=x_func if boost_dist != 'beta_distribution'
else 'pdf_beta' if x_func == 'pdf' else x_func,
) for x_func in x_funcs]
methods += [_MethodDef(
ufunc_name=f'_{scipy_dist}_{func}',
num_inputs=num_ctor_args,
boost_func_name=func,
) for func in no_x_funcs]
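# Illustrative sketch: for scipy_dist='beta' (ctor_args ('a', 'b')) and
# x_func='pdf', this yields ufunc_name='_beta_pdf' with num_inputs=3
# (x plus the two constructor arguments).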
# Identify potential ufunc issues:
no_input_methods = [m for m in methods if m.num_inputs == 0]
if no_input_methods:
raise ValueError("ufuncs must have >0 arguments! "
f"Cannot construct these ufuncs: {no_input_methods}")
boost_hdr_name = boost_dist.split('_distribution')[0]
unique_num_inputs = set({m.num_inputs for m in methods})
has_NPY_FLOAT16 = 'NPY_FLOAT16' in types
line_joiner = ',\n ' + ' '*12
num_types = len(types)
loop_fun = 'PyUFunc_T'
func_defs_cimports = line_joiner.join(
f"boost_{m.boost_func_name}{num_ctor_args}" for m in methods)
nontype_params = line_joiner[1:].join(
f'ctypedef int NINPUTS{n} "{n}"' for n in unique_num_inputs)
with open(filename, 'w') as fp:
boost_hdr = f'boost/math/distributions/{boost_hdr_name}.hpp'
if distutils_build:
# There's no __init__.py here, so no `from .xxx cimport`
relimport = ''
else:
relimport = '.'
fp.write(dedent(f'''\
# distutils: language = c++
# cython: language_level=3
# This file was generated by stats/_boost/include/code_gen.py
# All modifications to this file will be overwritten.
from numpy cimport (
import_array,
import_ufunc,
PyUFunc_FromFuncAndData,
PyUFuncGenericFunction,
PyUFunc_None,
{line_joiner.join(types)}
)
from {relimport}templated_pyufunc cimport PyUFunc_T
from {relimport}func_defs cimport (
{func_defs_cimports},
)
cdef extern from "{boost_hdr}" namespace "boost::math" nogil:
cdef cppclass {boost_dist} nogil:
pass
# Workaround for Cython's lack of non-type template parameter
# support
cdef extern from * nogil:
{nontype_params}
_DUMMY = ""
import_array()
import_ufunc()
'''))
if has_NPY_FLOAT16:
warn('Boost stats NPY_FLOAT16 ufunc generation not '
'currently supported!')
# Generate ufuncs for each method
for ii, m in enumerate(methods):
fp.write(dedent(f'''
cdef PyUFuncGenericFunction loop_func{ii}[{num_types}]
cdef void* func{ii}[1*{num_types}]
cdef char types{ii}[{m.num_inputs+1}*{num_types}]
''')) # m.num_inputs+1 for output arg
for jj, T in enumerate(types):
ctype = {
'NPY_DOUBLE': 'double',
'NPY_FLOAT': 'float',
'NPY_FLOAT16': 'npy_half',
}[T]
boost_fun = f'boost_{m.boost_func_name}{num_ctor_args}'
type_str = ", ".join([ctype]*(1+num_ctor_args))
boost_tmpl = f'{boost_dist}, {type_str}'
N = m.num_inputs
fp.write(f'''\
loop_func{ii}[{jj}] = <PyUFuncGenericFunction>{loop_fun}[{ctype}, NINPUTS{N}]
func{ii}[{jj}] = <void*>{boost_fun}[{boost_tmpl}]
''')
for tidx in range(m.num_inputs+1):
fp.write(
f'types{ii}[{tidx}+{jj}*{m.num_inputs+1}] = {T}\n')
arg_list_str = ', '.join(ctor_args)
if m.boost_func_name in x_funcs:
arg_list_str = 'x, ' + arg_list_str
fp.write(dedent(f'''
{m.ufunc_name} = PyUFunc_FromFuncAndData(
loop_func{ii},
func{ii},
types{ii},
{num_types}, # number of supported input types
{m.num_inputs}, # number of input args
1, # number of output args
PyUFunc_None, # `identity` element, never mind this
"{m.ufunc_name}", # function name
("{m.ufunc_name}({arg_list_str}) -> computes "
"{m.boost_func_name} of {scipy_dist} distribution"),
0 # unused
)
'''))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--outdir", type=str,
help="Path to the output directory")
parser.add_argument("--distutils-build", type=bool, default=False,
help="Whether or not this is a distutils build")
args = parser.parse_args()
_boost_dir = pathlib.Path(__file__).resolve().parent.parent
if args.outdir:
src_dir = pathlib.Path(args.outdir)
else:
# We're using setup.py here, not Meson. Create target directory
src_dir = _boost_dir / 'src'
src_dir.mkdir(exist_ok=True, parents=True)
# copy contents of include into directory to satisfy Cython
# PXD include conditions
inc_dir = _boost_dir / 'include'
src = 'templated_pyufunc.pxd'
copyfile(inc_dir / src, src_dir / src)
# generate the PXD and PYX wrappers
_gen_func_defs_pxd(
f'{src_dir}/func_defs.pxd',
x_funcs=_x_funcs,
no_x_funcs=_no_x_funcs)
float_types = ['NPY_FLOAT', 'NPY_DOUBLE']
for b, s in _klass_mapper.items():
_ufunc_gen(
scipy_dist=s.scipy_name,
types=float_types,
ctor_args=s.ctor_args,
filename=f'{src_dir}/{s.scipy_name}_ufunc.pyx',
boost_dist=f'{b}_distribution',
x_funcs=_x_funcs,
no_x_funcs=_no_x_funcs,
distutils_build=args.distutils_build,
)
| 7,233
| 36.481865
| 78
|
py
|
scipy
|
scipy-main/scipy/stats/_boost/include/_info.py
|
from typing import NamedTuple
class _KlassMap(NamedTuple):
scipy_name: str
ctor_args: tuple
# map boost stats classes to scipy class names and
# constructor arguments; b -> (s, ('ctor', 'args', ...))
_klass_mapper = {
'beta': _KlassMap('beta', ('a', 'b')),
'binomial': _KlassMap('binom', ('n', 'p')),
'negative_binomial': _KlassMap('nbinom', ('n', 'p')),
'hypergeometric': _KlassMap('hypergeom', ('r', 'n', 'N')),
'non_central_f': _KlassMap('ncf', ('dfn', 'dfd', 'nc')),
'non_central_chi_squared': _KlassMap('ncx2', ('df', 'nc')),
'non_central_t': _KlassMap('nct', ('df', 'nc')),
'skew_normal': _KlassMap('skewnorm', ('loc', 'scale', 'a',)),
'inverse_gaussian': _KlassMap('invgauss', ('mu', 'mean')),
}
# functions that take ctor params and parameter "x"
_x_funcs = ('pdf', 'cdf', 'sf', 'ppf', 'isf')
# functions that take only ctor params
_no_x_funcs = ('mean', 'variance', 'skewness', 'kurtosis_excess')
| 956
| 33.178571
| 65
|
py
|
scipy
|
scipy-main/scipy/stats/_levy_stable/setup.py
|
from os.path import join
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('_levy_stable', parent_package, top_path)
config.add_library(
'_levyst',
sources=[join('c_src', 'levyst.c')],
headers=[join('c_src', 'levyst.h')]
)
config.add_extension(
'levyst',
libraries=['_levyst'],
sources=['levyst.c']
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 594
| 22.8
| 68
|
py
|
scipy
|
scipy-main/scipy/stats/_levy_stable/__init__.py
|
#
import warnings
from functools import partial
import numpy as np
from scipy import optimize
from scipy import integrate
from scipy.integrate._quadrature import _builtincoeffs
from scipy import interpolate
from scipy.interpolate import RectBivariateSpline
import scipy.special as sc
from scipy._lib._util import _lazywhere
from .._distn_infrastructure import rv_continuous, _ShapeInfo
from .._continuous_distns import uniform, expon, _norm_pdf, _norm_cdf
from .levyst import Nolan
from scipy._lib.doccer import inherit_docstring_from
__all__ = ["levy_stable", "levy_stable_gen", "pdf_from_cf_with_fft"]
# Stable distributions are known for various parameterisations
# some being advantageous for numerical considerations and others
# useful due to their location/scale awareness.
#
# Here we follow [NO] convention (see the references in the docstring
# for levy_stable_gen below).
#
# S0 / Z0 / x0 (aka Zolotarev's M)
# S1 / Z1 / x1
#
# Where S* denotes parameterisation, Z* denotes standardized
# version where gamma = 1, delta = 0 and x* denotes variable.
#
# Scipy's original Stable was a random variate generator. It
# uses S1 and unfortunately is not location/scale aware.
# default numerical integration tolerance
# used for epsrel in piecewise and both epsrel and epsabs in dni
# (epsabs needed in dni since weighted quad requires epsabs > 0)
_QUAD_EPS = 1.2e-14
def _Phi_Z0(alpha, t):
return (
-np.tan(np.pi * alpha / 2) * (np.abs(t) ** (1 - alpha) - 1)
if alpha != 1
else -2.0 * np.log(np.abs(t)) / np.pi
)
def _Phi_Z1(alpha, t):
return (
np.tan(np.pi * alpha / 2)
if alpha != 1
else -2.0 * np.log(np.abs(t)) / np.pi
)
def _cf(Phi, t, alpha, beta):
"""Characteristic function."""
return np.exp(
-(np.abs(t) ** alpha) * (1 - 1j * beta * np.sign(t) * Phi(alpha, t))
)
_cf_Z0 = partial(_cf, _Phi_Z0)
_cf_Z1 = partial(_cf, _Phi_Z1)
def _pdf_single_value_cf_integrate(Phi, x, alpha, beta, **kwds):
"""To improve DNI accuracy convert characteristic function in to real
valued integral using Euler's formula, then exploit cosine symmetry to
change limits to [0, inf). Finally use cosine addition formula to split
into two parts that can be handled by weighted quad pack.
"""
quad_eps = kwds.get("quad_eps", _QUAD_EPS)
def integrand1(t):
if t == 0:
return 0
return np.exp(-(t ** alpha)) * (
np.cos(beta * (t ** alpha) * Phi(alpha, t))
)
def integrand2(t):
if t == 0:
return 0
return np.exp(-(t ** alpha)) * (
np.sin(beta * (t ** alpha) * Phi(alpha, t))
)
with np.errstate(invalid="ignore"):
int1, *ret1 = integrate.quad(
integrand1,
0,
np.inf,
weight="cos",
wvar=x,
limit=1000,
epsabs=quad_eps,
epsrel=quad_eps,
full_output=1,
)
int2, *ret2 = integrate.quad(
integrand2,
0,
np.inf,
weight="sin",
wvar=x,
limit=1000,
epsabs=quad_eps,
epsrel=quad_eps,
full_output=1,
)
return (int1 + int2) / np.pi
_pdf_single_value_cf_integrate_Z0 = partial(
_pdf_single_value_cf_integrate, _Phi_Z0
)
_pdf_single_value_cf_integrate_Z1 = partial(
_pdf_single_value_cf_integrate, _Phi_Z1
)
def _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta):
"""Round x close to zeta for Nolan's method in [NO]."""
# "8. When |x0-beta*tan(pi*alpha/2)| is small, the
# computations of the density and cumulative have numerical problems.
# The program works around this by setting
# z = beta*tan(pi*alpha/2) when
# |z-beta*tan(pi*alpha/2)| < tol(5)*alpha**(1/alpha).
# (The bound on the right is ad hoc, to get reasonable behavior
# when alpha is small)."
# where tol(5) = 0.5e-2 by default.
#
# We seem to have partially addressed this through re-expression of
# g(theta) here, but it still needs to be used in some extreme cases.
# Perhaps tol(5) = 0.5e-2 could be reduced for our implementation.
if np.abs(x0 - zeta) < x_tol_near_zeta * alpha ** (1 / alpha):
x0 = zeta
return x0
def _nolan_round_difficult_input(
x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
):
"""Round difficult input values for Nolan's method in [NO]."""
# following Nolan's STABLE,
# "1. When 0 < |alpha-1| < 0.005, the program has numerical problems
# evaluating the pdf and cdf. The current version of the program sets
# alpha=1 in these cases. This approximation is not bad in the S0
# parameterization."
if np.abs(alpha - 1) < alpha_tol_near_one:
alpha = 1.0
# "2. When alpha=1 and |beta| < 0.005, the program has numerical
# problems. The current version sets beta=0."
# We seem to have addressed this through re-expression of g(theta) here
x0 = _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta)
return x0, alpha, beta
def _pdf_single_value_piecewise_Z1(x, alpha, beta, **kwds):
# convert from Nolan's S_1 (aka S) to S_0 (aka Zolotarev M)
# parameterization
zeta = -beta * np.tan(np.pi * alpha / 2.0)
x0 = x + zeta if alpha != 1 else x
return _pdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds)
def _pdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds):
quad_eps = kwds.get("quad_eps", _QUAD_EPS)
x_tol_near_zeta = kwds.get("piecewise_x_tol_near_zeta", 0.005)
alpha_tol_near_one = kwds.get("piecewise_alpha_tol_near_one", 0.005)
zeta = -beta * np.tan(np.pi * alpha / 2.0)
x0, alpha, beta = _nolan_round_difficult_input(
x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
)
# some other known distribution pdfs / analytical cases
# TODO: add more where possible with test coverage,
# eg https://en.wikipedia.org/wiki/Stable_distribution#Other_analytic_cases
if alpha == 2.0:
# normal
return _norm_pdf(x0 / np.sqrt(2)) / np.sqrt(2)
elif alpha == 0.5 and beta == 1.0:
# levy
# since S(1/2, 1, gamma, delta; <x>) ==
# S(1/2, 1, gamma, gamma + delta; <x0>).
_x = x0 + 1
if _x <= 0:
return 0
return 1 / np.sqrt(2 * np.pi * _x) / _x * np.exp(-1 / (2 * _x))
elif alpha == 0.5 and beta == 0.0 and x0 != 0:
# analytical solution [HO]
S, C = sc.fresnel([1 / np.sqrt(2 * np.pi * np.abs(x0))])
arg = 1 / (4 * np.abs(x0))
return (
np.sin(arg) * (0.5 - S[0]) + np.cos(arg) * (0.5 - C[0])
) / np.sqrt(2 * np.pi * np.abs(x0) ** 3)
elif alpha == 1.0 and beta == 0.0:
# cauchy
return 1 / (1 + x0 ** 2) / np.pi
return _pdf_single_value_piecewise_post_rounding_Z0(
x0, alpha, beta, quad_eps, x_tol_near_zeta
)
def _pdf_single_value_piecewise_post_rounding_Z0(x0, alpha, beta, quad_eps, x_tol_near_zeta):
"""Calculate pdf using Nolan's methods as detailed in [NO].
"""
_nolan = Nolan(alpha, beta, x0)
zeta = _nolan.zeta
xi = _nolan.xi
c2 = _nolan.c2
g = _nolan.g
# round x0 to zeta again if needed. zeta was recomputed and may have
# changed due to floating point differences.
# See https://github.com/scipy/scipy/pull/18133
x0 = _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta)
# handle Nolan's initial case logic
if x0 == zeta:
return (
sc.gamma(1 + 1 / alpha)
* np.cos(xi)
/ np.pi
/ ((1 + zeta ** 2) ** (1 / alpha / 2))
)
elif x0 < zeta:
return _pdf_single_value_piecewise_post_rounding_Z0(
-x0, alpha, -beta, quad_eps, x_tol_near_zeta
)
# following Nolan, we may now assume
# x0 > zeta when alpha != 1
# beta != 0 when alpha == 1
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi / 2, rtol=1e-014, atol=1e-014):
return 0.0
def integrand(theta):
# limit any numerical issues leading to g_1 < 0 near theta limits
g_1 = g(theta)
if not np.isfinite(g_1) or g_1 < 0:
g_1 = 0
return g_1 * np.exp(-g_1)
with np.errstate(all="ignore"):
peak = optimize.bisect(
lambda t: g(t) - 1, -xi, np.pi / 2, xtol=quad_eps
)
# this integrand can be very peaked, so we need to force
# QUADPACK to evaluate the function inside its support
#
# lastly, we add additional samples at
# ~exp(-100), ~exp(-10), ~exp(-5), ~exp(-1)
# to improve QUADPACK's detection of rapidly descending tail behavior
# (this choice is fairly ad hoc)
tail_points = [
optimize.bisect(lambda t: g(t) - exp_height, -xi, np.pi / 2)
for exp_height in [100, 10, 5]
# exp_height = 1 is handled by peak
]
intg_points = [0, peak] + tail_points
intg, *ret = integrate.quad(
integrand,
-xi,
np.pi / 2,
points=intg_points,
limit=100,
epsrel=quad_eps,
epsabs=0,
full_output=1,
)
return c2 * intg
def _cdf_single_value_piecewise_Z1(x, alpha, beta, **kwds):
# convert from Nolan's S_1 (aka S) to S_0 (aka Zolotarev M)
# parameterization
zeta = -beta * np.tan(np.pi * alpha / 2.0)
x0 = x + zeta if alpha != 1 else x
return _cdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds)
def _cdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds):
quad_eps = kwds.get("quad_eps", _QUAD_EPS)
x_tol_near_zeta = kwds.get("piecewise_x_tol_near_zeta", 0.005)
alpha_tol_near_one = kwds.get("piecewise_alpha_tol_near_one", 0.005)
zeta = -beta * np.tan(np.pi * alpha / 2.0)
x0, alpha, beta = _nolan_round_difficult_input(
x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
)
# some other known distribution cdfs / analytical cases
# TODO: add more where possible with test coverage,
# eg https://en.wikipedia.org/wiki/Stable_distribution#Other_analytic_cases
if alpha == 2.0:
# normal
return _norm_cdf(x0 / np.sqrt(2))
elif alpha == 0.5 and beta == 1.0:
# levy
# since S(1/2, 1, gamma, delta; <x>) ==
# S(1/2, 1, gamma, gamma + delta; <x0>).
_x = x0 + 1
if _x <= 0:
return 0
return sc.erfc(np.sqrt(0.5 / _x))
elif alpha == 1.0 and beta == 0.0:
# cauchy
return 0.5 + np.arctan(x0) / np.pi
return _cdf_single_value_piecewise_post_rounding_Z0(
x0, alpha, beta, quad_eps, x_tol_near_zeta
)
def _cdf_single_value_piecewise_post_rounding_Z0(x0, alpha, beta, quad_eps, x_tol_near_zeta):
"""Calculate cdf using Nolan's methods as detailed in [NO].
"""
_nolan = Nolan(alpha, beta, x0)
zeta = _nolan.zeta
xi = _nolan.xi
c1 = _nolan.c1
# c2 = _nolan.c2
c3 = _nolan.c3
g = _nolan.g
# round x0 to zeta again if needed. zeta was recomputed and may have
# changed due to floating point differences.
# See https://github.com/scipy/scipy/pull/18133
x0 = _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta)
# handle Nolan's initial case logic
if (alpha == 1 and beta < 0) or x0 < zeta:
# NOTE: Nolan's paper has a typo here!
# He states F(x) = 1 - F(x, alpha, -beta), but this is clearly
# incorrect since F(-infty) would be 1.0 in this case
# Indeed, the alpha != 1, x0 < zeta case is correct here.
return 1 - _cdf_single_value_piecewise_post_rounding_Z0(
-x0, alpha, -beta, quad_eps, x_tol_near_zeta
)
elif x0 == zeta:
return 0.5 - xi / np.pi
# following Nolan, we may now assume
# x0 > zeta when alpha != 1
# beta > 0 when alpha == 1
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi / 2, rtol=1e-014, atol=1e-014):
return c1
def integrand(theta):
g_1 = g(theta)
return np.exp(-g_1)
with np.errstate(all="ignore"):
# shrink supports where required
left_support = -xi
right_support = np.pi / 2
if alpha > 1:
# integrand(t) monotonic 0 to 1
if integrand(-xi) != 0.0:
res = optimize.minimize(
integrand,
(-xi,),
method="L-BFGS-B",
bounds=[(-xi, np.pi / 2)],
)
left_support = res.x[0]
else:
# integrand(t) monotonic 1 to 0
if integrand(np.pi / 2) != 0.0:
res = optimize.minimize(
integrand,
(np.pi / 2,),
method="L-BFGS-B",
bounds=[(-xi, np.pi / 2)],
)
right_support = res.x[0]
intg, *ret = integrate.quad(
integrand,
left_support,
right_support,
points=[left_support, right_support],
limit=100,
epsrel=quad_eps,
epsabs=0,
full_output=1,
)
return c1 + c3 * intg
def _rvs_Z1(alpha, beta, size=None, random_state=None):
"""Simulate random variables using Nolan's methods as detailed in [NO].
"""
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (
2
/ np.pi
* (
(np.pi / 2 + bTH) * tanTH
- beta * np.log((np.pi / 2 * W * cosTH) / (np.pi / 2 + bTH))
)
)
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (
W
/ (cosTH / np.tan(aTH) + np.sin(TH))
* ((np.cos(aTH) + np.sin(aTH) * tanTH) / W) ** (1.0 / alpha)
)
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta * np.tan(np.pi * alpha / 2)
th0 = np.arctan(val0) / alpha
val3 = W / (cosTH / np.tan(alpha * (th0 + TH)) + np.sin(TH))
res3 = val3 * (
(
np.cos(aTH)
+ np.sin(aTH) * tanTH
- val0 * (np.sin(aTH) - np.cos(aTH) * tanTH)
)
/ W
) ** (1.0 / alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(
beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func,
f2=otherwise,
)
return res
alpha = np.broadcast_to(alpha, size)
beta = np.broadcast_to(beta, size)
TH = uniform.rvs(
loc=-np.pi / 2.0, scale=np.pi, size=size, random_state=random_state
)
W = expon.rvs(size=size, random_state=random_state)
aTH = alpha * TH
bTH = beta * TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(
alpha == 1,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func,
f2=alphanot1func,
)
return res
def _fitstart_S0(data):
alpha, beta, delta1, gamma = _fitstart_S1(data)
# Formulas for mapping parameters in S1 parameterization to
# those in S0 parameterization can be found in [NO]. Note that
# only delta changes.
if alpha != 1:
delta0 = delta1 + beta * gamma * np.tan(np.pi * alpha / 2.0)
else:
delta0 = delta1 + 2 * beta * gamma * np.log(gamma) / np.pi
return alpha, beta, delta0, gamma
def _fitstart_S1(data):
# We follow McCulloch's 1986 method - Simple Consistent Estimators
# of Stable Distribution Parameters
# fmt: off
# Table III and IV
nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4,
5, 6, 8, 10, 15, 25]
nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1]
# table III - alpha = psi_1(nu_alpha, nu_beta)
alpha_table = np.array([
[2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
[1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924],
[1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829],
[1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745],
[1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676],
[1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547],
[1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438],
[1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318],
[1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150],
[1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973],
[1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874],
[0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769],
[0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691],
[0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597],
[0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]]).T
# transpose because interpolation with `RectBivariateSpline` is with
# `nu_beta` as `x` and `nu_alpha` as `y`
# table IV - beta = psi_2(nu_alpha, nu_beta)
beta_table = np.array([
[0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000],
[0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000],
[0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000],
[0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000],
[0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000],
[0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000],
[0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000],
[0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000],
[0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195],
[0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917],
[0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759],
[0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596],
[0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482],
[0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362],
[0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]]).T
# Table V and VII
# These are ordered with decreasing `alpha_range`; so we will need to
# reverse them as required by RectBivariateSpline.
alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1,
1, 0.9, 0.8, 0.7, 0.6, 0.5][::-1]
beta_range = [0, 0.25, 0.5, 0.75, 1]
# Table V - nu_c = psi_3(alpha, beta)
nu_c_table = np.array([
[1.908, 1.908, 1.908, 1.908, 1.908],
[1.914, 1.915, 1.916, 1.918, 1.921],
[1.921, 1.922, 1.927, 1.936, 1.947],
[1.927, 1.930, 1.943, 1.961, 1.987],
[1.933, 1.940, 1.962, 1.997, 2.043],
[1.939, 1.952, 1.988, 2.045, 2.116],
[1.946, 1.967, 2.022, 2.106, 2.211],
[1.955, 1.984, 2.067, 2.188, 2.333],
[1.965, 2.007, 2.125, 2.294, 2.491],
[1.980, 2.040, 2.205, 2.435, 2.696],
[2.000, 2.085, 2.311, 2.624, 2.973],
[2.040, 2.149, 2.461, 2.886, 3.356],
[2.098, 2.244, 2.676, 3.265, 3.912],
[2.189, 2.392, 3.004, 3.844, 4.775],
[2.337, 2.634, 3.542, 4.808, 6.247],
[2.588, 3.073, 4.534, 6.636, 9.144]])[::-1].T
# transpose because interpolation with `RectBivariateSpline` is with
# `beta` as `x` and `alpha` as `y`
# Table VII - nu_zeta = psi_5(alpha, beta)
nu_zeta_table = np.array([
[0, 0.000, 0.000, 0.000, 0.000],
[0, -0.017, -0.032, -0.049, -0.064],
[0, -0.030, -0.061, -0.092, -0.123],
[0, -0.043, -0.088, -0.132, -0.179],
[0, -0.056, -0.111, -0.170, -0.232],
[0, -0.066, -0.134, -0.206, -0.283],
[0, -0.075, -0.154, -0.241, -0.335],
[0, -0.084, -0.173, -0.276, -0.390],
[0, -0.090, -0.192, -0.310, -0.447],
[0, -0.095, -0.208, -0.346, -0.508],
[0, -0.098, -0.223, -0.380, -0.576],
[0, -0.099, -0.237, -0.424, -0.652],
[0, -0.096, -0.250, -0.469, -0.742],
[0, -0.089, -0.262, -0.520, -0.853],
[0, -0.078, -0.272, -0.581, -0.997],
[0, -0.061, -0.279, -0.659, -1.198]])[::-1].T
# fmt: on
psi_1 = RectBivariateSpline(nu_beta_range, nu_alpha_range,
alpha_table, kx=1, ky=1, s=0)
def psi_1_1(nu_beta, nu_alpha):
return psi_1(nu_beta, nu_alpha) \
if nu_beta > 0 else psi_1(-nu_beta, nu_alpha)
psi_2 = RectBivariateSpline(nu_beta_range, nu_alpha_range,
beta_table, kx=1, ky=1, s=0)
def psi_2_1(nu_beta, nu_alpha):
return psi_2(nu_beta, nu_alpha) \
if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha)
phi_3 = RectBivariateSpline(beta_range, alpha_range, nu_c_table,
kx=1, ky=1, s=0)
def phi_3_1(beta, alpha):
return phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha)
phi_5 = RectBivariateSpline(beta_range, alpha_range, nu_zeta_table,
kx=1, ky=1, s=0)
def phi_5_1(beta, alpha):
return phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha)
# quantiles
p05 = np.percentile(data, 5)
p50 = np.percentile(data, 50)
p95 = np.percentile(data, 95)
p25 = np.percentile(data, 25)
p75 = np.percentile(data, 75)
nu_alpha = (p95 - p05) / (p75 - p25)
nu_beta = (p95 + p05 - 2 * p50) / (p95 - p05)
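# nu_alpha and nu_beta are McCulloch's quantile-based sample statistics:
# nu_alpha measures tail heaviness and nu_beta measures skewness; they
# are mapped to alpha and beta via the psi_1/psi_2 tables above.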
if nu_alpha >= 2.439:
eps = np.finfo(float).eps
alpha = np.clip(psi_1_1(nu_beta, nu_alpha)[0, 0], eps, 2.)
beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0, 0], -1.0, 1.0)
else:
alpha = 2.0
beta = np.sign(nu_beta)
c = (p75 - p25) / phi_3_1(beta, alpha)[0, 0]
zeta = p50 + c * phi_5_1(beta, alpha)[0, 0]
delta = zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha != 1. else zeta
return (alpha, beta, delta, c)
class levy_stable_gen(rv_continuous):
r"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l, cauchy, norm
Notes
-----
The distribution for `levy_stable` has characteristic function:
.. math::
\varphi(t, \alpha, \beta, c, \mu) =
e^{it\mu -|ct|^{\alpha}(1-i\beta\operatorname{sign}(t)\Phi(\alpha, t))}
where two different parameterizations are supported. The first :math:`S_1`:
.. math::
\Phi = \begin{cases}
\tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
-{\frac {2}{\pi }}\log |t|&\alpha =1
\end{cases}
The second :math:`S_0`:
.. math::
\Phi = \begin{cases}
-\tan \left({\frac {\pi \alpha }{2}}\right)(|ct|^{1-\alpha}-1)
&\alpha \neq 1\\
-{\frac {2}{\pi }}\log |ct|&\alpha =1
\end{cases}
The probability density function for `levy_stable` is:
.. math::
f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
where :math:`-\infty < t < \infty`. This integral does not have a known
closed form.
`levy_stable` generalizes several distributions. Where possible, they
should be used instead. Specifically, when the shape parameters
assume the values in the table below, the corresponding equivalent
distribution should be used.
========= ======== ===========
``alpha`` ``beta`` Equivalent
========= ======== ===========
1/2 -1 `levy_l`
1/2 1 `levy`
1 0 `cauchy`
2 any `norm` (with ``scale=sqrt(2)``)
========= ======== ===========
Evaluation of the pdf uses Nolan's piecewise integration approach with the
Zolotarev :math:`M` parameterization by default. There is also the option
to use direct numerical integration of the standard parameterization of the
characteristic function or to evaluate by taking the FFT of the
characteristic function.
The default method can be changed by setting the class variable
``levy_stable.pdf_default_method`` to one of 'piecewise' for Nolan's
approach, 'dni' for direct numerical integration, or 'fft-simpson' for the
FFT based approach. For the sake of backwards compatibility, the methods
'best' and 'zolotarev' are equivalent to 'piecewise' and the method
'quadrature' is equivalent to 'dni'.
The parameterization can be changed by setting the class variable
``levy_stable.parameterization`` to either 'S0' or 'S1'.
The default is 'S1'.
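For example (an illustrative sketch; these are class variables, so the
settings affect all subsequent calls):
>>> from scipy.stats import levy_stable
>>> levy_stable.parameterization = 'S0'
>>> levy_stable.pdf_default_method = 'piecewise'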
To improve performance of piecewise and direct numerical integration one
can specify ``levy_stable.quad_eps`` (defaults to 1.2e-14). This is used
as both the absolute and relative quadrature tolerance for direct numerical
integration and as the relative quadrature tolerance for the piecewise
method. One can also specify ``levy_stable.piecewise_x_tol_near_zeta``
(defaults to 0.005) for how close x is to zeta before it is considered the
same as zeta [NO]. The exact check is
``abs(x0 - zeta) < piecewise_x_tol_near_zeta*alpha**(1/alpha)``. One can
also specify ``levy_stable.piecewise_alpha_tol_near_one`` (defaults to
0.005) for how close alpha is to 1 before being considered equal to 1.
To increase accuracy of FFT calculation one can specify
``levy_stable.pdf_fft_grid_spacing`` (defaults to 0.001) and
``pdf_fft_n_points_two_power`` (defaults to None which means a value is
calculated that sufficiently covers the input range).
Further control over FFT calculation is available by setting
``pdf_fft_interpolation_degree`` (defaults to 3) for spline order and
``pdf_fft_interpolation_level`` for determining the number of points to use
in the Newton-Cotes formula when approximating the characteristic function
(considered experimental).
Evaluation of the cdf uses Nolan's piecewise integration approach with the
Zolotarev :math:`S_0` parameterization by default. There is also the option
to evaluate through integration of an interpolated spline of the pdf
calculated by means of the FFT method. The settings affecting FFT
calculation are the same as for pdf calculation. The default cdf method can
be changed by setting ``levy_stable.cdf_default_method`` to either
'piecewise' or 'fft-simpson'. For cdf calculations the Zolotarev method is
superior in accuracy, so FFT is disabled by default.
The initial fitting estimate uses the quantile estimation method in [MC].
MLE estimation of parameters in the fit method starts from this quantile
estimate. Note that
MLE doesn't always converge if using FFT for pdf calculations; this will be
the case if alpha <= 1 where the FFT approach doesn't give good
approximations.
Any non-missing value for the attribute
``levy_stable.pdf_fft_min_points_threshold`` will set
``levy_stable.pdf_default_method`` to 'fft-simpson' if a valid
default method is not otherwise set.
.. warning::
For pdf calculations FFT calculation is considered experimental.
For cdf calculations FFT calculation is considered experimental. Use
Zolotarev's method instead (default).
%(after_notes)s
References
----------
.. [MC] McCulloch, J., 1986. Simple consistent estimators of stable
distribution parameters. Communications in Statistics - Simulation and
Computation 15, 1109-1136.
.. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT method
to compute densities of stable distribution.
.. [NO] Nolan, J., 1997. Numerical Calculation of Stable Densities and
distributions Functions.
.. [HO] Hopcraft, K. I., Jakeman, E., Tanner, R. M. J., 1999. Lévy random
walks with fluctuating step number and multiscale behavior.
%(example)s
"""
# Configurable options as class variables
# (accessible from self by attribute lookup).
parameterization = "S1"
pdf_default_method = "piecewise"
cdf_default_method = "piecewise"
quad_eps = _QUAD_EPS
piecewise_x_tol_near_zeta = 0.005
piecewise_alpha_tol_near_one = 0.005
pdf_fft_min_points_threshold = None
pdf_fft_grid_spacing = 0.001
pdf_fft_n_points_two_power = None
pdf_fft_interpolation_level = 3
pdf_fft_interpolation_degree = 3
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _shape_info(self):
ialpha = _ShapeInfo("alpha", False, (0, 2), (False, True))
ibeta = _ShapeInfo("beta", False, (-1, 1), (True, True))
return [ialpha, ibeta]
def _parameterization(self):
allowed = ("S0", "S1")
pz = self.parameterization
if pz not in allowed:
raise RuntimeError(
f"Parameterization '{pz}' in supported list: {allowed}"
)
return pz
@inherit_docstring_from(rv_continuous)
def rvs(self, *args, **kwds):
X1 = super().rvs(*args, **kwds)
discrete = kwds.pop("discrete", None) # noqa
rndm = kwds.pop("random_state", None) # noqa
(alpha, beta), delta, gamma, size = self._parse_args_rvs(*args, **kwds)
        # shift location for this parameterization (S1)
X1 = np.where(
alpha == 1.0, X1 + 2 * beta * gamma * np.log(gamma) / np.pi, X1
)
if self._parameterization() == "S0":
return np.where(
alpha == 1.0,
X1 - (beta * 2 * gamma * np.log(gamma) / np.pi),
X1 - gamma * beta * np.tan(np.pi * alpha / 2.0),
)
elif self._parameterization() == "S1":
return X1
def _rvs(self, alpha, beta, size=None, random_state=None):
return _rvs_Z1(alpha, beta, size, random_state)
@inherit_docstring_from(rv_continuous)
def pdf(self, x, *args, **kwds):
# override base class version to correct
# location for S1 parameterization
if self._parameterization() == "S0":
return super().pdf(x, *args, **kwds)
elif self._parameterization() == "S1":
(alpha, beta), delta, gamma = self._parse_args(*args, **kwds)
if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):
return super().pdf(x, *args, **kwds)
else:
                # correct location for this parameterization
x = np.reshape(x, (1, -1))[0, :]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in), 1))
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
for pair in uniq_param_pairs:
_alpha, _beta = pair
_delta = (
delta + 2 * _beta * gamma * np.log(gamma) / np.pi
if _alpha == 1.0
else delta
)
data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
_x = data_in[data_mask, 0]
data_out[data_mask] = (
super()
.pdf(_x, _alpha, _beta, loc=_delta, scale=gamma)
.reshape(len(_x), 1)
)
output = data_out.T[0]
if output.shape == (1,):
return output[0]
return output
def _pdf(self, x, alpha, beta):
if self._parameterization() == "S0":
_pdf_single_value_piecewise = _pdf_single_value_piecewise_Z0
_pdf_single_value_cf_integrate = _pdf_single_value_cf_integrate_Z0
_cf = _cf_Z0
elif self._parameterization() == "S1":
_pdf_single_value_piecewise = _pdf_single_value_piecewise_Z1
_pdf_single_value_cf_integrate = _pdf_single_value_cf_integrate_Z1
_cf = _cf_Z1
x = np.asarray(x).reshape(1, -1)[0, :]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in), 1))
pdf_default_method_name = self.pdf_default_method
if pdf_default_method_name in ("piecewise", "best", "zolotarev"):
pdf_single_value_method = _pdf_single_value_piecewise
elif pdf_default_method_name in ("dni", "quadrature"):
pdf_single_value_method = _pdf_single_value_cf_integrate
elif (
pdf_default_method_name == "fft-simpson"
or self.pdf_fft_min_points_threshold is not None
):
pdf_single_value_method = None
pdf_single_value_kwds = {
"quad_eps": self.quad_eps,
"piecewise_x_tol_near_zeta": self.piecewise_x_tol_near_zeta,
"piecewise_alpha_tol_near_one": self.piecewise_alpha_tol_near_one,
}
fft_grid_spacing = self.pdf_fft_grid_spacing
fft_n_points_two_power = self.pdf_fft_n_points_two_power
fft_interpolation_level = self.pdf_fft_interpolation_level
fft_interpolation_degree = self.pdf_fft_interpolation_degree
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if pdf_single_value_method is not None:
data_out[data_mask] = np.array(
[
pdf_single_value_method(
_x, _alpha, _beta, **pdf_single_value_kwds
)
for _x, _alpha, _beta in data_subset
]
).reshape(len(data_subset), 1)
else:
warnings.warn(
"Density calculations experimental for FFT method."
+ " Use combination of piecewise and dni methods instead.",
RuntimeWarning,
)
_alpha, _beta = pair
_x = data_subset[:, (0,)]
if _alpha < 1.0:
raise RuntimeError(
"FFT method does not work well for alpha less than 1."
)
# need enough points to "cover" _x for interpolation
if fft_grid_spacing is None and fft_n_points_two_power is None:
raise ValueError(
"One of fft_grid_spacing or fft_n_points_two_power "
+ "needs to be set."
)
max_abs_x = np.max(np.abs(_x))
h = (
2 ** (3 - fft_n_points_two_power) * max_abs_x
if fft_grid_spacing is None
else fft_grid_spacing
)
q = (
np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2
if fft_n_points_two_power is None
else int(fft_n_points_two_power)
)
# for some parameters, the range of x can be quite
# large, let's choose an arbitrary cut off (8GB) to save on
# computer memory.
MAX_Q = 30
if q > MAX_Q:
raise RuntimeError(
"fft_n_points_two_power has a maximum "
+ f"value of {MAX_Q}"
)
density_x, density = pdf_from_cf_with_fft(
lambda t: _cf(t, _alpha, _beta),
h=h,
q=q,
level=fft_interpolation_level,
)
f = interpolate.InterpolatedUnivariateSpline(
density_x, np.real(density), k=fft_interpolation_degree
                )  # spline interpolant of the FFT-estimated density (cubic by default)
data_out[data_mask] = f(_x)
return data_out.T[0]
@inherit_docstring_from(rv_continuous)
def cdf(self, x, *args, **kwds):
# override base class version to correct
# location for S1 parameterization
# NOTE: this is near identical to pdf() above
if self._parameterization() == "S0":
return super().cdf(x, *args, **kwds)
elif self._parameterization() == "S1":
(alpha, beta), delta, gamma = self._parse_args(*args, **kwds)
if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):
return super().cdf(x, *args, **kwds)
else:
                # correct location for this parameterization
x = np.reshape(x, (1, -1))[0, :]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in), 1))
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
for pair in uniq_param_pairs:
_alpha, _beta = pair
_delta = (
delta + 2 * _beta * gamma * np.log(gamma) / np.pi
if _alpha == 1.0
else delta
)
data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
_x = data_in[data_mask, 0]
data_out[data_mask] = (
super()
.cdf(_x, _alpha, _beta, loc=_delta, scale=gamma)
.reshape(len(_x), 1)
)
output = data_out.T[0]
if output.shape == (1,):
return output[0]
return output
def _cdf(self, x, alpha, beta):
if self._parameterization() == "S0":
_cdf_single_value_piecewise = _cdf_single_value_piecewise_Z0
_cf = _cf_Z0
elif self._parameterization() == "S1":
_cdf_single_value_piecewise = _cdf_single_value_piecewise_Z1
_cf = _cf_Z1
x = np.asarray(x).reshape(1, -1)[0, :]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in), 1))
cdf_default_method_name = self.cdf_default_method
if cdf_default_method_name == "piecewise":
cdf_single_value_method = _cdf_single_value_piecewise
elif cdf_default_method_name == "fft-simpson":
cdf_single_value_method = None
cdf_single_value_kwds = {
"quad_eps": self.quad_eps,
"piecewise_x_tol_near_zeta": self.piecewise_x_tol_near_zeta,
"piecewise_alpha_tol_near_one": self.piecewise_alpha_tol_near_one,
}
fft_grid_spacing = self.pdf_fft_grid_spacing
fft_n_points_two_power = self.pdf_fft_n_points_two_power
fft_interpolation_level = self.pdf_fft_interpolation_level
fft_interpolation_degree = self.pdf_fft_interpolation_degree
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if cdf_single_value_method is not None:
data_out[data_mask] = np.array(
[
cdf_single_value_method(
_x, _alpha, _beta, **cdf_single_value_kwds
)
for _x, _alpha, _beta in data_subset
]
).reshape(len(data_subset), 1)
else:
warnings.warn(
"Cumulative density calculations experimental for FFT"
+ " method. Use piecewise method instead.",
RuntimeWarning,
)
_alpha, _beta = pair
_x = data_subset[:, (0,)]
# need enough points to "cover" _x for interpolation
if fft_grid_spacing is None and fft_n_points_two_power is None:
raise ValueError(
"One of fft_grid_spacing or fft_n_points_two_power "
+ "needs to be set."
)
max_abs_x = np.max(np.abs(_x))
h = (
2 ** (3 - fft_n_points_two_power) * max_abs_x
if fft_grid_spacing is None
else fft_grid_spacing
)
q = (
np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2
if fft_n_points_two_power is None
else int(fft_n_points_two_power)
)
density_x, density = pdf_from_cf_with_fft(
lambda t: _cf(t, _alpha, _beta),
h=h,
q=q,
level=fft_interpolation_level,
)
f = interpolate.InterpolatedUnivariateSpline(
density_x, np.real(density), k=fft_interpolation_degree
)
data_out[data_mask] = np.array(
[f.integral(self.a, x_1) for x_1 in _x]
).reshape(data_out[data_mask].shape)
return data_out.T[0]
def _fitstart(self, data):
if self._parameterization() == "S0":
_fitstart = _fitstart_S0
elif self._parameterization() == "S1":
_fitstart = _fitstart_S1
return _fitstart(data)
def _stats(self, alpha, beta):
mu = 0 if alpha > 1 else np.nan
mu2 = 2 if alpha == 2 else np.inf
g1 = 0.0 if alpha == 2.0 else np.nan
g2 = 0.0 if alpha == 2.0 else np.nan
return mu, mu2, g1, g2
# Cotes numbers - see sequence from http://oeis.org/A100642
Cotes_table = np.array(
[[], [1]] + [v[2] for v in _builtincoeffs.values()], dtype=object
)
Cotes = np.array(
[
np.pad(r, (0, len(Cotes_table) - 1 - len(r)), mode='constant')
for r in Cotes_table
]
)
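# Editorial note (a sketch, assuming scipy.integrate's ``_builtincoeffs``
# layout): row ``n`` of ``Cotes`` holds the n-point Newton-Cotes weights,
# zero-padded to the table width, so the 3-point (Simpson) row begins with
# the classic weights 1, 4, 1.
assert list(Cotes[3][:3]) == [1, 4, 1]  # Simpson's rule weights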
def pdf_from_cf_with_fft(cf, h=0.01, q=9, level=3):
"""Calculates pdf from characteristic function.
Uses fast Fourier transform with Newton-Cotes integration following [WZ].
Defaults to using Simpson's method (3-point Newton-Cotes integration).
Parameters
----------
cf : callable
Single argument function from float -> complex expressing a
characteristic function for some distribution.
h : Optional[float]
Step size for Newton-Cotes integration. Default: 0.01
q : Optional[int]
Use 2**q steps when performing Newton-Cotes integration.
The infinite integral in the inverse Fourier transform will then
be restricted to the interval [-2**q * h / 2, 2**q * h / 2]. Setting
the number of steps equal to a power of 2 allows the fft to be
calculated in O(n*log(n)) time rather than O(n**2).
Default: 9
level : Optional[int]
Calculate integral using n-point Newton-Cotes integration for
n = level. The 3-point Newton-Cotes formula corresponds to Simpson's
rule. Default: 3
Returns
-------
x_l : ndarray
Array of points x at which pdf is estimated. 2**q equally spaced
points from -pi/h up to but not including pi/h.
density : ndarray
Estimated values of pdf corresponding to cf at points in x_l.
References
----------
.. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT method
to compute densities of stable distribution.
"""
n = level
N = 2**q
steps = np.arange(0, N)
L = N * h / 2
x_l = np.pi * (steps - N / 2) / L
if level > 1:
indices = np.arange(n).reshape(n, 1)
s1 = np.sum(
(-1) ** steps * Cotes[n, indices] * np.fft.fft(
(-1)**steps * cf(-L + h * steps + h * indices / (n - 1))
) * np.exp(
1j * np.pi * indices / (n - 1)
- 2 * 1j * np.pi * indices * steps /
(N * (n - 1))
),
axis=0
)
else:
s1 = (-1) ** steps * Cotes[n, 0] * np.fft.fft(
(-1) ** steps * cf(-L + h * steps)
)
density = h * s1 / (2 * np.pi * np.sum(Cotes[n]))
return (x_l, density)
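# Editorial sketch (not part of the original source): `pdf_from_cf_with_fft`
# can be sanity-checked against a distribution whose characteristic function
# is known in closed form. The standard normal has cf exp(-t**2/2), so the
# real part of the recovered density should be close to the normal pdf.
def _demo_pdf_from_cf_with_fft():
    x_l, density = pdf_from_cf_with_fft(
        lambda t: np.exp(-t**2 / 2), h=0.05, q=9
    )
    # np.real(density) approximates scipy.stats.norm.pdf evaluated at x_l.
    return x_l, np.real(density)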
levy_stable = levy_stable_gen(name="levy_stable")
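# Editorial usage sketch (illustrative, not part of the original source):
# the class variables documented above are plain attributes, so evaluation
# settings can be tuned before calling the distribution's methods. Note that
# this mutates module-level state shared by all users of `levy_stable`.
def _demo_levy_stable_settings():
    levy_stable.parameterization = "S0"        # switch from the default 'S1'
    levy_stable.pdf_default_method = "piecewise"
    levy_stable.quad_eps = 1e-10               # looser tolerance, faster
    return levy_stable.pdf(0.5, 1.5, 0.0)      # x=0.5, alpha=1.5, beta=0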
| 44,613
| 35.810231
| 93
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_odds_ratio.py
|
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from .._discrete_distns import nchypergeom_fisher, hypergeom
from scipy.stats._odds_ratio import odds_ratio
from .data.fisher_exact_results_from_r import data
class TestOddsRatio:
@pytest.mark.parametrize('parameters, rresult', data)
def test_results_from_r(self, parameters, rresult):
alternative = parameters.alternative.replace('.', '-')
result = odds_ratio(parameters.table)
# The results computed by R are not very accurate.
if result.statistic < 400:
or_rtol = 5e-4
ci_rtol = 2e-2
else:
or_rtol = 5e-2
ci_rtol = 1e-1
assert_allclose(result.statistic,
rresult.conditional_odds_ratio, rtol=or_rtol)
ci = result.confidence_interval(parameters.confidence_level,
alternative)
assert_allclose((ci.low, ci.high), rresult.conditional_odds_ratio_ci,
rtol=ci_rtol)
# Also do a self-check for the conditional odds ratio.
# With the computed conditional odds ratio as the noncentrality
# parameter of the noncentral hypergeometric distribution with
# parameters table.sum(), table[0].sum(), and table[:,0].sum() as
# total, ngood and nsample, respectively, the mean of the distribution
# should equal table[0, 0].
cor = result.statistic
table = np.array(parameters.table)
total = table.sum()
ngood = table[0].sum()
nsample = table[:, 0].sum()
# nchypergeom_fisher does not allow the edge cases where the
# noncentrality parameter is 0 or inf, so handle those values
# separately here.
if cor == 0:
nchg_mean = hypergeom.support(total, ngood, nsample)[0]
elif cor == np.inf:
nchg_mean = hypergeom.support(total, ngood, nsample)[1]
else:
nchg_mean = nchypergeom_fisher.mean(total, ngood, nsample, cor)
assert_allclose(nchg_mean, table[0, 0], rtol=1e-13)
# Check that the confidence interval is correct.
alpha = 1 - parameters.confidence_level
if alternative == 'two-sided':
if ci.low > 0:
sf = nchypergeom_fisher.sf(table[0, 0] - 1,
total, ngood, nsample, ci.low)
assert_allclose(sf, alpha/2, rtol=1e-11)
if np.isfinite(ci.high):
cdf = nchypergeom_fisher.cdf(table[0, 0],
total, ngood, nsample, ci.high)
assert_allclose(cdf, alpha/2, rtol=1e-11)
elif alternative == 'less':
if np.isfinite(ci.high):
cdf = nchypergeom_fisher.cdf(table[0, 0],
total, ngood, nsample, ci.high)
assert_allclose(cdf, alpha, rtol=1e-11)
else:
# alternative == 'greater'
if ci.low > 0:
sf = nchypergeom_fisher.sf(table[0, 0] - 1,
total, ngood, nsample, ci.low)
assert_allclose(sf, alpha, rtol=1e-11)
@pytest.mark.parametrize('table', [
[[0, 0], [5, 10]],
[[5, 10], [0, 0]],
[[0, 5], [0, 10]],
[[5, 0], [10, 0]],
])
def test_row_or_col_zero(self, table):
result = odds_ratio(table)
assert_equal(result.statistic, np.nan)
ci = result.confidence_interval()
assert_equal((ci.low, ci.high), (0, np.inf))
@pytest.mark.parametrize("case",
[[0.95, 'two-sided', 0.4879913, 2.635883],
[0.90, 'two-sided', 0.5588516, 2.301663]])
def test_sample_odds_ratio_ci(self, case):
# Compare the sample odds ratio confidence interval to the R function
# oddsratio.wald from the epitools package, e.g.
# > library(epitools)
# > table = matrix(c(10, 20, 41, 93), nrow=2, ncol=2, byrow=TRUE)
# > result = oddsratio.wald(table)
# > result$measure
# odds ratio with 95% C.I.
# Predictor estimate lower upper
# Exposed1 1.000000 NA NA
# Exposed2 1.134146 0.4879913 2.635883
confidence_level, alternative, ref_low, ref_high = case
table = [[10, 20], [41, 93]]
result = odds_ratio(table, kind='sample')
assert_allclose(result.statistic, 1.134146, rtol=1e-6)
ci = result.confidence_interval(confidence_level, alternative)
assert_allclose([ci.low, ci.high], [ref_low, ref_high], rtol=1e-6)
@pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
def test_sample_odds_ratio_one_sided_ci(self, alternative):
# can't find a good reference for one-sided CI, so bump up the sample
# size and compare against the conditional odds ratio CI
table = [[1000, 2000], [4100, 9300]]
res = odds_ratio(table, kind='sample')
ref = odds_ratio(table, kind='conditional')
assert_allclose(res.statistic, ref.statistic, atol=1e-5)
assert_allclose(res.confidence_interval(alternative=alternative),
ref.confidence_interval(alternative=alternative),
atol=2e-3)
@pytest.mark.parametrize('kind', ['sample', 'conditional'])
@pytest.mark.parametrize('bad_table', [123, "foo", [10, 11, 12]])
def test_invalid_table_shape(self, kind, bad_table):
with pytest.raises(ValueError, match="Invalid shape"):
odds_ratio(bad_table, kind=kind)
def test_invalid_table_type(self):
with pytest.raises(ValueError, match='must be an array of integers'):
odds_ratio([[1.0, 3.4], [5.0, 9.9]])
def test_negative_table_values(self):
with pytest.raises(ValueError, match='must be nonnegative'):
odds_ratio([[1, 2], [3, -4]])
def test_invalid_kind(self):
with pytest.raises(ValueError, match='`kind` must be'):
odds_ratio([[10, 20], [30, 14]], kind='magnetoreluctance')
def test_invalid_alternative(self):
result = odds_ratio([[5, 10], [2, 32]])
with pytest.raises(ValueError, match='`alternative` must be'):
result.confidence_interval(alternative='depleneration')
@pytest.mark.parametrize('level', [-0.5, 1.5])
def test_invalid_confidence_level(self, level):
result = odds_ratio([[5, 10], [2, 32]])
with pytest.raises(ValueError, match='must be between 0 and 1'):
result.confidence_interval(confidence_level=level)
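# Editorial usage sketch (not a test): the basic API exercised by the class
# above, using the same 2x2 table as `test_sample_odds_ratio_ci`.
def _demo_odds_ratio():
    res = odds_ratio([[10, 20], [41, 93]])
    ci = res.confidence_interval(confidence_level=0.95)
    return res.statistic, (ci.low, ci.high)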
| 6,705
| 44.310811
| 78
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_survival.py
|
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy import stats
from scipy.stats import _survival
def _kaplan_meier_reference(times, censored):
# This is a very straightforward implementation of the Kaplan-Meier
# estimator that does almost everything differently from the implementation
# in stats.ecdf.
# Begin by sorting the raw data. Note that the order of death and loss
# at a given time matters: death happens first. See [2] page 461:
# "These conventions may be paraphrased by saying that deaths recorded as
# of an age t are treated as if they occurred slightly before t, and losses
# recorded as of an age t are treated as occurring slightly after t."
# We implement this by sorting the data first by time, then by `censored`,
# (which is 0 when there is a death and 1 when there is only a loss).
dtype = [('time', float), ('censored', int)]
data = np.array([(t, d) for t, d in zip(times, censored)], dtype=dtype)
data = np.sort(data, order=('time', 'censored'))
times = data['time']
died = np.logical_not(data['censored'])
m = times.size
n = np.arange(m, 0, -1) # number at risk
sf = np.cumprod((n - died) / n)
# Find the indices of the *last* occurrence of unique times. The
# corresponding entries of `times` and `sf` are what we want.
_, indices = np.unique(times[::-1], return_index=True)
ref_times = times[-indices - 1]
ref_sf = sf[-indices - 1]
return ref_times, ref_sf
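# Editorial sanity check (illustrative values, not part of the test suite):
# with one censored observation at t=2, the survival estimate steps down only
# at death times, and the censoring reduces the number at risk for the later
# death.
def _demo_kaplan_meier_reference():
    times = [1.0, 2.0, 2.0, 3.0]
    censored = [0, 0, 1, 0]   # the second observation at t=2 is censored
    ref_times, ref_sf = _kaplan_meier_reference(times, censored)
    # ref_times -> [1., 2., 3.]; ref_sf -> [0.75, 0.5, 0.]
    return ref_times, ref_sf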
class TestSurvival:
@staticmethod
def get_random_sample(rng, n_unique):
# generate random sample
unique_times = rng.random(n_unique)
# convert to `np.int32` to resolve `np.repeat` failure in 32-bit CI
repeats = rng.integers(1, 4, n_unique).astype(np.int32)
times = rng.permuted(np.repeat(unique_times, repeats))
censored = rng.random(size=times.size) > rng.random()
sample = stats.CensoredData.right_censored(times, censored)
return sample, times, censored
def test_input_validation(self):
message = '`sample` must be a one-dimensional sequence.'
with pytest.raises(ValueError, match=message):
stats.ecdf([[1]])
with pytest.raises(ValueError, match=message):
stats.ecdf(1)
message = '`sample` must not contain nan'
with pytest.raises(ValueError, match=message):
stats.ecdf([np.nan])
message = 'Currently, only uncensored and right-censored data...'
with pytest.raises(NotImplementedError, match=message):
stats.ecdf(stats.CensoredData.left_censored([1], censored=[True]))
message = 'method` must be one of...'
res = stats.ecdf([1, 2, 3])
with pytest.raises(ValueError, match=message):
res.cdf.confidence_interval(method='ekki-ekki')
with pytest.raises(ValueError, match=message):
res.sf.confidence_interval(method='shrubbery')
message = 'confidence_level` must be a scalar between 0 and 1'
with pytest.raises(ValueError, match=message):
res.cdf.confidence_interval(-1)
with pytest.raises(ValueError, match=message):
res.sf.confidence_interval([0.5, 0.6])
message = 'The confidence interval is undefined at some observations.'
with pytest.warns(RuntimeWarning, match=message):
ci = res.cdf.confidence_interval()
message = 'Confidence interval bounds do not implement...'
with pytest.raises(NotImplementedError, match=message):
ci.low.confidence_interval()
with pytest.raises(NotImplementedError, match=message):
ci.high.confidence_interval()
def test_edge_cases(self):
res = stats.ecdf([])
assert_equal(res.cdf.quantiles, [])
assert_equal(res.cdf.probabilities, [])
res = stats.ecdf([1])
assert_equal(res.cdf.quantiles, [1])
assert_equal(res.cdf.probabilities, [1])
def test_unique(self):
# Example with unique observations; `stats.ecdf` ref. [1] page 80
sample = [6.23, 5.58, 7.06, 6.42, 5.20]
res = stats.ecdf(sample)
ref_x = np.sort(np.unique(sample))
ref_cdf = np.arange(1, 6) / 5
ref_sf = 1 - ref_cdf
assert_equal(res.cdf.quantiles, ref_x)
assert_equal(res.cdf.probabilities, ref_cdf)
assert_equal(res.sf.quantiles, ref_x)
assert_equal(res.sf.probabilities, ref_sf)
def test_nonunique(self):
# Example with non-unique observations; `stats.ecdf` ref. [1] page 82
sample = [0, 2, 1, 2, 3, 4]
res = stats.ecdf(sample)
ref_x = np.sort(np.unique(sample))
ref_cdf = np.array([1/6, 2/6, 4/6, 5/6, 1])
ref_sf = 1 - ref_cdf
assert_equal(res.cdf.quantiles, ref_x)
assert_equal(res.cdf.probabilities, ref_cdf)
assert_equal(res.sf.quantiles, ref_x)
assert_equal(res.sf.probabilities, ref_sf)
def test_evaluate_methods(self):
# Test CDF and SF `evaluate` methods
rng = np.random.default_rng(1162729143302572461)
sample, _, _ = self.get_random_sample(rng, 15)
res = stats.ecdf(sample)
x = res.cdf.quantiles
xr = x + np.diff(x, append=x[-1]+1)/2 # right shifted points
assert_equal(res.cdf.evaluate(x), res.cdf.probabilities)
assert_equal(res.cdf.evaluate(xr), res.cdf.probabilities)
assert_equal(res.cdf.evaluate(x[0]-1), 0) # CDF starts at 0
assert_equal(res.cdf.evaluate([-np.inf, np.inf]), [0, 1])
assert_equal(res.sf.evaluate(x), res.sf.probabilities)
assert_equal(res.sf.evaluate(xr), res.sf.probabilities)
assert_equal(res.sf.evaluate(x[0]-1), 1) # SF starts at 1
assert_equal(res.sf.evaluate([-np.inf, np.inf]), [1, 0])
# ref. [1] page 91
t1 = [37, 43, 47, 56, 60, 62, 71, 77, 80, 81] # times
d1 = [0, 0, 1, 1, 0, 0, 0, 1, 1, 1] # 1 means deaths (not censored)
r1 = [1, 1, 0.875, 0.75, 0.75, 0.75, 0.75, 0.5, 0.25, 0] # reference SF
# https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html # noqa
t2 = [8, 12, 26, 14, 21, 27, 8, 32, 20, 40]
d2 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
r2 = [0.9, 0.788, 0.675, 0.675, 0.54, 0.405, 0.27, 0.27, 0.27]
t3 = [33, 28, 41, 48, 48, 25, 37, 48, 25, 43]
d3 = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
r3 = [1, 0.875, 0.75, 0.75, 0.6, 0.6, 0.6]
# https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/bs704_survival4.html # noqa
t4 = [24, 3, 11, 19, 24, 13, 14, 2, 18, 17,
24, 21, 12, 1, 10, 23, 6, 5, 9, 17]
d4 = [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1]
r4 = [0.95, 0.95, 0.897, 0.844, 0.844, 0.844, 0.844, 0.844, 0.844,
0.844, 0.76, 0.676, 0.676, 0.676, 0.676, 0.507, 0.507]
# https://www.real-statistics.com/survival-analysis/kaplan-meier-procedure/confidence-interval-for-the-survival-function/ # noqa
t5 = [3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11]
d5 = [1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1]
r5 = [0.944, 0.889, 0.722, 0.542, 0.542, 0.542, 0.361, 0.181, 0.181, 0.181]
@pytest.mark.parametrize("case", [(t1, d1, r1), (t2, d2, r2), (t3, d3, r3),
(t4, d4, r4), (t5, d5, r5)])
def test_right_censored_against_examples(self, case):
# test `ecdf` against other implementations on example problems
times, died, ref = case
sample = stats.CensoredData.right_censored(times, np.logical_not(died))
res = stats.ecdf(sample)
assert_allclose(res.sf.probabilities, ref, atol=1e-3)
assert_equal(res.sf.quantiles, np.sort(np.unique(times)))
# test reference implementation against other implementations
res = _kaplan_meier_reference(times, np.logical_not(died))
assert_equal(res[0], np.sort(np.unique(times)))
assert_allclose(res[1], ref, atol=1e-3)
@pytest.mark.parametrize('seed', [182746786639392128, 737379171436494115,
576033618403180168, 308115465002673650])
def test_right_censored_against_reference_implementation(self, seed):
# test `ecdf` against reference implementation on random problems
rng = np.random.default_rng(seed)
n_unique = rng.integers(10, 100)
sample, times, censored = self.get_random_sample(rng, n_unique)
res = stats.ecdf(sample)
ref = _kaplan_meier_reference(times, censored)
assert_allclose(res.sf.quantiles, ref[0])
assert_allclose(res.sf.probabilities, ref[1])
# If all observations are uncensored, the KM estimate should match
# the usual estimate for uncensored data
sample = stats.CensoredData(uncensored=times)
res = _survival._ecdf_right_censored(sample) # force Kaplan-Meier
ref = stats.ecdf(times)
assert_equal(res[0], ref.sf.quantiles)
assert_allclose(res[1], ref.cdf.probabilities, rtol=1e-14)
assert_allclose(res[2], ref.sf.probabilities, rtol=1e-14)
def test_right_censored_ci(self):
# test "greenwood" confidence interval against example 4 (URL above).
times, died = self.t4, self.d4
sample = stats.CensoredData.right_censored(times, np.logical_not(died))
res = stats.ecdf(sample)
ref_allowance = [0.096, 0.096, 0.135, 0.162, 0.162, 0.162, 0.162,
0.162, 0.162, 0.162, 0.214, 0.246, 0.246, 0.246,
0.246, 0.341, 0.341]
sf_ci = res.sf.confidence_interval()
cdf_ci = res.cdf.confidence_interval()
allowance = res.sf.probabilities - sf_ci.low.probabilities
assert_allclose(allowance, ref_allowance, atol=1e-3)
assert_allclose(sf_ci.low.probabilities,
np.clip(res.sf.probabilities - allowance, 0, 1))
assert_allclose(sf_ci.high.probabilities,
np.clip(res.sf.probabilities + allowance, 0, 1))
assert_allclose(cdf_ci.low.probabilities,
np.clip(res.cdf.probabilities - allowance, 0, 1))
assert_allclose(cdf_ci.high.probabilities,
np.clip(res.cdf.probabilities + allowance, 0, 1))
# test "log-log" confidence interval against Mathematica
# e = {24, 3, 11, 19, 24, 13, 14, 2, 18, 17, 24, 21, 12, 1, 10, 23, 6, 5,
# 9, 17}
# ci = {1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0}
# R = EventData[e, ci]
# S = SurvivalModelFit[R]
# S["PointwiseIntervals", ConfidenceLevel->0.95,
# ConfidenceTransform->"LogLog"]
ref_low = [0.694743, 0.694743, 0.647529, 0.591142, 0.591142, 0.591142,
0.591142, 0.591142, 0.591142, 0.591142, 0.464605, 0.370359,
0.370359, 0.370359, 0.370359, 0.160489, 0.160489]
ref_high = [0.992802, 0.992802, 0.973299, 0.947073, 0.947073, 0.947073,
0.947073, 0.947073, 0.947073, 0.947073, 0.906422, 0.856521,
0.856521, 0.856521, 0.856521, 0.776724, 0.776724]
sf_ci = res.sf.confidence_interval(method='log-log')
assert_allclose(sf_ci.low.probabilities, ref_low, atol=1e-6)
assert_allclose(sf_ci.high.probabilities, ref_high, atol=1e-6)
def test_right_censored_ci_example_5(self):
# test "exponential greenwood" confidence interval against example 5
times, died = self.t5, self.d5
sample = stats.CensoredData.right_censored(times, np.logical_not(died))
res = stats.ecdf(sample)
lower = np.array([0.66639, 0.624174, 0.456179, 0.287822, 0.287822,
0.287822, 0.128489, 0.030957, 0.030957, 0.030957])
upper = np.array([0.991983, 0.970995, 0.87378, 0.739467, 0.739467,
0.739467, 0.603133, 0.430365, 0.430365, 0.430365])
sf_ci = res.sf.confidence_interval(method='log-log')
cdf_ci = res.cdf.confidence_interval(method='log-log')
assert_allclose(sf_ci.low.probabilities, lower, atol=1e-5)
assert_allclose(sf_ci.high.probabilities, upper, atol=1e-5)
assert_allclose(cdf_ci.low.probabilities, 1-upper, atol=1e-5)
assert_allclose(cdf_ci.high.probabilities, 1-lower, atol=1e-5)
# Test against R's `survival` library `survfit` function, 90%CI
# library(survival)
# options(digits=16)
# time = c(3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11)
# status = c(1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1)
# res = survfit(Surv(time, status)
# ~1, conf.type = "log-log", conf.int = 0.90)
# res$time; res$lower; res$upper
low = [0.74366748406861172, 0.68582332289196246, 0.50596835651480121,
0.32913131413336727, 0.32913131413336727, 0.32913131413336727,
0.15986912028781664, 0.04499539918147757, 0.04499539918147757,
0.04499539918147757]
high = [0.9890291867238429, 0.9638835422144144, 0.8560366823086629,
0.7130167643978450, 0.7130167643978450, 0.7130167643978450,
0.5678602982997164, 0.3887616766886558, 0.3887616766886558,
0.3887616766886558]
sf_ci = res.sf.confidence_interval(method='log-log',
confidence_level=0.9)
assert_allclose(sf_ci.low.probabilities, low)
assert_allclose(sf_ci.high.probabilities, high)
# And with conf.type = "plain"
low = [0.8556383113628162, 0.7670478794850761, 0.5485720663578469,
0.3441515412527123, 0.3441515412527123, 0.3441515412527123,
0.1449184105424544, 0., 0., 0.]
high = [1., 1., 0.8958723780865975, 0.7391817920806210,
0.7391817920806210, 0.7391817920806210, 0.5773038116797676,
0.3642270254596720, 0.3642270254596720, 0.3642270254596720]
sf_ci = res.sf.confidence_interval(confidence_level=0.9)
assert_allclose(sf_ci.low.probabilities, low)
assert_allclose(sf_ci.high.probabilities, high)
def test_right_censored_ci_nans(self):
# test `ecdf` confidence interval on a problem that results in NaNs
times, died = self.t1, self.d1
sample = stats.CensoredData.right_censored(times, np.logical_not(died))
res = stats.ecdf(sample)
# Reference values generated with Matlab
# format long
# t = [37 43 47 56 60 62 71 77 80 81];
# d = [0 0 1 1 0 0 0 1 1 1];
# censored = ~d1;
# [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Alpha', 0.05);
x = [37, 47, 56, 77, 80, 81]
flo = [np.nan, 0, 0, 0.052701464070711, 0.337611126231790, np.nan]
fup = [np.nan, 0.35417230377, 0.5500569798, 0.9472985359, 1.0, np.nan]
i = np.searchsorted(res.cdf.quantiles, x)
message = "The confidence interval is undefined at some observations"
with pytest.warns(RuntimeWarning, match=message):
ci = res.cdf.confidence_interval()
# Matlab gives NaN as the first element of the CIs. Mathematica agrees,
# but R's survfit does not. It makes some sense, but it's not what the
# formula gives, so skip that element.
assert_allclose(ci.low.probabilities[i][1:], flo[1:])
assert_allclose(ci.high.probabilities[i][1:], fup[1:])
# [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Function',
# 'survivor', 'Alpha', 0.05);
flo = [np.nan, 0.64582769623, 0.449943020228, 0.05270146407, 0, np.nan]
fup = [np.nan, 1.0, 1.0, 0.947298535929289, 0.662388873768210, np.nan]
i = np.searchsorted(res.cdf.quantiles, x)
with pytest.warns(RuntimeWarning, match=message):
ci = res.sf.confidence_interval()
assert_allclose(ci.low.probabilities[i][1:], flo[1:])
assert_allclose(ci.high.probabilities[i][1:], fup[1:])
# With the same data, R's `survival` library `survfit` function
# doesn't produce the leading NaN
# library(survival)
# options(digits=16)
# time = c(37, 43, 47, 56, 60, 62, 71, 77, 80, 81)
# status = c(0, 0, 1, 1, 0, 0, 0, 1, 1, 1)
# res = survfit(Surv(time, status)
# ~1, conf.type = "plain", conf.int = 0.95)
# res$time
# res$lower
# res$upper
low = [1., 1., 0.64582769623233816, 0.44994302022779326,
0.44994302022779326, 0.44994302022779326, 0.44994302022779326,
0.05270146407071086, 0., np.nan]
high = [1., 1., 1., 1., 1., 1., 1., 0.9472985359292891,
0.6623888737682101, np.nan]
assert_allclose(ci.low.probabilities, low)
assert_allclose(ci.high.probabilities, high)
# It does with conf.type="log-log", as do we
with pytest.warns(RuntimeWarning, match=message):
ci = res.sf.confidence_interval(method='log-log')
low = [np.nan, np.nan, 0.38700001403202522, 0.31480711370551911,
0.31480711370551911, 0.31480711370551911, 0.31480711370551911,
0.08048821148507734, 0.01049958986680601, np.nan]
high = [np.nan, np.nan, 0.9813929658789660, 0.9308983170906275,
0.9308983170906275, 0.9308983170906275, 0.9308983170906275,
0.8263946341076415, 0.6558775085110887, np.nan]
assert_allclose(ci.low.probabilities, low)
assert_allclose(ci.high.probabilities, high)
def test_right_censored_against_uncensored(self):
rng = np.random.default_rng(7463952748044886637)
sample = rng.integers(10, 100, size=1000)
censored = np.zeros_like(sample)
censored[np.argmax(sample)] = True
res = stats.ecdf(sample)
ref = stats.ecdf(stats.CensoredData.right_censored(sample, censored))
assert_equal(res.sf.quantiles, ref.sf.quantiles)
assert_equal(res.sf._n, ref.sf._n)
assert_equal(res.sf._d[:-1], ref.sf._d[:-1]) # difference @ [-1]
assert_allclose(res.sf._sf[:-1], ref.sf._sf[:-1], rtol=1e-14)
def test_plot_iv(self):
rng = np.random.default_rng(1769658657308472721)
n_unique = rng.integers(10, 100)
sample, _, _ = self.get_random_sample(rng, n_unique)
res = stats.ecdf(sample)
try:
import matplotlib.pyplot as plt # noqa
res.sf.plot() # no other errors occur
except (ModuleNotFoundError, ImportError):
message = r"matplotlib must be installed to use method `plot`."
with pytest.raises(ModuleNotFoundError, match=message):
res.sf.plot()
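# Editorial usage sketch (not a test): the public API exercised by
# TestSurvival, assuming a SciPy version that provides `stats.ecdf` and
# `stats.CensoredData`.
def _demo_ecdf_right_censored():
    sample = stats.CensoredData.right_censored([1, 2, 3],
                                               [False, True, False])
    res = stats.ecdf(sample)
    # Kaplan-Meier survival estimate evaluated at the observed times.
    return res.sf.quantiles, res.sf.probabilities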
class TestLogRank:
@pytest.mark.parametrize(
"x, y, statistic, pvalue",
# Results validate with R
# library(survival)
# options(digits=16)
#
# futime_1 <- c(8, 12, 26, 14, 21, 27, 8, 32, 20, 40)
# fustat_1 <- c(1, 1, 1, 1, 1, 1, 0, 0, 0, 0)
# rx_1 <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
#
# futime_2 <- c(33, 28, 41, 48, 48, 25, 37, 48, 25, 43)
# fustat_2 <- c(1, 1, 1, 0, 0, 0, 0, 0, 0, 0)
# rx_2 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
#
# futime <- c(futime_1, futime_2)
# fustat <- c(fustat_1, fustat_2)
# rx <- c(rx_1, rx_2)
#
# survdiff(formula = Surv(futime, fustat) ~ rx)
#
# Also check against another library which handle alternatives
# library(nph)
# logrank.test(futime, fustat, rx, alternative = "two.sided")
# res["test"]
[(
# https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html # noqa
# uncensored, censored
[[8, 12, 26, 14, 21, 27], [8, 32, 20, 40]],
[[33, 28, 41], [48, 48, 25, 37, 48, 25, 43]],
# chi2, ["two-sided", "less", "greater"]
6.91598157449,
[0.008542873404, 0.9957285632979385, 0.004271436702061537]
),
(
# https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html # noqa
[[19, 6, 5, 4], [20, 19, 17, 14]],
[[16, 21, 7], [21, 15, 18, 18, 5]],
0.835004855038,
[0.3608293039, 0.8195853480676912, 0.1804146519323088]
),
(
# Bland, Altman, "The logrank test", BMJ, 2004
# https://www.bmj.com/content/328/7447/1073.short
[[6, 13, 21, 30, 37, 38, 49, 50, 63, 79, 86, 98, 202, 219],
[31, 47, 80, 82, 82, 149]],
[[10, 10, 12, 13, 14, 15, 16, 17, 18, 20, 24, 24, 25, 28, 30,
33, 35, 37, 40, 40, 46, 48, 76, 81, 82, 91, 112, 181],
[34, 40, 70]],
7.49659416854,
[0.006181578637, 0.003090789318730882, 0.9969092106812691]
)]
)
def test_log_rank(self, x, y, statistic, pvalue):
x = stats.CensoredData(uncensored=x[0], right=x[1])
y = stats.CensoredData(uncensored=y[0], right=y[1])
for i, alternative in enumerate(["two-sided", "less", "greater"]):
res = stats.logrank(x=x, y=y, alternative=alternative)
            # we return z and use the normal distribution while other
            # frameworks return z**2. The p-values are directly comparable,
            # but we have to square the statistic
assert_allclose(res.statistic**2, statistic, atol=1e-10)
assert_allclose(res.pvalue, pvalue[i], atol=1e-10)
def test_raises(self):
sample = stats.CensoredData([1, 2])
msg = r"`y` must be"
with pytest.raises(ValueError, match=msg):
stats.logrank(x=sample, y=[[1, 2]])
msg = r"`x` must be"
with pytest.raises(ValueError, match=msg):
stats.logrank(x=[[1, 2]], y=sample)
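# Editorial note (illustrative, not a test): as the comment in
# `test_log_rank` explains, SciPy reports a signed z statistic; squaring it
# reproduces the chi-squared statistic printed by R's `survdiff`.
def _demo_logrank_statistic_convention():
    x = stats.CensoredData(uncensored=[8, 12, 26, 14, 21, 27],
                           right=[8, 32, 20, 40])
    y = stats.CensoredData(uncensored=[33, 28, 41],
                           right=[48, 48, 25, 37, 48, 25, 43])
    res = stats.logrank(x=x, y=y)
    return res.statistic**2   # comparable to survdiff's chi-squared (~6.916)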
| 21,992
| 46.094218
| 133
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_binned_statistic.py
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
from pytest import raises as assert_raises
from scipy.stats import (binned_statistic, binned_statistic_2d,
binned_statistic_dd)
from scipy._lib._util import check_random_state
from .common_tests import check_named_results
class TestBinnedStatistic:
@classmethod
def setup_class(cls):
rng = check_random_state(9865)
cls.x = rng.uniform(size=100)
cls.y = rng.uniform(size=100)
cls.v = rng.uniform(size=100)
cls.X = rng.uniform(size=(100, 3))
cls.w = rng.uniform(size=100)
cls.u = rng.uniform(size=100) + 1e6
def test_1d_count(self):
x = self.x
v = self.v
count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
count2, edges2 = np.histogram(x, bins=10)
assert_allclose(count1, count2)
assert_allclose(edges1, edges2)
def test_gh5927(self):
# smoke test for gh5927 - binned_statistic was using `is` for string
# comparison
x = self.x
v = self.v
statistics = ['mean', 'median', 'count', 'sum']
for statistic in statistics:
binned_statistic(x, v, statistic, bins=10)
def test_big_number_std(self):
# tests for numerical stability of std calculation
# see issue gh-10126 for more
x = self.x
u = self.u
stat1, edges1, bc = binned_statistic(x, u, 'std', bins=10)
stat2, edges2, bc = binned_statistic(x, u, np.std, bins=10)
assert_allclose(stat1, stat2)
def test_empty_bins_std(self):
        # tests that std returns nan for empty bins
x = self.x
u = self.u
stat1, edges1, bc = binned_statistic(x, u, 'std', bins=1000)
stat2, edges2, bc = binned_statistic(x, u, np.std, bins=1000)
assert_allclose(stat1, stat2)
def test_non_finite_inputs_and_int_bins(self):
        # if either `values` or `sample` contains np.inf or np.nan, a
        # ValueError should be raised; see issue gh-9010 for more
x = self.x
u = self.u
orig = u[0]
u[0] = np.inf
assert_raises(ValueError, binned_statistic, u, x, 'std', bins=10)
# need to test for non-python specific ints, e.g. np.int8, np.int64
assert_raises(ValueError, binned_statistic, u, x, 'std',
bins=np.int64(10))
u[0] = np.nan
assert_raises(ValueError, binned_statistic, u, x, 'count', bins=10)
        # restore the original value; u is shared class-level data
u[0] = orig
def test_1d_result_attributes(self):
x = self.x
v = self.v
res = binned_statistic(x, v, 'count', bins=10)
attributes = ('statistic', 'bin_edges', 'binnumber')
check_named_results(res, attributes)
def test_1d_sum(self):
x = self.x
v = self.v
sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
sum2, edges2 = np.histogram(x, bins=10, weights=v)
assert_allclose(sum1, sum2)
assert_allclose(edges1, edges2)
def test_1d_mean(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_std(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_min(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_max(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_median(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_bincode(self):
x = self.x[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
1, 2, 1])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_allclose(bc, bc2)
assert_allclose(bcount, count1)
def test_1d_range_keyword(self):
# Regression test for gh-3063, range can be (min, max) or [(min, max)]
np.random.seed(9865)
x = np.arange(30)
data = np.random.random(30)
mean, bins, _ = binned_statistic(x[:15], data[:15])
mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
assert_allclose(mean, mean_range)
assert_allclose(bins, bins_range)
assert_allclose(mean, mean_range2)
assert_allclose(bins, bins_range2)
def test_1d_multi_values(self):
x = self.x
v = self.v
w = self.w
stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10)
stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10)
stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10)
assert_allclose(stat2[0], stat1v)
assert_allclose(stat2[1], stat1w)
assert_allclose(edges1v, edges2)
assert_allclose(bc1v, bc2)
def test_2d_count(self):
x = self.x
y = self.y
v = self.v
count1, binx1, biny1, bc = binned_statistic_2d(
x, y, v, 'count', bins=5)
count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
assert_allclose(count1, count2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_result_attributes(self):
x = self.x
y = self.y
v = self.v
res = binned_statistic_2d(x, y, v, 'count', bins=5)
attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
check_named_results(res, attributes)
def test_2d_sum(self):
x = self.x
y = self.y
v = self.v
sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
assert_allclose(sum1, sum2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_mean(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_mean_unicode(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(
x, y, v, 'mean', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_std(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_min(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_max(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_median(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(
x, y, v, 'median', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(
x, y, v, np.median, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_bincode(self):
x = self.x[:20]
y = self.y[:20]
v = self.v[:20]
count1, binx1, biny1, bc = binned_statistic_2d(
x, y, v, 'count', bins=3)
bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
6, 11, 16, 6, 6, 11, 8])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_allclose(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_allclose(bcount, count1adj)
def test_2d_multi_values(self):
x = self.x
y = self.y
v = self.v
w = self.w
stat1v, binx1v, biny1v, bc1v = binned_statistic_2d(
x, y, v, 'mean', bins=8)
stat1w, binx1w, biny1w, bc1w = binned_statistic_2d(
x, y, w, 'mean', bins=8)
stat2, binx2, biny2, bc2 = binned_statistic_2d(
x, y, [v, w], 'mean', bins=8)
assert_allclose(stat2[0], stat1v)
assert_allclose(stat2[1], stat1w)
assert_allclose(binx1v, binx2)
assert_allclose(biny1w, biny2)
assert_allclose(bc1v, bc2)
def test_2d_binnumbers_unraveled(self):
x = self.x
y = self.y
v = self.v
stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20)
stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10)
stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d(
x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True)
bcx3 = np.searchsorted(edgesx, x, side='right')
bcy3 = np.searchsorted(edgesy, y, side='right')
# `numpy.searchsorted` is non-inclusive on right-edge, compensate
bcx3[x == x.max()] -= 1
bcy3[y == y.max()] -= 1
assert_allclose(bcx, bc2[0])
assert_allclose(bcy, bc2[1])
assert_allclose(bcx3, bc2[0])
assert_allclose(bcy3, bc2[1])
def test_dd_count(self):
X = self.X
v = self.v
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
count2, edges2 = np.histogramdd(X, bins=3)
assert_allclose(count1, count2)
assert_allclose(edges1, edges2)
def test_dd_result_attributes(self):
X = self.X
v = self.v
res = binned_statistic_dd(X, v, 'count', bins=3)
attributes = ('statistic', 'bin_edges', 'binnumber')
check_named_results(res, attributes)
def test_dd_sum(self):
X = self.X
v = self.v
sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
sum3, edges3, bc = binned_statistic_dd(X, v, np.sum, bins=3)
assert_allclose(sum1, sum2)
assert_allclose(edges1, edges2)
assert_allclose(sum1, sum3)
assert_allclose(edges1, edges3)
def test_dd_mean(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_std(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_min(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_max(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_median(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_bincode(self):
X = self.X[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
32, 36, 91, 43, 87, 81, 81])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_allclose(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_allclose(bcount, count1adj)
def test_dd_multi_values(self):
X = self.X
v = self.v
w = self.w
for stat in ["count", "sum", "mean", "std", "min", "max", "median",
np.std]:
stat1v, edges1v, bc1v = binned_statistic_dd(X, v, stat, bins=8)
stat1w, edges1w, bc1w = binned_statistic_dd(X, w, stat, bins=8)
stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], stat, bins=8)
assert_allclose(stat2[0], stat1v)
assert_allclose(stat2[1], stat1w)
assert_allclose(edges1v, edges2)
assert_allclose(edges1w, edges2)
assert_allclose(bc1v, bc2)
def test_dd_binnumbers_unraveled(self):
X = self.X
v = self.v
stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15)
stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20)
stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10)
stat2, edges2, bc2 = binned_statistic_dd(
X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True)
assert_allclose(bcx, bc2[0])
assert_allclose(bcy, bc2[1])
assert_allclose(bcz, bc2[2])
def test_dd_binned_statistic_result(self):
# NOTE: tests the reuse of bin_edges from previous call
x = np.random.random((10000, 3))
v = np.random.random(10000)
bins = np.linspace(0, 1, 10)
bins = (bins, bins, bins)
result = binned_statistic_dd(x, v, 'mean', bins=bins)
stat = result.statistic
result = binned_statistic_dd(x, v, 'mean',
binned_statistic_result=result)
stat2 = result.statistic
assert_allclose(stat, stat2)
def test_dd_zero_dedges(self):
x = np.random.random((10000, 3))
v = np.random.random(10000)
bins = np.linspace(0, 1, 10)
bins = np.append(bins, 1)
bins = (bins, bins, bins)
with assert_raises(ValueError, match='difference is numerically 0'):
binned_statistic_dd(x, v, 'mean', bins=bins)
def test_dd_range_errors(self):
# Test that descriptive exceptions are raised as appropriate for bad
# values of the `range` argument. (See gh-12996)
with assert_raises(ValueError,
match='In range, start must be <= stop'):
binned_statistic_dd([self.y], self.v,
range=[[1, 0]])
with assert_raises(
ValueError,
match='In dimension 1 of range, start must be <= stop'):
binned_statistic_dd([self.x, self.y], self.v,
range=[[1, 0], [0, 1]])
with assert_raises(
ValueError,
match='In dimension 2 of range, start must be <= stop'):
binned_statistic_dd([self.x, self.y], self.v,
range=[[0, 1], [1, 0]])
with assert_raises(
ValueError,
match='range given for 1 dimensions; 2 required'):
binned_statistic_dd([self.x, self.y], self.v,
range=[[0, 1]])
def test_binned_statistic_float32(self):
X = np.array([0, 0.42358226], dtype=np.float32)
stat, _, _ = binned_statistic(X, None, 'count', bins=5)
assert_allclose(stat, np.array([1, 0, 0, 0, 1], dtype=np.float64))
def test_gh14332(self):
        # Regression test for gh-14332: wrong output when `sample` values
        # lie close to a bin edge
x = []
size = 20
for i in range(size):
x += [1-0.1**i]
        bins = np.linspace(0, 1, 11)
sum1, edges1, bc = binned_statistic_dd(x, np.ones(len(x)),
bins=[bins], statistic='sum')
sum2, edges2 = np.histogram(x, bins=bins)
assert_allclose(sum1, sum2)
assert_allclose(edges1[0], edges2)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("statistic", [np.mean, np.median, np.sum, np.std,
np.min, np.max, 'count',
lambda x: (x**2).sum(),
lambda x: (x**2).sum() * 1j])
def test_dd_all(self, dtype, statistic):
def ref_statistic(x):
return len(x) if statistic == 'count' else statistic(x)
rng = np.random.default_rng(3704743126639371)
n = 10
x = rng.random(size=n)
i = x >= 0.5
v = rng.random(size=n)
if dtype is np.complex128:
v = v + rng.random(size=n)*1j
stat, _, _ = binned_statistic_dd(x, v, statistic, bins=2)
ref = np.array([ref_statistic(v[~i]), ref_statistic(v[i])])
assert_allclose(stat, ref)
assert stat.dtype == np.result_type(ref.dtype, np.float64)
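# Editorial usage sketch (not a test): `expand_binnumbers=True`, exercised by
# the *_binnumbers_unraveled tests above, returns one row of 1-based bin
# indices per dimension instead of a single raveled index.
def _demo_expand_binnumbers():
    x = np.array([0.1, 0.6])
    y = np.array([0.2, 0.9])
    _, _, _, bn = binned_statistic_2d(x, y, None, 'count', bins=2,
                                      expand_binnumbers=True)
    return bn   # shape (2, 2): [[1, 2], [1, 2]]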
| 18,814
| 32.066784
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_continuous_fit_censored.py
|
# Tests for fitting specific distributions to censored data.
import numpy as np
from numpy.testing import assert_allclose
from scipy.optimize import fmin
from scipy.stats import (CensoredData, beta, cauchy, chi2, expon, gamma,
gumbel_l, gumbel_r, invgauss, invweibull, laplace,
logistic, lognorm, nct, ncx2, norm, weibull_max,
weibull_min)
# In some tests, we'll use this optimizer for improved accuracy.
def optimizer(func, x0, args=(), disp=0):
return fmin(func, x0, args=args, disp=disp, xtol=1e-12, ftol=1e-12)
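# Editorial sketch (illustrative values, not a test): the CensoredData
# containers used throughout this module can mix uncensored, left-censored,
# right-censored and interval-censored observations.
def _demo_censored_data():
    d = CensoredData(uncensored=[1, 10], left=[1], right=[30],
                     interval=[[4, 8]])
    return len(d), d.num_censored()   # 5 observations, 3 of them censored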
def test_beta():
"""
Test fitting beta shape parameters to interval-censored data.
Calculation in R:
> library(fitdistrplus)
> data <- data.frame(left=c(0.10, 0.50, 0.75, 0.80),
+ right=c(0.20, 0.55, 0.90, 0.95))
> result = fitdistcens(data, 'beta', control=list(reltol=1e-14))
> result
Fitting of the distribution ' beta ' on censored data by maximum likelihood
Parameters:
estimate
shape1 1.419941
shape2 1.027066
> result$sd
shape1 shape2
0.9914177 0.6866565
"""
data = CensoredData(interval=[[0.10, 0.20],
[0.50, 0.55],
[0.75, 0.90],
[0.80, 0.95]])
# For this test, fit only the shape parameters; loc and scale are fixed.
a, b, loc, scale = beta.fit(data, floc=0, fscale=1, optimizer=optimizer)
assert_allclose(a, 1.419941, rtol=5e-6)
assert_allclose(b, 1.027066, rtol=5e-6)
assert loc == 0
assert scale == 1
def test_cauchy_right_censored():
"""
Test fitting the Cauchy distribution to right-censored data.
Calculation in R, with two values not censored [1, 10] and
one right-censored value [30].
> library(fitdistrplus)
> data <- data.frame(left=c(1, 10, 30), right=c(1, 10, NA))
> result = fitdistcens(data, 'cauchy', control=list(reltol=1e-14))
> result
Fitting of the distribution ' cauchy ' on censored data by maximum
likelihood
Parameters:
estimate
location 7.100001
scale 7.455866
"""
data = CensoredData(uncensored=[1, 10], right=[30])
loc, scale = cauchy.fit(data, optimizer=optimizer)
    assert_allclose(loc, 7.100001, rtol=5e-6)
assert_allclose(scale, 7.455866, rtol=5e-6)
def test_cauchy_mixed():
"""
Test fitting the Cauchy distribution to data with mixed censoring.
Calculation in R, with:
* two values not censored [1, 10],
* one left-censored [1],
* one right-censored [30], and
* one interval-censored [[4, 8]].
> library(fitdistrplus)
> data <- data.frame(left=c(NA, 1, 4, 10, 30), right=c(1, 1, 8, 10, NA))
> result = fitdistcens(data, 'cauchy', control=list(reltol=1e-14))
> result
Fitting of the distribution ' cauchy ' on censored data by maximum
likelihood
Parameters:
estimate
location 4.605150
scale 5.900852
"""
data = CensoredData(uncensored=[1, 10], left=[1], right=[30],
interval=[[4, 8]])
loc, scale = cauchy.fit(data, optimizer=optimizer)
assert_allclose(loc, 4.605150, rtol=5e-6)
assert_allclose(scale, 5.900852, rtol=5e-6)
def test_chi2_mixed():
"""
Test fitting just the shape parameter (df) of chi2 to mixed data.
Calculation in R, with:
* two values not censored [1, 10],
* one left-censored [1],
* one right-censored [30], and
* one interval-censored [[4, 8]].
> library(fitdistrplus)
> data <- data.frame(left=c(NA, 1, 4, 10, 30), right=c(1, 1, 8, 10, NA))
> result = fitdistcens(data, 'chisq', control=list(reltol=1e-14))
> result
Fitting of the distribution ' chisq ' on censored data by maximum
likelihood
Parameters:
estimate
df 5.060329
"""
data = CensoredData(uncensored=[1, 10], left=[1], right=[30],
interval=[[4, 8]])
df, loc, scale = chi2.fit(data, floc=0, fscale=1, optimizer=optimizer)
assert_allclose(df, 5.060329, rtol=5e-6)
assert loc == 0
assert scale == 1
def test_expon_right_censored():
"""
For the exponential distribution with loc=0, the exact solution for
fitting n uncensored points x[0]...x[n-1] and m right-censored points
x[n]..x[n+m-1] is
scale = sum(x)/n
That is, divide the sum of all the values (not censored and
right-censored) by the number of uncensored values. (See, for example,
https://en.wikipedia.org/wiki/Censoring_(statistics)#Likelihood.)
The second derivative of the log-likelihood function is
n/scale**2 - 2*sum(x)/scale**3
from which the estimate of the standard error can be computed.
-----
Calculation in R, for reference only. The R results are not
used in the test.
> library(fitdistrplus)
> dexps <- function(x, scale) {
+ return(dexp(x, 1/scale))
+ }
> pexps <- function(q, scale) {
+ return(pexp(q, 1/scale))
+ }
> left <- c(1, 2.5, 3, 6, 7.5, 10, 12, 12, 14.5, 15,
+ 16, 16, 20, 20, 21, 22)
> right <- c(1, 2.5, 3, 6, 7.5, 10, 12, 12, 14.5, 15,
+ NA, NA, NA, NA, NA, NA)
> data <- data.frame(left=left, right=right)
> result = fitdistcens(data, 'exps', start=list(scale=mean(data$left)),
+ control=list(reltol=1e-14))
> result
Fitting of the distribution ' exps ' on censored data by maximum likelihood
Parameters:
estimate
scale 19.85
> result$sd
scale
6.277119
"""
# This data has 10 uncensored values and 6 right-censored values.
obs = [1, 2.5, 3, 6, 7.5, 10, 12, 12, 14.5, 15, 16, 16, 20, 20, 21, 22]
cens = [False]*10 + [True]*6
data = CensoredData.right_censored(obs, cens)
loc, scale = expon.fit(data, floc=0, optimizer=optimizer)
assert loc == 0
# Use the analytical solution to compute the expected value. This
# is the sum of the observed values divided by the number of uncensored
# values.
n = len(data) - data.num_censored()
total = data._uncensored.sum() + data._right.sum()
expected = total / n
assert_allclose(scale, expected, 1e-8)
def test_gamma_right_censored():
"""
Fit gamma shape and scale to data with one right-censored value.
Calculation in R:
> library(fitdistrplus)
> data <- data.frame(left=c(2.5, 2.9, 3.8, 9.1, 9.3, 12.0, 23.0, 25.0),
+ right=c(2.5, 2.9, 3.8, 9.1, 9.3, 12.0, 23.0, NA))
> result = fitdistcens(data, 'gamma', start=list(shape=1, scale=10),
+ control=list(reltol=1e-13))
> result
Fitting of the distribution ' gamma ' on censored data by maximum
likelihood
Parameters:
estimate
shape 1.447623
scale 8.360197
> result$sd
shape scale
0.7053086 5.1016531
"""
# The last value is right-censored.
x = CensoredData.right_censored([2.5, 2.9, 3.8, 9.1, 9.3, 12.0, 23.0,
25.0],
[0]*7 + [1])
a, loc, scale = gamma.fit(x, floc=0, optimizer=optimizer)
assert_allclose(a, 1.447623, rtol=5e-6)
assert loc == 0
assert_allclose(scale, 8.360197, rtol=5e-6)
def test_gumbel():
"""
Fit gumbel_l and gumbel_r to censored data.
This R calculation should match gumbel_r.
> library(evd)
    > library(fitdistrplus)
> data = data.frame(left=c(0, 2, 3, 9, 10, 10),
+ right=c(1, 2, 3, 9, NA, NA))
> result = fitdistcens(data, 'gumbel',
+ control=list(reltol=1e-14),
+ start=list(loc=4, scale=5))
> result
Fitting of the distribution ' gumbel ' on censored data by maximum
likelihood
Parameters:
estimate
loc 4.487853
scale 4.843640
"""
# First value is interval-censored. Last two are right-censored.
uncensored = np.array([2, 3, 9])
right = np.array([10, 10])
interval = np.array([[0, 1]])
data = CensoredData(uncensored, right=right, interval=interval)
loc, scale = gumbel_r.fit(data, optimizer=optimizer)
assert_allclose(loc, 4.487853, rtol=5e-6)
assert_allclose(scale, 4.843640, rtol=5e-6)
# Negate the data and reverse the intervals, and test with gumbel_l.
data2 = CensoredData(-uncensored, left=-right,
interval=-interval[:, ::-1])
# Fitting gumbel_l to data2 should give the same result as above, but
# with loc negated.
loc2, scale2 = gumbel_l.fit(data2, optimizer=optimizer)
assert_allclose(loc2, -4.487853, rtol=5e-6)
assert_allclose(scale2, 4.843640, rtol=5e-6)
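# A minimal sketch (not part of the test suite) of the reflection used in
# test_gumbel: if X ~ gumbel_r(loc, scale) then -X ~ gumbel_l(-loc, scale),
# i.e. gumbel_l.cdf(x, -loc, scale) == gumbel_r.sf(-x, loc, scale).  Under
# this reflection, a right-censored point c becomes a left-censored point
# -c and an interval [a, b] becomes [-b, -a].  The helper name and default
# parameter values are illustrative only.
def _gumbel_reflection_sketch(loc=4.487853, scale=4.843640):
    x = np.linspace(-10, 10, 7)
    return np.allclose(gumbel_l.cdf(x, -loc, scale),
                       gumbel_r.sf(-x, loc, scale))  # True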
def test_invgauss():
"""
Fit just the shape parameter of invgauss to data with one value
left-censored and one value right-censored.
Calculation in R; using a fixed dispersion parameter amounts to fixing
the scale to be 1.
> library(statmod)
> library(fitdistrplus)
> left <- c(NA, 0.4813096, 0.5571880, 0.5132463, 0.3801414, 0.5904386,
+ 0.4822340, 0.3478597, 3, 0.7191797, 1.5810902, 0.4442299)
> right <- c(0.15, 0.4813096, 0.5571880, 0.5132463, 0.3801414, 0.5904386,
+ 0.4822340, 0.3478597, NA, 0.7191797, 1.5810902, 0.4442299)
> data <- data.frame(left=left, right=right)
> result = fitdistcens(data, 'invgauss', control=list(reltol=1e-12),
+ fix.arg=list(dispersion=1), start=list(mean=3))
> result
Fitting of the distribution ' invgauss ' on censored data by maximum
likelihood
Parameters:
estimate
mean 0.853469
Fixed parameters:
value
dispersion 1
> result$sd
mean
0.247636
Here's the R calculation with the dispersion as a free parameter to
be fit.
> result = fitdistcens(data, 'invgauss', control=list(reltol=1e-12),
+ start=list(mean=3, dispersion=1))
> result
Fitting of the distribution ' invgauss ' on censored data by maximum
likelihood
Parameters:
estimate
mean 0.8699819
dispersion 1.2261362
The parametrization of the inverse Gaussian distribution in the
`statmod` package is not the same as in SciPy (see
https://arxiv.org/abs/1603.06687
for details). The translation from R to SciPy is
scale = 1/dispersion
mu = mean * dispersion
> 1/result$estimate['dispersion'] # 1/dispersion
dispersion
0.8155701
> result$estimate['mean'] * result$estimate['dispersion']
mean
1.066716
Those last two values are the SciPy scale and shape parameters.
"""
# One point is left-censored, and one is right-censored.
x = [0.4813096, 0.5571880, 0.5132463, 0.3801414,
0.5904386, 0.4822340, 0.3478597, 0.7191797,
1.5810902, 0.4442299]
data = CensoredData(uncensored=x, left=[0.15], right=[3])
# Fit only the shape parameter.
mu, loc, scale = invgauss.fit(data, floc=0, fscale=1, optimizer=optimizer)
assert_allclose(mu, 0.853469, rtol=5e-5)
assert loc == 0
assert scale == 1
# Fit the shape and scale.
mu, loc, scale = invgauss.fit(data, floc=0, optimizer=optimizer)
assert_allclose(mu, 1.066716, rtol=5e-5)
assert loc == 0
assert_allclose(scale, 0.8155701, rtol=5e-5)
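# A minimal sketch (not part of the test suite) of the statmod -> SciPy
# translation described in the docstring above: the R estimates
# (mean, dispersion) map to SciPy's (mu, scale) via scale = 1/dispersion
# and mu = mean * dispersion.  The helper name is illustrative only.
def _invgauss_translation_sketch():
    mean_r, dispersion_r = 0.8699819, 1.2261362  # R fit, both parameters free
    scale = 1 / dispersion_r                     # ~0.8155701 (SciPy scale)
    mu = mean_r * dispersion_r                   # ~1.066716 (SciPy shape)
    return mu, scale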
def test_invweibull():
"""
Fit invweibull to censored data.
Here is the calculation in R. The 'frechet' distribution from the evd
package matches SciPy's invweibull distribution. The `loc` parameter
is fixed at 0.
> library(evd)
    > library(fitdistrplus)
> data = data.frame(left=c(0, 2, 3, 9, 10, 10),
+ right=c(1, 2, 3, 9, NA, NA))
> result = fitdistcens(data, 'frechet',
+ control=list(reltol=1e-14),
+ start=list(loc=4, scale=5))
> result
Fitting of the distribution ' frechet ' on censored data by maximum
likelihood
Parameters:
estimate
scale 2.7902200
shape 0.6379845
Fixed parameters:
value
loc 0
"""
# In the R data, the first value is interval-censored, and the last
# two are right-censored. The rest are not censored.
data = CensoredData(uncensored=[2, 3, 9], right=[10, 10],
interval=[[0, 1]])
c, loc, scale = invweibull.fit(data, floc=0, optimizer=optimizer)
assert_allclose(c, 0.6379845, rtol=5e-6)
assert loc == 0
assert_allclose(scale, 2.7902200, rtol=5e-6)
def test_laplace():
"""
    Fit the Laplace distribution to left- and right-censored data.
Calculation in R:
> library(fitdistrplus)
> dlaplace <- function(x, location=0, scale=1) {
+ return(0.5*exp(-abs((x - location)/scale))/scale)
+ }
> plaplace <- function(q, location=0, scale=1) {
+ z <- (q - location)/scale
+ s <- sign(z)
+ f <- -s*0.5*exp(-abs(z)) + (s+1)/2
+ return(f)
+ }
> left <- c(NA, -41.564, 50.0, 15.7384, 50.0, 10.0452, -2.0684,
+ -19.5399, 50.0, 9.0005, 27.1227, 4.3113, -3.7372,
+ 25.3111, 14.7987, 34.0887, 50.0, 42.8496, 18.5862,
+ 32.8921, 9.0448, -27.4591, NA, 19.5083, -9.7199)
> right <- c(-50.0, -41.564, NA, 15.7384, NA, 10.0452, -2.0684,
+ -19.5399, NA, 9.0005, 27.1227, 4.3113, -3.7372,
+ 25.3111, 14.7987, 34.0887, NA, 42.8496, 18.5862,
+ 32.8921, 9.0448, -27.4591, -50.0, 19.5083, -9.7199)
> data <- data.frame(left=left, right=right)
> result <- fitdistcens(data, 'laplace', start=list(location=10, scale=10),
+ control=list(reltol=1e-13))
> result
Fitting of the distribution ' laplace ' on censored data by maximum
likelihood
Parameters:
estimate
location 14.79870
scale 30.93601
> result$sd
location scale
0.1758864 7.0972125
"""
# The value -50 is left-censored, and the value 50 is right-censored.
obs = np.array([-50.0, -41.564, 50.0, 15.7384, 50.0, 10.0452, -2.0684,
-19.5399, 50.0, 9.0005, 27.1227, 4.3113, -3.7372,
25.3111, 14.7987, 34.0887, 50.0, 42.8496, 18.5862,
32.8921, 9.0448, -27.4591, -50.0, 19.5083, -9.7199])
    x = obs[(obs != -50.0) & (obs != 50.0)]
left = obs[obs == -50.0]
right = obs[obs == 50.0]
data = CensoredData(uncensored=x, left=left, right=right)
loc, scale = laplace.fit(data, loc=10, scale=10, optimizer=optimizer)
assert_allclose(loc, 14.79870, rtol=5e-6)
assert_allclose(scale, 30.93601, rtol=5e-6)
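# A minimal sketch (not part of the test suite): a Python equivalent of the
# R helper `plaplace` defined in the docstring above.  It agrees with
# scipy.stats.laplace.cdf; the helper name is illustrative only.
def _plaplace_sketch(q, location=0.0, scale=1.0):
    z = (np.asarray(q) - location) / scale
    s = np.sign(z)
    return -s*0.5*np.exp(-np.abs(z)) + (s + 1)/2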
def test_logistic():
"""
Fit the logistic distribution to left-censored data.
Calculation in R:
> library(fitdistrplus)
> left = c(13.5401, 37.4235, 11.906 , 13.998 , NA , 0.4023, NA ,
+ 10.9044, 21.0629, 9.6985, NA , 12.9016, 39.164 , 34.6396,
+ NA , 20.3665, 16.5889, 18.0952, 45.3818, 35.3306, 8.4949,
+ 3.4041, NA , 7.2828, 37.1265, 6.5969, 17.6868, 17.4977,
+ 16.3391, 36.0541)
> right = c(13.5401, 37.4235, 11.906 , 13.998 , 0. , 0.4023, 0. ,
+ 10.9044, 21.0629, 9.6985, 0. , 12.9016, 39.164 , 34.6396,
+ 0. , 20.3665, 16.5889, 18.0952, 45.3818, 35.3306, 8.4949,
+ 3.4041, 0. , 7.2828, 37.1265, 6.5969, 17.6868, 17.4977,
+ 16.3391, 36.0541)
> data = data.frame(left=left, right=right)
> result = fitdistcens(data, 'logis', control=list(reltol=1e-14))
> result
Fitting of the distribution ' logis ' on censored data by maximum
likelihood
Parameters:
estimate
location 14.633459
scale 9.232736
> result$sd
location scale
2.931505 1.546879
"""
# Values that are zero are left-censored; the true values are less than 0.
x = np.array([13.5401, 37.4235, 11.906, 13.998, 0.0, 0.4023, 0.0, 10.9044,
21.0629, 9.6985, 0.0, 12.9016, 39.164, 34.6396, 0.0, 20.3665,
16.5889, 18.0952, 45.3818, 35.3306, 8.4949, 3.4041, 0.0,
7.2828, 37.1265, 6.5969, 17.6868, 17.4977, 16.3391,
36.0541])
data = CensoredData.left_censored(x, censored=(x == 0))
loc, scale = logistic.fit(data, optimizer=optimizer)
assert_allclose(loc, 14.633459, rtol=5e-7)
assert_allclose(scale, 9.232736, rtol=5e-6)
def test_lognorm():
"""
Ref: https://math.montana.edu/jobo/st528/documents/relc.pdf
    The data is the locomotive control time-to-failure example that starts
    on page 8 (the 8th page in the PDF; the page number printed in the
    text is 270).
The document includes SAS output for the data.
"""
# These are the uncensored measurements. There are also 59 right-censored
# measurements where the lower bound is 135.
miles_to_fail = [22.5, 37.5, 46.0, 48.5, 51.5, 53.0, 54.5, 57.5, 66.5,
68.0, 69.5, 76.5, 77.0, 78.5, 80.0, 81.5, 82.0, 83.0,
84.0, 91.5, 93.5, 102.5, 107.0, 108.5, 112.5, 113.5,
116.0, 117.0, 118.5, 119.0, 120.0, 122.5, 123.0, 127.5,
131.0, 132.5, 134.0]
data = CensoredData.right_censored(miles_to_fail + [135]*59,
[0]*len(miles_to_fail) + [1]*59)
sigma, loc, scale = lognorm.fit(data, floc=0)
assert loc == 0
# Convert the lognorm parameters to the mu and sigma of the underlying
# normal distribution.
mu = np.log(scale)
# The expected results are from the 17th page of the PDF document
# (labeled page 279), in the SAS output on the right side of the page.
assert_allclose(mu, 5.1169, rtol=5e-4)
assert_allclose(sigma, 0.7055, rtol=5e-3)
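# A minimal sketch (not part of the test suite) of the parameter
# relationship used in test_lognorm: if Y ~ N(mu, sigma), then exp(Y)
# follows lognorm with shape s = sigma and scale = exp(mu), so
# lognorm.cdf(x, sigma, scale=exp(mu)) == norm.cdf((log(x) - mu)/sigma).
# The helper name and default parameter values are illustrative only.
def _lognorm_normal_sketch(mu=5.1169, sigma=0.7055):
    x = np.linspace(20, 200, 7)
    return np.allclose(lognorm.cdf(x, sigma, scale=np.exp(mu)),
                       norm.cdf((np.log(x) - mu)/sigma))  # True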
def test_nct():
"""
Test fitting the noncentral t distribution to censored data.
Calculation in R:
> library(fitdistrplus)
> data <- data.frame(left=c(1, 2, 3, 5, 8, 10, 25, 25),
+ right=c(1, 2, 3, 5, 8, 10, NA, NA))
> result = fitdistcens(data, 't', control=list(reltol=1e-14),
+ start=list(df=1, ncp=2))
> result
Fitting of the distribution ' t ' on censored data by maximum likelihood
Parameters:
estimate
df 0.5432336
ncp 2.8893565
"""
data = CensoredData.right_censored([1, 2, 3, 5, 8, 10, 25, 25],
[0, 0, 0, 0, 0, 0, 1, 1])
# Fit just the shape parameter df and nc; loc and scale are fixed.
with np.errstate(over='ignore'): # remove context when gh-14901 is closed
df, nc, loc, scale = nct.fit(data, floc=0, fscale=1,
optimizer=optimizer)
assert_allclose(df, 0.5432336, rtol=5e-6)
assert_allclose(nc, 2.8893565, rtol=5e-6)
assert loc == 0
assert scale == 1
def test_ncx2():
"""
Test fitting the shape parameters (df, ncp) of ncx2 to mixed data.
Calculation in R, with
* 5 not censored values [2.7, 0.2, 6.5, 0.4, 0.1],
* 1 interval-censored value [[0.6, 1.0]], and
* 2 right-censored values [8, 8].
> library(fitdistrplus)
> data <- data.frame(left=c(2.7, 0.2, 6.5, 0.4, 0.1, 0.6, 8, 8),
+ right=c(2.7, 0.2, 6.5, 0.4, 0.1, 1.0, NA, NA))
> result = fitdistcens(data, 'chisq', control=list(reltol=1e-14),
+ start=list(df=1, ncp=2))
> result
Fitting of the distribution ' chisq ' on censored data by maximum
likelihood
Parameters:
estimate
df 1.052871
ncp 2.362934
"""
data = CensoredData(uncensored=[2.7, 0.2, 6.5, 0.4, 0.1], right=[8, 8],
interval=[[0.6, 1.0]])
with np.errstate(over='ignore'): # remove context when gh-14901 is closed
df, ncp, loc, scale = ncx2.fit(data, floc=0, fscale=1,
optimizer=optimizer)
assert_allclose(df, 1.052871, rtol=5e-6)
assert_allclose(ncp, 2.362934, rtol=5e-6)
assert loc == 0
assert scale == 1
def test_norm():
"""
Test fitting the normal distribution to interval-censored data.
Calculation in R:
> library(fitdistrplus)
> data <- data.frame(left=c(0.10, 0.50, 0.75, 0.80),
+ right=c(0.20, 0.55, 0.90, 0.95))
> result = fitdistcens(data, 'norm', control=list(reltol=1e-14))
> result
Fitting of the distribution ' norm ' on censored data by maximum likelihood
Parameters:
estimate
mean 0.5919990
sd 0.2868042
> result$sd
mean sd
0.1444432 0.1029451
"""
data = CensoredData(interval=[[0.10, 0.20],
[0.50, 0.55],
[0.75, 0.90],
[0.80, 0.95]])
loc, scale = norm.fit(data, optimizer=optimizer)
assert_allclose(loc, 0.5919990, rtol=5e-6)
assert_allclose(scale, 0.2868042, rtol=5e-6)
def test_weibull_censored1():
# Ref: http://www.ams.sunysb.edu/~zhu/ams588/Lecture_3_likelihood.pdf
# Survival times; '*' indicates right-censored.
s = "3,5,6*,8,10*,11*,15,20*,22,23,27*,29,32,35,40,26,28,33*,21,24*"
times, cens = zip(*[(float(t[0]), len(t) == 2)
for t in [w.split('*') for w in s.split(',')]])
data = CensoredData.right_censored(times, cens)
c, loc, scale = weibull_min.fit(data, floc=0)
# Expected values are from the reference.
assert_allclose(c, 2.149, rtol=1e-3)
assert loc == 0
assert_allclose(scale, 28.99, rtol=1e-3)
# Flip the sign of the data, and make the censored values
# left-censored. We should get the same parameters when we fit
# weibull_max to the flipped data.
data2 = CensoredData.left_censored(-np.array(times), cens)
c2, loc2, scale2 = weibull_max.fit(data2, floc=0)
assert_allclose(c2, 2.149, rtol=1e-3)
assert loc2 == 0
assert_allclose(scale2, 28.99, rtol=1e-3)
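# A minimal sketch (not part of the test suite) of the reflection used in
# test_weibull_censored1: with loc=0, weibull_max is the mirror image of
# weibull_min, i.e. weibull_max.cdf(x, c, scale=scale) equals
# weibull_min.sf(-x, c, scale=scale) for x <= 0, so negated right-censored
# weibull_min data is left-censored weibull_max data with the same c and
# scale.  The helper name and default parameter values are illustrative only.
def _weibull_reflection_sketch(c=2.149, scale=28.99):
    x = -np.linspace(0.1, 50, 7)
    return np.allclose(weibull_max.cdf(x, c, scale=scale),
                       weibull_min.sf(-x, c, scale=scale))  # True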
def test_weibull_min_sas1():
# Data and SAS results from
# https://support.sas.com/documentation/cdl/en/qcug/63922/HTML/default/
# viewer.htm#qcug_reliability_sect004.htm
text = """
450 0 460 1 1150 0 1150 0 1560 1
1600 0 1660 1 1850 1 1850 1 1850 1
1850 1 1850 1 2030 1 2030 1 2030 1
2070 0 2070 0 2080 0 2200 1 3000 1
3000 1 3000 1 3000 1 3100 0 3200 1
3450 0 3750 1 3750 1 4150 1 4150 1
4150 1 4150 1 4300 1 4300 1 4300 1
4300 1 4600 0 4850 1 4850 1 4850 1
4850 1 5000 1 5000 1 5000 1 6100 1
6100 0 6100 1 6100 1 6300 1 6450 1
6450 1 6700 1 7450 1 7800 1 7800 1
8100 1 8100 1 8200 1 8500 1 8500 1
8500 1 8750 1 8750 0 8750 1 9400 1
9900 1 10100 1 10100 1 10100 1 11500 1
"""
life, cens = np.array([int(w) for w in text.split()]).reshape(-1, 2).T
life = life/1000.0
data = CensoredData.right_censored(life, cens)
c, loc, scale = weibull_min.fit(data, floc=0, optimizer=optimizer)
assert_allclose(c, 1.0584, rtol=1e-4)
assert_allclose(scale, 26.2968, rtol=1e-5)
assert loc == 0
def test_weibull_min_sas2():
# http://support.sas.com/documentation/cdl/en/ormpug/67517/HTML/default/
# viewer.htm#ormpug_nlpsolver_examples06.htm
# The last two values are right-censored.
days = np.array([143, 164, 188, 188, 190, 192, 206, 209, 213, 216, 220,
227, 230, 234, 246, 265, 304, 216, 244])
data = CensoredData.right_censored(days, [0]*(len(days) - 2) + [1]*2)
c, loc, scale = weibull_min.fit(data, 1, loc=100, scale=100,
optimizer=optimizer)
assert_allclose(c, 2.7112, rtol=5e-4)
assert_allclose(loc, 122.03, rtol=5e-4)
assert_allclose(scale, 108.37, rtol=5e-4)
| 24,186
| 34.361111
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_morestats.py
|
# Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
import warnings
import sys
import numpy as np
from numpy.random import RandomState
from numpy.testing import (assert_array_equal, assert_almost_equal,
assert_array_less, assert_array_almost_equal,
assert_, assert_allclose, assert_equal,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
import re
from scipy import optimize
from scipy import stats
from scipy.stats._morestats import _abw_state, _get_As_weibull, _Avals_weibull
from .common_tests import check_named_results
from .._hypotests import _get_wilcoxon_distr, _get_wilcoxon_distr2
from scipy.stats._binomtest import _binary_search_for_binom_tst
from scipy.stats._distr_params import distcont
distcont = dict(distcont) # type: ignore
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
have_matplotlib = True
except Exception:
have_matplotlib = False
# test data gear.dat from NIST for Levene and Bartlett test
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
# The loggamma RVS stream is changing due to gh-13349; this version
# preserves the old stream so that tests don't change.
def _old_loggamma_rvs(*args, **kwargs):
return np.log(stats.gamma.rvs(*args, **kwargs))
class TestBayes_mvs:
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist:
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro:
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
w, pw = stats.shapiro(x1)
shapiro_test = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
assert_almost_equal(pw, 0.042089745402336121, decimal=6)
assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]
w, pw = stats.shapiro(x2)
shapiro_test = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
assert_almost_equal(pw, 0.52460, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
# Verified against R
x3 = stats.norm.rvs(loc=5, scale=3, size=100, random_state=12345678)
w, pw = stats.shapiro(x3)
shapiro_test = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
shapiro_test = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(shapiro_test.statistic, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
assert_almost_equal(shapiro_test.pvalue, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
shapiro_test = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
assert_almost_equal(pw, 0.042089745402336121, decimal=6)
assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
shapiro_test = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
assert_almost_equal(pw, 0.52460, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, np.array([[], [2]], dtype=object))
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
shapiro_test = stats.shapiro(x)
assert_equal(w, np.nan)
assert_equal(shapiro_test.statistic, np.nan)
assert_almost_equal(pw, 1.0)
assert_almost_equal(shapiro_test.pvalue, 1.0)
def test_gh14462(self):
# shapiro is theoretically location-invariant, but when the magnitude
# of the values is much greater than the variance, there can be
        # numerical issues. Fixed by subtracting the median from the data.
# See gh-14462.
trans_val, maxlog = stats.boxcox([122500, 474400, 110400])
res = stats.shapiro(trans_val)
# Reference from R:
# options(digits=16)
# x = c(0.00000000e+00, 3.39996924e-08, -6.35166875e-09)
# shapiro.test(x)
ref = (0.86468431705371, 0.2805581751566)
assert_allclose(res, ref, rtol=1e-5)
def test_length_3_gh18322(self):
# gh-18322 reported that the p-value could be negative for input of
# length 3. Check that this is resolved.
res = stats.shapiro([0.6931471805599453, 0.0, 0.0])
assert res.pvalue >= 0
# R `shapiro.test` doesn't produce an accurate p-value in the case
# above. Check that the formula used in `stats.shapiro` is not wrong.
# options(digits=16)
# x = c(-0.7746653110021126, -0.4344432067942129, 1.8157053280290931)
# shapiro.test(x)
x = [-0.7746653110021126, -0.4344432067942129, 1.8157053280290931]
res = stats.shapiro(x)
assert_allclose(res.statistic, 0.84658770645509)
assert_allclose(res.pvalue, 0.2313666489882, rtol=1e-6)
class TestAnderson:
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A, crit, sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
v = np.ones(10)
v[0] = 0
A, crit, sig = stats.anderson(v)
# The expected statistic 3.208057 was computed independently of scipy.
# For example, in R:
# > library(nortest)
# > v <- rep(1, 10)
# > v[1] <- 0
# > result <- ad.test(v)
# > result$statistic
# A
# 3.208057
assert_allclose(A, 3.208057)
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
with np.errstate(all='ignore'):
A, crit, sig = stats.anderson(x2, 'expon')
assert_(A > crit[-1])
def test_gumbel(self):
# Regression test for gh-6306. Before that issue was fixed,
# this case would return a2=inf.
v = np.ones(100)
v[0] = 0.0
a2, crit, sig = stats.anderson(v, 'gumbel')
# A brief reimplementation of the calculation of the statistic.
n = len(v)
xbar, s = stats.gumbel_l.fit(v)
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
logsf = stats.gumbel_l.logsf(v, xbar, s)
i = np.arange(1, n+1)
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
assert_allclose(a2, expected_a2)
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
def test_gumbel_l(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x = rs.gumbel(size=100)
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
assert_allclose(A2, A1)
def test_gumbel_r(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x1 = rs.gumbel(size=100)
x2 = np.ones(100)
# A constant array is a degenerate case and breaks gumbel_r.fit, so
# change one value in x2.
x2[0] = 0.996
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
assert_array_less(A1, crit1[-2:])
assert_(A2 > crit2[-1])
def test_weibull_min_case_A(self):
# data and reference values from `anderson` reference [7]
x = np.array([225, 171, 198, 189, 189, 135, 162, 135, 117, 162])
res = stats.anderson(x, 'weibull_min')
m, loc, scale = res.fit_result.params
assert_allclose((m, loc, scale), (2.38, 99.02, 78.23), rtol=2e-3)
assert_allclose(res.statistic, 0.260, rtol=1e-3)
assert res.statistic < res.critical_values[0]
c = 1 / m # ~0.42
assert_allclose(c, 1/2.38, rtol=2e-3)
# interpolate between rows for c=0.4 and c=0.45, indices -3 and -2
As40 = _Avals_weibull[-3]
As45 = _Avals_weibull[-2]
As_ref = As40 + (c - 0.4)/(0.45 - 0.4) * (As45 - As40)
        # atol=1e-3 because the tabulated values are rounded up at the
        # third decimal place
assert np.all(res.critical_values > As_ref)
assert_allclose(res.critical_values, As_ref, atol=1e-3)
def test_weibull_min_case_B(self):
# From `anderson` reference [7]
x = np.array([74, 57, 48, 29, 502, 12, 70, 21,
29, 386, 59, 27, 153, 26, 326])
message = "Maximum likelihood estimation has converged to "
with pytest.raises(ValueError, match=message):
stats.anderson(x, 'weibull_min')
def test_weibull_warning_error(self):
# Check for warning message when there are too few observations
# This is also an example in which an error occurs during fitting
x = -np.array([225, 75, 57, 168, 107, 12, 61, 43, 29])
wmessage = "Critical values of the test statistic are given for the..."
emessage = "An error occurred while fitting the Weibull distribution..."
wcontext = pytest.warns(UserWarning, match=wmessage)
econtext = pytest.raises(ValueError, match=emessage)
with wcontext, econtext:
stats.anderson(x, 'weibull_min')
@pytest.mark.parametrize('distname',
['norm', 'expon', 'gumbel_l', 'extreme1',
'gumbel', 'gumbel_r', 'logistic', 'weibull_min'])
def test_anderson_fit_params(self, distname):
# check that anderson now returns a FitResult
rng = np.random.default_rng(330691555377792039)
real_distname = ('gumbel_l' if distname in {'extreme1', 'gumbel'}
else distname)
dist = getattr(stats, real_distname)
params = distcont[real_distname]
x = dist.rvs(*params, size=1000, random_state=rng)
res = stats.anderson(x, distname)
assert res.fit_result.success
def test_anderson_weibull_As(self):
m = 1 # "when mi < 2, so that c > 0.5, the last line...should be used"
assert_equal(_get_As_weibull(1/m), _Avals_weibull[-1])
m = np.inf
assert_equal(_get_As_weibull(1/m), _Avals_weibull[0])
class TestAndersonKSamp:
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0021, atol=0.00025)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0020, atol=0.00025)
@pytest.mark.slow
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
samples = (t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14)
Tk, tm, p = stats.anderson_ksamp(samples, midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
rng = np.random.default_rng(6989860141921615054)
method = stats.PermutationMethod(n_resamples=9999, random_state=rng)
res = stats.anderson_ksamp(samples, midrank=False, method=method)
assert_array_equal(res.statistic, Tk)
assert_array_equal(res.critical_values, tm)
assert_allclose(res.pvalue, p, atol=6e-4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_R_kSamples(self):
        # test values generated with the R package kSamples
# package version 1.2-6 (2017-06-14)
# r1 = 1:100
# continuous case (no ties) --> version 1
# res <- kSamples::ad.test(r1, r1 + 40.5)
# res$ad[1, "T.AD"] # 41.105
# res$ad[1, " asympt. P-value"] # 5.8399e-18
#
# discrete case (ties allowed) --> version 2 (here: midrank=True)
# res$ad[2, "T.AD"] # 41.235
#
# res <- kSamples::ad.test(r1, r1 + .5)
# res$ad[1, "T.AD"] # -1.2824
# res$ad[1, " asympt. P-value"] # 1
# res$ad[2, "T.AD"] # -1.2944
#
# res <- kSamples::ad.test(r1, r1 + 7.5)
# res$ad[1, "T.AD"] # 1.4923
# res$ad[1, " asympt. P-value"] # 0.077501
#
# res <- kSamples::ad.test(r1, r1 + 6)
# res$ad[2, "T.AD"] # 0.63892
# res$ad[2, " asympt. P-value"] # 0.17981
#
# res <- kSamples::ad.test(r1, r1 + 11.5)
# res$ad[1, "T.AD"] # 4.5042
# res$ad[1, " asympt. P-value"] # 0.00545
#
# res <- kSamples::ad.test(r1, r1 + 13.5)
# res$ad[1, "T.AD"] # 6.2982
# res$ad[1, " asympt. P-value"] # 0.00118
x1 = np.linspace(1, 100, 100)
        # test case: different distributions; p-value floored at 0.001
# test case for issue #5493 / #8536
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False)
assert_almost_equal(s, 41.105, 3)
assert_equal(p, 0.001)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5])
assert_almost_equal(s, 41.235, 3)
assert_equal(p, 0.001)
# test case: similar distributions --> p-value capped at 0.25
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False)
assert_almost_equal(s, -1.2824, 4)
assert_equal(p, 0.25)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5])
assert_almost_equal(s, -1.2944, 4)
assert_equal(p, 0.25)
# test case: check interpolated p-value in [0.01, 0.25] (no ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False)
assert_almost_equal(s, 1.4923, 4)
assert_allclose(p, 0.0775, atol=0.005, rtol=0)
# test case: check interpolated p-value in [0.01, 0.25] (w/ ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 6])
assert_almost_equal(s, 0.6389, 4)
assert_allclose(p, 0.1798, atol=0.005, rtol=0)
# test extended critical values for p=0.001 and p=0.005
s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False)
assert_almost_equal(s, 4.5042, 4)
assert_allclose(p, 0.00545, atol=0.0005, rtol=0)
s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False)
assert_almost_equal(s, 6.2982, 4)
assert_allclose(p, 0.00118, atol=0.0001, rtol=0)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
res = stats.anderson_ksamp((t1, t2), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
assert_equal(res.significance_level, res.pvalue)
class TestAnsari:
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_bad_alternative(self):
# invalid value for alternative must raise a ValueError
x1 = [1, 2, 3, 4]
x2 = [5, 6, 7, 8]
match = "'alternative' must be 'two-sided'"
with assert_raises(ValueError, match=match):
stats.ansari(x1, x2, alternative='foo')
def test_alternative_exact(self):
x1 = [-5, 1, 5, 10, 15, 20, 25] # high scale, loc=10
x2 = [7.5, 8.5, 9.5, 10.5, 11.5, 12.5] # low scale, loc=10
# ratio of scales is greater than 1. So, the
# p-value must be high when `alternative='less'`
# and low when `alternative='greater'`.
statistic, pval = stats.ansari(x1, x2)
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert pval_l > 0.95
assert pval_g < 0.05 # level of significance.
        # also check that the p-values sum to 1 plus the probability mass
        # at the calculated statistic (each one-sided p-value includes
        # P(W == statistic), so that point mass is counted twice).
prob = _abw_state.pmf(statistic, len(x1), len(x2))
assert_allclose(pval_g + pval_l, 1 + prob, atol=1e-12)
        # also check that one of the one-sided p-values equals half the
        # two-sided p-value and that the other one-sided p-value is its
        # complement.
assert_allclose(pval_g, pval/2, atol=1e-12)
assert_allclose(pval_l, 1+prob-pval/2, atol=1e-12)
# sanity check. The result should flip if
# we exchange x and y.
pval_l_reverse = stats.ansari(x2, x1, alternative='less').pvalue
pval_g_reverse = stats.ansari(x2, x1, alternative='greater').pvalue
assert pval_l_reverse < 0.05
assert pval_g_reverse > 0.95
@pytest.mark.parametrize(
'x, y, alternative, expected',
        # the tests are designed so that the if-else
        # branches in the exact mode of the Ansari
        # test are covered.
[([1, 2, 3, 4], [5, 6, 7, 8], 'less', 0.6285714285714),
([1, 2, 3, 4], [5, 6, 7, 8], 'greater', 0.6285714285714),
([1, 2, 3], [4, 5, 6, 7, 8], 'less', 0.8928571428571),
([1, 2, 3], [4, 5, 6, 7, 8], 'greater', 0.2857142857143),
([1, 2, 3, 4, 5], [6, 7, 8], 'less', 0.2857142857143),
([1, 2, 3, 4, 5], [6, 7, 8], 'greater', 0.8928571428571)]
)
def test_alternative_exact_with_R(self, x, y, alternative, expected):
# testing with R on arbitrary data
# Sample R code used for the third test case above:
# ```R
# > options(digits=16)
# > x <- c(1,2,3)
# > y <- c(4,5,6,7,8)
# > ansari.test(x, y, alternative='less', exact=TRUE)
#
# Ansari-Bradley test
#
# data: x and y
# AB = 6, p-value = 0.8928571428571
# alternative hypothesis: true ratio of scales is less than 1
#
# ```
pval = stats.ansari(x, y, alternative=alternative).pvalue
assert_allclose(pval, expected, atol=1e-12)
def test_alternative_approx(self):
# intuitive tests for approximation
x1 = stats.norm.rvs(0, 5, size=100, random_state=123)
x2 = stats.norm.rvs(0, 2, size=100, random_state=123)
# for m > 55 or n > 55, the test should automatically
# switch to approximation.
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert_allclose(pval_l, 1.0, atol=1e-12)
assert_allclose(pval_g, 0.0, atol=1e-12)
        # also check that one of the one-sided p-values equals half the
        # two-sided p-value and that the other one-sided p-value is its
        # complement.
x1 = stats.norm.rvs(0, 2, size=60, random_state=123)
x2 = stats.norm.rvs(0, 1.5, size=60, random_state=123)
pval = stats.ansari(x1, x2).pvalue
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert_allclose(pval_g, pval/2, atol=1e-12)
assert_allclose(pval_l, 1-pval/2, atol=1e-12)
class TestBartlett:
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.bartlett, g1, x)
class TestLevene:
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.levene, g1, x)
class TestBinomTest:
"""Tests for stats.binomtest."""
# Expected results here are from R binom.test, e.g.
# options(digits=16)
# binom.test(484, 967, p=0.48)
#
def test_two_sided_pvalues1(self):
# `tol` could be stricter on most architectures, but the value
# here is limited by accuracy of `binom.cdf` for large inputs on
# Linux_Python_37_32bit_full and aarch64
rtol = 1e-10 # aarch64 observed rtol: 1.5e-11
res = stats.binomtest(10079999, 21000000, 0.48)
assert_allclose(res.pvalue, 1.0, rtol=rtol)
res = stats.binomtest(10079990, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9966892187965, rtol=rtol)
res = stats.binomtest(10080009, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9970377203856, rtol=rtol)
res = stats.binomtest(10080017, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9940754817328, rtol=1e-9)
def test_two_sided_pvalues2(self):
rtol = 1e-10 # no aarch64 failure with 1e-15, preemptive bump
res = stats.binomtest(9, n=21, p=0.48)
assert_allclose(res.pvalue, 0.6689672431939, rtol=rtol)
res = stats.binomtest(4, 21, 0.48)
assert_allclose(res.pvalue, 0.008139563452106, rtol=rtol)
res = stats.binomtest(11, 21, 0.48)
assert_allclose(res.pvalue, 0.8278629664608, rtol=rtol)
res = stats.binomtest(7, 21, 0.48)
assert_allclose(res.pvalue, 0.1966772901718, rtol=rtol)
res = stats.binomtest(3, 10, .5)
assert_allclose(res.pvalue, 0.34375, rtol=rtol)
res = stats.binomtest(2, 2, .4)
assert_allclose(res.pvalue, 0.16, rtol=rtol)
res = stats.binomtest(2, 4, .3)
assert_allclose(res.pvalue, 0.5884, rtol=rtol)
def test_edge_cases(self):
rtol = 1e-10 # aarch64 observed rtol: 1.33e-15
res = stats.binomtest(484, 967, 0.5)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(3, 47, 3/47)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(13, 46, 13/46)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(15, 44, 15/44)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(7, 13, 0.5)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(6, 11, 0.5)
assert_allclose(res.pvalue, 1, rtol=rtol)
def test_binary_srch_for_binom_tst(self):
# Test that old behavior of binomtest is maintained
# by the new binary search method in cases where d
# exactly equals the input on one side.
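        # The binomial PMF is unimodal, so on each side of the mode it is
        # monotone and the boundary where pmf <= d can be found by bisection.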
n = 10
p = 0.5
k = 3
# First test for the case where k > mode of PMF
i = np.arange(np.ceil(p * n), n+1)
d = stats.binom.pmf(k, n, p)
# Old way of calculating y, probably consistent with R.
y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
# New way with binary search.
ix = _binary_search_for_binom_tst(lambda x1:
-stats.binom.pmf(x1, n, p),
-d, np.ceil(p * n), n)
y2 = n - ix + int(d == stats.binom.pmf(ix, n, p))
assert_allclose(y1, y2, rtol=1e-9)
# Now test for the other side.
k = 7
i = np.arange(np.floor(p * n) + 1)
d = stats.binom.pmf(k, n, p)
# Old way of calculating y.
y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
# New way with binary search.
ix = _binary_search_for_binom_tst(lambda x1:
stats.binom.pmf(x1, n, p),
d, 0, np.floor(p * n))
y2 = ix + 1
assert_allclose(y1, y2, rtol=1e-9)
# Expected results here are from R 3.6.2 binom.test
@pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
[('less', 0.148831050443,
0.0, 0.2772002496709138),
('greater', 0.9004695898947,
0.1366613252458672, 1.0),
('two-sided', 0.2983720970096,
0.1266555521019559, 0.2918426890886281)])
def test_confidence_intervals1(self, alternative, pval, ci_low, ci_high):
res = stats.binomtest(20, n=100, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-12)
assert_equal(res.statistic, 0.2)
ci = res.proportion_ci(confidence_level=0.95)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-12)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
[('less',
0.005656361, 0.0, 0.1872093),
('greater',
0.9987146, 0.008860761, 1.0),
('two-sided',
0.01191714, 0.006872485, 0.202706269)])
def test_confidence_intervals2(self, alternative, pval, ci_low, ci_high):
res = stats.binomtest(3, n=50, p=0.2, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
assert_equal(res.statistic, 0.06)
ci = res.proportion_ci(confidence_level=0.99)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_high',
[('less', 0.05631351, 0.2588656),
('greater', 1.0, 1.0),
('two-sided', 0.07604122, 0.3084971)])
def test_confidence_interval_exact_k0(self, alternative, pval, ci_high):
# Test with k=0, n = 10.
res = stats.binomtest(0, 10, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
ci = res.proportion_ci(confidence_level=0.95)
assert_equal(ci.low, 0.0)
assert_allclose(ci.high, ci_high, rtol=1e-6)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_low',
[('less', 1.0, 0.0),
('greater', 9.536743e-07, 0.7411344),
('two-sided', 9.536743e-07, 0.6915029)])
def test_confidence_interval_exact_k_is_n(self, alternative, pval, ci_low):
# Test with k = n = 10.
res = stats.binomtest(10, 10, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
ci = res.proportion_ci(confidence_level=0.95)
assert_equal(ci.high, 1.0)
assert_allclose(ci.low, ci_low, rtol=1e-6)
# Expected results are from the prop.test function in R 3.6.2.
@pytest.mark.parametrize(
'k, alternative, corr, conf, ci_low, ci_high',
[[3, 'two-sided', True, 0.95, 0.08094782, 0.64632928],
[3, 'two-sided', True, 0.99, 0.0586329, 0.7169416],
[3, 'two-sided', False, 0.95, 0.1077913, 0.6032219],
[3, 'two-sided', False, 0.99, 0.07956632, 0.6799753],
[3, 'less', True, 0.95, 0.0, 0.6043476],
[3, 'less', True, 0.99, 0.0, 0.6901811],
[3, 'less', False, 0.95, 0.0, 0.5583002],
[3, 'less', False, 0.99, 0.0, 0.6507187],
[3, 'greater', True, 0.95, 0.09644904, 1.0],
[3, 'greater', True, 0.99, 0.06659141, 1.0],
[3, 'greater', False, 0.95, 0.1268766, 1.0],
[3, 'greater', False, 0.99, 0.08974147, 1.0],
[0, 'two-sided', True, 0.95, 0.0, 0.3445372],
[0, 'two-sided', False, 0.95, 0.0, 0.2775328],
[0, 'less', True, 0.95, 0.0, 0.2847374],
[0, 'less', False, 0.95, 0.0, 0.212942],
[0, 'greater', True, 0.95, 0.0, 1.0],
[0, 'greater', False, 0.95, 0.0, 1.0],
[10, 'two-sided', True, 0.95, 0.6554628, 1.0],
[10, 'two-sided', False, 0.95, 0.7224672, 1.0],
[10, 'less', True, 0.95, 0.0, 1.0],
[10, 'less', False, 0.95, 0.0, 1.0],
[10, 'greater', True, 0.95, 0.7152626, 1.0],
[10, 'greater', False, 0.95, 0.787058, 1.0]]
)
def test_ci_wilson_method(self, k, alternative, corr, conf,
ci_low, ci_high):
res = stats.binomtest(k, n=10, p=0.1, alternative=alternative)
if corr:
method = 'wilsoncc'
else:
method = 'wilson'
ci = res.proportion_ci(confidence_level=conf, method=method)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
def test_estimate_equals_hypothesized_prop(self):
# Test the special case where the estimated proportion equals
# the hypothesized proportion. When alternative is 'two-sided',
# the p-value is 1.
res = stats.binomtest(4, 16, 0.25)
assert_equal(res.statistic, 0.25)
assert_equal(res.pvalue, 1.0)
@pytest.mark.parametrize('k, n', [(0, 0), (-1, 2)])
def test_invalid_k_n(self, k, n):
with pytest.raises(ValueError,
match="must be an integer not less than"):
stats.binomtest(k, n)
def test_invalid_k_too_big(self):
with pytest.raises(ValueError,
match="k must not be greater than n"):
stats.binomtest(11, 10, 0.25)
def test_invalid_k_wrong_type(self):
with pytest.raises(TypeError,
match="k must be an integer."):
stats.binomtest([10, 11], 21, 0.25)
def test_invalid_p_range(self):
message = 'p must be in range...'
with pytest.raises(ValueError, match=message):
stats.binomtest(50, 150, p=-0.5)
with pytest.raises(ValueError, match=message):
stats.binomtest(50, 150, p=1.5)
def test_invalid_confidence_level(self):
res = stats.binomtest(3, n=10, p=0.1)
with pytest.raises(ValueError, match="must be in the interval"):
res.proportion_ci(confidence_level=-1)
def test_invalid_ci_method(self):
res = stats.binomtest(3, n=10, p=0.1)
with pytest.raises(ValueError, match="method must be"):
res.proportion_ci(method="plate of shrimp")
def test_alias(self):
res = stats.binomtest(3, n=10, p=0.1)
assert_equal(res.proportion_estimate, res.statistic)
@pytest.mark.skipif(sys.maxsize <= 2**32, reason="32-bit does not overflow")
def test_boost_overflow_raises(self):
# Boost.Math error policy should raise exceptions in Python
with pytest.raises(OverflowError, match='Error in function...'):
stats.binomtest(5, 6, p=sys.float_info.min)
class TestFligner:
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1**2),
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Perturb input to break ties in the transformed data
# See https://github.com/scipy/scipy/pull/8042 for more details
rs = np.random.RandomState(123)
def _perturb(g):
return (np.asarray(g) + 1e-10 * rs.randn(len(g))).tolist()
g1_ = _perturb(g1)
g2_ = _perturb(g2)
g3_ = _perturb(g3)
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean')
Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
def mood_cases_with_ties():
# Generate random `x` and `y` arrays with ties both between and within the
# samples. Expected results are (statistic, pvalue) from SAS.
expected_results = [(-1.76658511464992, .0386488678399305),
(-.694031428192304, .2438312498647250),
(-1.15093525352151, .1248794365836150)]
seeds = [23453254, 1298352315, 987234597]
for si, seed in enumerate(seeds):
rng = np.random.default_rng(seed)
xy = rng.random(100)
# Generate random indices to make ties
tie_ind = rng.integers(low=0, high=99, size=5)
# Generate a random number of ties for each index.
num_ties_per_ind = rng.integers(low=1, high=5, size=5)
        # At each index in `tie_ind`, set the following `n - 1` entries
        # equal to that value.
for i, n in zip(tie_ind, num_ties_per_ind):
for j in range(i + 1, i + n):
xy[j] = xy[i]
# scramble order of xy before splitting into `x, y`
rng.shuffle(xy)
x, y = np.split(xy, 2)
yield x, y, 'less', *expected_results[si]
class TestMood:
@pytest.mark.parametrize("x,y,alternative,stat_expect,p_expect",
mood_cases_with_ties())
def test_against_SAS(self, x, y, alternative, stat_expect, p_expect):
"""
Example code used to generate SAS output:
DATA myData;
INPUT X Y;
CARDS;
1 0
1 1
1 2
1 3
1 4
2 0
2 1
2 4
2 9
2 16
ods graphics on;
proc npar1way mood data=myData ;
class X;
ods output MoodTest=mt;
proc contents data=mt;
proc print data=mt;
format Prob1 17.16 Prob2 17.16 Statistic 17.16 Z 17.16 ;
title "Mood Two-Sample Test";
proc print data=myData;
title "Data for above results";
run;
"""
statistic, pvalue = stats.mood(x, y, alternative=alternative)
assert_allclose(stat_expect, statistic, atol=1e-16)
assert_allclose(p_expect, pvalue, atol=1e-16)
@pytest.mark.parametrize("alternative, expected",
[('two-sided', (1.019938533549930,
.3077576129778760)),
('less', (1.019938533549930,
1 - .1538788064889380)),
('greater', (1.019938533549930,
.1538788064889380))])
def test_against_SAS_2(self, alternative, expected):
# Code to run in SAS in above function
x = [111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98]
y = [107, 108, 106, 98, 105, 103, 110, 105, 104, 100,
96, 108, 103, 104, 114, 114, 113, 108, 106, 99]
res = stats.mood(x, y, alternative=alternative)
assert_allclose(res, expected)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
# Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
        # Test that the results of the mood test in the 2-D case are
        # consistent with the R result for the same inputs. Numbers from
        # R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is
# less than 3
assert_raises(ValueError, stats.mood, [1], [])
def test_mood_alternative(self):
np.random.seed(0)
x = stats.norm.rvs(scale=0.75, size=100)
y = stats.norm.rvs(scale=1.25, size=100)
stat1, p1 = stats.mood(x, y, alternative='two-sided')
stat2, p2 = stats.mood(x, y, alternative='less')
stat3, p3 = stats.mood(x, y, alternative='greater')
assert stat1 == stat2 == stat3
assert_allclose(p1, 0, atol=1e-7)
assert_allclose(p2, p1/2)
assert_allclose(p3, 1 - p1/2)
with pytest.raises(ValueError, match="alternative must be..."):
stats.mood(x, y, alternative='ekki-ekki')
@pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
def test_result(self, alternative):
rng = np.random.default_rng(265827767938813079281100964083953437622)
x1 = rng.standard_normal((10, 1))
x2 = rng.standard_normal((15, 1))
res = stats.mood(x1, x2, alternative=alternative)
assert_equal((res.statistic, res.pvalue), res)
class TestProbplot:
def test_basic(self):
x = stats.norm.rvs(size=20, random_state=12345)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
x = stats.norm.rvs(size=100, random_state=123456)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
        osm, osr = stats.probplot(x, sparams=(0, 1), fit=False)
def test_dist_keyword(self):
x = stats.norm.rvs(size=20, random_state=12345)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist:
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100, random_state=7654321)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
class TestWilcoxon:
def test_wilcoxon_bad_arg(self):
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2],
alternative="dummy")
assert_raises(ValueError, stats.wilcoxon, [1]*10, mode="xyz")
def test_zero_diff(self):
x = np.arange(20)
# pratt and wilcox do not work if x - y == 0
assert_raises(ValueError, stats.wilcoxon, x, x, "wilcox",
mode="approx")
assert_raises(ValueError, stats.wilcoxon, x, x, "pratt",
mode="approx")
# ranksum is n*(n+1)/2, split in half if zero_method == "zsplit"
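        # (Here n = 20, so the full ranksum is 20*21/2 = 210 and "zsplit"
        # assigns half of it, 20*21/4 = 105, to the statistic.)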
assert_equal(stats.wilcoxon(x, x, "zsplit", mode="approx"),
(20*21/4, 1.0))
def test_pratt(self):
# regression test for gh-6805: p-value matches value from R package
# coin (wilcoxsign_test) reported in the issue
x = [1, 2, 3, 4]
y = [1, 2, 3, 5]
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
res = stats.wilcoxon(x, y, zero_method="pratt", mode="approx")
assert_allclose(res, (0.0, 0.31731050786291415))
def test_wilcoxon_arg_type(self):
# Should be able to accept list as arguments.
# Address issue 6070.
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
_ = stats.wilcoxon(arr, zero_method="pratt", mode="approx")
_ = stats.wilcoxon(arr, zero_method="zsplit", mode="approx")
_ = stats.wilcoxon(arr, zero_method="wilcox", mode="approx")
def test_accuracy_wilcoxon(self):
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt", mode="approx")
assert_allclose(T, 423)
assert_allclose(p, 0.0031724568006762576)
T, p = stats.wilcoxon(x, y, "zsplit", mode="approx")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox", mode="approx")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False, mode="approx")
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True, mode="approx")
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes(self):
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False, mode="approx")
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_has_zstatistic(self):
rng = np.random.default_rng(89426135444)
x, y = rng.random(15), rng.random(15)
res = stats.wilcoxon(x, y, mode="approx")
ref = stats.norm.ppf(res.pvalue/2)
assert_allclose(res.zstatistic, ref)
res = stats.wilcoxon(x, y, mode="exact")
assert not hasattr(res, 'zstatistic')
res = stats.wilcoxon(x, y)
assert not hasattr(res, 'zstatistic')
def test_wilcoxon_tie(self):
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10, mode="approx")
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True, mode="approx")
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
def test_onesided(self):
# tested against "R version 3.4.1 (2017-06-30)"
# x <- c(125, 115, 130, 140, 140, 115, 140, 125, 140, 135)
# y <- c(110, 122, 125, 120, 140, 124, 123, 137, 135, 145)
# cfg <- list(x = x, y = y, paired = TRUE, exact = FALSE)
# do.call(wilcox.test, c(cfg, list(alternative = "less", correct = FALSE)))
# do.call(wilcox.test, c(cfg, list(alternative = "less", correct = TRUE)))
# do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = FALSE)))
# do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = TRUE)))
x = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135]
y = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145]
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="less", mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.7031847, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="less", correction=True,
mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.7233656, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="greater", mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.2968153, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="greater", correction=True,
mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.3176447, decimal=6)
def test_exact_basic(self):
for n in range(1, 51):
pmf1 = _get_wilcoxon_distr(n)
pmf2 = _get_wilcoxon_distr2(n)
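            # The statistic takes values 0..n*(n+1)/2, so the support has
            # n*(n+1)/2 + 1 points and the pmf must sum to 1.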
assert_equal(n*(n+1)/2 + 1, len(pmf1))
assert_equal(sum(pmf1), 1)
assert_array_almost_equal(pmf1, pmf2)
def test_exact_pval(self):
# expected values computed with "R version 3.4.1 (2017-06-30)"
x = np.array([1.81, 0.82, 1.56, -0.48, 0.81, 1.28, -1.04, 0.23,
-0.75, 0.14])
y = np.array([0.71, 0.65, -0.2, 0.85, -1.1, -0.45, -0.84, -0.24,
-0.68, -0.76])
_, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact")
assert_almost_equal(p, 0.1054688, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="less", mode="exact")
assert_almost_equal(p, 0.9580078, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="greater", mode="exact")
assert_almost_equal(p, 0.05273438, decimal=6)
x = np.arange(0, 20) + 0.5
y = np.arange(20, 0, -1)
_, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact")
assert_almost_equal(p, 0.8694878, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="less", mode="exact")
assert_almost_equal(p, 0.4347439, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="greater", mode="exact")
assert_almost_equal(p, 0.5795889, decimal=6)
# These inputs were chosen to give a W statistic that is either the
# center of the distribution (when the length of the support is odd), or
# the value to the left of the center (when the length of the support is
# even). Also, the numbers are chosen so that the W statistic is the
# sum of the positive values.
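    # E.g. for x = [-1, -2, 3]: W = 3, and the support {0, ..., 6} has odd
    # length 7 with center 3, so the two-sided p-value is exactly 1.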
@pytest.mark.parametrize('x', [[-1, -2, 3],
[-1, 2, -3, -4, 5],
[-1, -2, 3, -4, -5, -6, 7, 8]])
def test_exact_p_1(self, x):
w, p = stats.wilcoxon(x)
x = np.array(x)
wtrue = x[x > 0].sum()
assert_equal(w, wtrue)
assert_equal(p, 1)
def test_auto(self):
        # "auto" defaults to exact if there are no ties and n <= 25
x = np.arange(0, 25) + 0.5
y = np.arange(25, 0, -1)
assert_equal(stats.wilcoxon(x, y),
stats.wilcoxon(x, y, mode="exact"))
# if there are ties (i.e. zeros in d = x-y), then switch to approx
d = np.arange(0, 13)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Exact p-value calculation")
w, p = stats.wilcoxon(d)
assert_equal(stats.wilcoxon(d, mode="approx"), (w, p))
# use approximation for samples > 25
d = np.arange(1, 52)
assert_equal(stats.wilcoxon(d), stats.wilcoxon(d, mode="approx"))
class TestKstat:
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]]
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar:
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
        assert_equal(stats.kstatvar(data), np.nan)
def test_bad_arg(self):
        # Raise ValueError if n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot:
def setup_method(self):
self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
        # For consistency with probplot's return value for an empty array,
        # ppcc contains all zeros and svals is the same as for normal array
        # input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax:
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=7)
def test_dist(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=7)
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=7)
class TestBoxcox_llf:
def test_basic(self):
x = stats.norm.rvs(size=10000, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
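        # For lmbda=1 the Box-Cox log-likelihood reduces to
        # (lmbda - 1)*sum(log(x)) - N/2*log(var(x - 1)) = -N/2*log(var(x)),
        # which is what `llf_expected` computes below.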
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
def test_gh_6873(self):
# Regression test for gh-6873.
# This example was taken from gh-7534, a duplicate of gh-6873.
data = [198.0, 233.0, 233.0, 392.0]
llf = stats.boxcox_llf(-8, data)
# The expected value was computed with mpmath.
assert_allclose(llf, -17.93934208579061)
# This is the data from github user Qukaiyi, given as an example
# of a data set that caused boxcox to fail.
_boxcox_data = [
15957, 112079, 1039553, 711775, 173111, 307382, 183155, 53366, 760875,
207500, 160045, 473714, 40194, 440319, 133261, 265444, 155590, 36660,
904939, 55108, 138391, 339146, 458053, 63324, 1377727, 1342632, 41575,
68685, 172755, 63323, 368161, 199695, 538214, 167760, 388610, 398855,
1001873, 364591, 1320518, 194060, 194324, 2318551, 196114, 64225, 272000,
198668, 123585, 86420, 1925556, 695798, 88664, 46199, 759135, 28051,
345094, 1977752, 51778, 82746, 638126, 2560910, 45830, 140576, 1603787,
57371, 548730, 5343629, 2298913, 998813, 2156812, 423966, 68350, 145237,
131935, 1600305, 342359, 111398, 1409144, 281007, 60314, 242004, 113418,
246211, 61940, 95858, 957805, 40909, 307955, 174159, 124278, 241193,
872614, 304180, 146719, 64361, 87478, 509360, 167169, 933479, 620561,
483333, 97416, 143518, 286905, 597837, 2556043, 89065, 69944, 196858,
88883, 49379, 916265, 1527392, 626954, 54415, 89013, 2883386, 106096,
402697, 45578, 349852, 140379, 34648, 757343, 1305442, 2054757, 121232,
606048, 101492, 51426, 1820833, 83412, 136349, 1379924, 505977, 1303486,
95853, 146451, 285422, 2205423, 259020, 45864, 684547, 182014, 784334,
174793, 563068, 170745, 1195531, 63337, 71833, 199978, 2330904, 227335,
898280, 75294, 2011361, 116771, 157489, 807147, 1321443, 1148635, 2456524,
81839, 1228251, 97488, 1051892, 75397, 3009923, 2732230, 90923, 39735,
132433, 225033, 337555, 1204092, 686588, 1062402, 40362, 1361829, 1497217,
150074, 551459, 2019128, 39581, 45349, 1117187, 87845, 1877288, 164448,
10338362, 24942, 64737, 769946, 2469124, 2366997, 259124, 2667585, 29175,
56250, 74450, 96697, 5920978, 838375, 225914, 119494, 206004, 430907,
244083, 219495, 322239, 407426, 618748, 2087536, 2242124, 4736149, 124624,
406305, 240921, 2675273, 4425340, 821457, 578467, 28040, 348943, 48795,
145531, 52110, 1645730, 1768364, 348363, 85042, 2673847, 81935, 169075,
367733, 135474, 383327, 1207018, 93481, 5934183, 352190, 636533, 145870,
55659, 146215, 73191, 248681, 376907, 1606620, 169381, 81164, 246390,
236093, 885778, 335969, 49266, 381430, 307437, 350077, 34346, 49340,
84715, 527120, 40163, 46898, 4609439, 617038, 2239574, 159905, 118337,
120357, 430778, 3799158, 3516745, 54198, 2970796, 729239, 97848, 6317375,
887345, 58198, 88111, 867595, 210136, 1572103, 1420760, 574046, 845988,
509743, 397927, 1119016, 189955, 3883644, 291051, 126467, 1239907, 2556229,
411058, 657444, 2025234, 1211368, 93151, 577594, 4842264, 1531713, 305084,
479251, 20591, 1466166, 137417, 897756, 594767, 3606337, 32844, 82426,
1294831, 57174, 290167, 322066, 813146, 5671804, 4425684, 895607, 450598,
1048958, 232844, 56871, 46113, 70366, 701618, 97739, 157113, 865047,
194810, 1501615, 1765727, 38125, 2733376, 40642, 437590, 127337, 106310,
4167579, 665303, 809250, 1210317, 45750, 1853687, 348954, 156786, 90793,
1885504, 281501, 3902273, 359546, 797540, 623508, 3672775, 55330, 648221,
266831, 90030, 7118372, 735521, 1009925, 283901, 806005, 2434897, 94321,
309571, 4213597, 2213280, 120339, 64403, 8155209, 1686948, 4327743,
1868312, 135670, 3189615, 1569446, 706058, 58056, 2438625, 520619, 105201,
141961, 179990, 1351440, 3148662, 2804457, 2760144, 70775, 33807, 1926518,
2362142, 186761, 240941, 97860, 1040429, 1431035, 78892, 484039, 57845,
724126, 3166209, 175913, 159211, 1182095, 86734, 1921472, 513546, 326016,
1891609
]
class TestBoxcox:
def test_fixed_lmbda(self):
x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
# test that constant input is accepted; see gh-12225
xt = stats.boxcox(np.ones(10), 2)
assert_equal(xt, np.zeros(10))
def test_lmbda_None(self):
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000, random_state=1245)
x_inv = (x * lmbda + 1)**(-lmbda)
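        # boxcox(x_inv, -1/lmbda) is affine in the normal sample x (since
        # x_inv**(-1/lmbda) = x*lmbda + 1), so the optimal lambda found
        # below should be close to -1/lmbda.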
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
rng = np.random.RandomState(1234)
x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = _old_loggamma_rvs(7, size=500, random_state=rng) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1, 2])
assert_raises(ValueError, stats.boxcox, x)
# Raise ValueError if data is constant.
assert_raises(ValueError, stats.boxcox, np.array([1]))
# Raise ValueError if data is not 1-dimensional.
assert_raises(ValueError, stats.boxcox, np.array([[1], [2]]))
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
def test_gh_6873(self):
# Regression test for gh-6873.
y, lam = stats.boxcox(_boxcox_data)
# The expected value of lam was computed with the function
# powerTransform in the R library 'car'. I trust that value
# to only about five significant digits.
assert_allclose(lam, -0.051654, rtol=1e-5)
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
def test_bounded_optimizer_within_bounds(self, bounds):
# Define custom optimizer with bounds.
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded")
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
assert bounds[0] < lmbda < bounds[1]
def test_bounded_optimizer_against_unbounded_optimizer(self):
# Test whether setting bounds on optimizer excludes solution from
# unbounded optimizer.
# Get unbounded solution.
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None)
# Set tolerance and bounds around solution.
bounds = (lmbda + 0.1, lmbda + 1)
options = {'xatol': 1e-12}
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded", options=options)
# Check bounded solution. Lower bound should be active.
_, lmbda_bounded = stats.boxcox(_boxcox_data, lmbda=None,
optimizer=optimizer)
assert lmbda_bounded != lmbda
assert_allclose(lmbda_bounded, bounds[0])
@pytest.mark.parametrize("optimizer", ["str", (1, 2), 0.1])
def test_bad_optimizer_type_raises_error(self, optimizer):
# Check if error is raised if string, tuple or float is passed
with pytest.raises(ValueError, match="`optimizer` must be a callable"):
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
def test_bad_optimizer_value_raises_error(self):
# Check if error is raised if `optimizer` function does not return
# `OptimizeResult` object
# Define test function that always returns 1
def optimizer(fun):
return 1
message = "return an object containing the optimal `lmbda`"
with pytest.raises(ValueError, match=message):
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
@pytest.mark.parametrize(
"bad_x", [np.array([1, -42, 12345.6]), np.array([np.nan, 42, 1])]
)
def test_negative_x_value_raises_error(self, bad_x):
"""Test boxcox_normmax raises ValueError if x contains non-positive values."""
message = "only positive, finite, real numbers"
with pytest.raises(ValueError, match=message):
stats.boxcox_normmax(bad_x)
class TestBoxcoxNormmax:
def setup_method(self):
self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
@pytest.mark.parametrize("method", ["mle", "pearsonr", "all"])
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
def test_bounded_optimizer_within_bounds(self, method, bounds):
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded")
maxlog = stats.boxcox_normmax(self.x, method=method,
optimizer=optimizer)
assert np.all(bounds[0] < maxlog)
assert np.all(maxlog < bounds[1])
def test_user_defined_optimizer(self):
# tests an optimizer that is not based on scipy.optimize.minimize
lmbda = stats.boxcox_normmax(self.x)
lmbda_rounded = np.round(lmbda, 5)
lmbda_range = np.linspace(lmbda_rounded-0.01, lmbda_rounded+0.01, 1001)
class MyResult:
pass
def optimizer(fun):
# brute force minimum over the range
objs = []
for lmbda in lmbda_range:
objs.append(fun(lmbda))
res = MyResult()
res.x = lmbda_range[np.argmin(objs)]
return res
lmbda2 = stats.boxcox_normmax(self.x, optimizer=optimizer)
assert lmbda2 != lmbda # not identical
assert_allclose(lmbda2, lmbda, 1e-5) # but as close as it should be
def test_user_defined_optimizer_and_brack_raises_error(self):
optimizer = optimize.minimize_scalar
# Using default `brack=None` with user-defined `optimizer` works as
# expected.
stats.boxcox_normmax(self.x, brack=None, optimizer=optimizer)
# Using user-defined `brack` with user-defined `optimizer` is expected
# to throw an error. Instead, users should specify
# optimizer-specific parameters in the optimizer function itself.
with pytest.raises(ValueError, match="`brack` must be None if "
"`optimizer` is given"):
stats.boxcox_normmax(self.x, brack=(-2.0, 2.0),
optimizer=optimizer)
class TestBoxcoxNormplot:
def setup_method(self):
self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
        # `x` cannot contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestYeojohnson_llf:
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.yeojohnson_llf(1, [])))
class TestYeojohnson:
def test_fixed_lmbda(self):
rng = np.random.RandomState(12345)
# Test positive input
x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5
assert np.all(x > 0)
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt, 1 - 1 / (x + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt, np.log(x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
# Test negative input
x = _old_loggamma_rvs(5, size=50, random_state=rng) - 5
assert np.all(x < 0)
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt, -np.log(-x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt, 1 / (-x + 1) - 1)
# test both positive and negative input
x = _old_loggamma_rvs(5, size=50, random_state=rng) - 2
assert not np.all(x < 0)
assert not np.all(x >= 0)
pos = x >= 0
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt[pos], np.log(x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
neg = ~pos
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt[neg], -np.log(-x[neg] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[neg], x[neg])
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1)
@pytest.mark.parametrize('lmbda', [0, .1, .5, 2])
def test_lmbda_None(self, lmbda):
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
def _inverse_transform(x, lmbda):
x_inv = np.zeros(x.shape, dtype=x.dtype)
pos = x >= 0
# when x >= 0
if abs(lmbda) < np.spacing(1.):
x_inv[pos] = np.exp(x[pos]) - 1
else: # lmbda != 0
x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
# when x < 0
if abs(lmbda - 2) > np.spacing(1.):
x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
1 / (2 - lmbda))
else: # lmbda == 2
x_inv[~pos] = 1 - np.exp(-x[~pos])
return x_inv
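        # (`_inverse_transform` inverts the Yeo-Johnson map, which is
        # ((x + 1)**lmbda - 1)/lmbda for x >= 0 and
        # -((-x + 1)**(2 - lmbda) - 1)/(2 - lmbda) for x < 0.)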
n_samples = 20000
np.random.seed(1234567)
x = np.random.normal(loc=0, scale=1, size=(n_samples))
x_inv = _inverse_transform(x, lmbda)
xt, maxlog = stats.yeojohnson(x_inv)
assert_allclose(maxlog, lmbda, atol=1e-2)
assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2)
assert_almost_equal(0, xt.mean(), decimal=1)
assert_almost_equal(1, xt.std(), decimal=1)
def test_empty(self):
assert_(stats.yeojohnson([]).shape == (0,))
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
xt1, _ = stats.yeojohnson(x)
xt2, _ = stats.yeojohnson(list(x))
assert_allclose(xt1, xt2, rtol=1e-12)
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_input_dtype_complex(self, dtype):
x = np.arange(6, dtype=dtype)
err_msg = ('Yeo-Johnson transformation is not defined for complex '
'numbers.')
with pytest.raises(ValueError, match=err_msg):
stats.yeojohnson(x)
@pytest.mark.parametrize('dtype', [np.int8, np.uint8, np.int16, np.int32])
def test_input_dtype_integer(self, dtype):
x_int = np.arange(8, dtype=dtype)
x_float = np.arange(8, dtype=np.float64)
xt_int, lmbda_int = stats.yeojohnson(x_int)
xt_float, lmbda_float = stats.yeojohnson(x_float)
assert_allclose(xt_int, xt_float, rtol=1e-7)
assert_allclose(lmbda_int, lmbda_float, rtol=1e-7)
def test_input_high_variance(self):
# non-regression test for gh-10821
x = np.array([3251637.22, 620695.44, 11642969.00, 2223468.22,
85307500.00, 16494389.89, 917215.88, 11642969.00,
2145773.87, 4962000.00, 620695.44, 651234.50,
1907876.71, 4053297.88, 3251637.22, 3259103.08,
9547969.00, 20631286.23, 12807072.08, 2383819.84,
90114500.00, 17209575.46, 12852969.00, 2414609.99,
2170368.23])
xt_yeo, lam_yeo = stats.yeojohnson(x)
xt_box, lam_box = stats.boxcox(x + 1)
assert_allclose(xt_yeo, xt_box, rtol=1e-6)
assert_allclose(lam_yeo, lam_box, rtol=1e-6)
class TestYeojohnsonNormmax:
def setup_method(self):
self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
def test_mle(self):
maxlog = stats.yeojohnson_normmax(self.x)
assert_allclose(maxlog, 1.876393, rtol=1e-6)
def test_darwin_example(self):
# test from original paper "A new family of power transformations to
# improve normality or symmetry" by Yeo and Johnson.
x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
7.5, -6.0]
lmbda = stats.yeojohnson_normmax(x)
assert np.allclose(lmbda, 1.305, atol=1e-3)
class TestCircFuncs:
# In gh-5747, the R package `circular` was used to calculate reference
# values for the circular variance, e.g.:
# library(circular)
# options(digits=16)
# x = c(0, 2*pi/3, 5*pi/3)
# var.circular(x)
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 0.006455174270186603),
(stats.circstd, 6.520702116)])
def test_circfuncs(self, test_func, expected):
x = np.array([355, 5, 2, 359, 10, 350])
assert_allclose(test_func(x, high=360), expected, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = (x*np.pi/180).var()
# for small variations, circvar is approximately half the
# linear variance
V1 = V1 / 2.
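        # (Sketch: writing each angle as mu + eps with small eps in radians,
        # R = |mean(exp(1j*eps))| ~= 1 - var(eps)/2, and circvar = 1 - R.)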
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
@pytest.mark.parametrize("test_func, numpy_func",
[(stats.circmean, np.mean),
(stats.circvar, np.var),
(stats.circstd, np.std)])
def test_circfuncs_close(self, test_func, numpy_func):
# circfuncs should handle very similar inputs (gh-12740)
x = np.array([0.12675364631578953] * 10 + [0.12675365920187928] * 100)
circstat = test_func(x)
normal = numpy_func(x)
assert_allclose(circstat, normal, atol=2e-8)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 0.006455174270186603),
(stats.circstd, 6.520702116)])
def test_circfuncs_array_like(self, test_func, expected):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(test_func(x, high=360), expected, rtol=1e-7)
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_empty(self, test_func):
assert_(np.isnan(test_func([])))
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_propagate(self, test_func):
x = [355, 5, 2, 359, 10, 350, np.nan]
assert_(np.isnan(test_func(x, high=360)))
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean,
{None: np.nan, 0: 355.66582264, 1: 0.28725053}),
(stats.circvar,
{None: np.nan,
0: 0.002570671054089924,
1: 0.005545914017677123}),
(stats.circstd,
{None: np.nan, 0: 4.11093193, 1: 6.04265394})])
def test_nan_propagate_array(self, test_func, expected):
x = np.array([[355, 5, 2, 359, 10, 350, 1],
[351, 7, 4, 352, 9, 349, np.nan],
[1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
for axis in expected.keys():
out = test_func(x, high=360, axis=axis)
if axis is None:
assert_(np.isnan(out))
else:
assert_allclose(out[0], expected[axis], rtol=1e-7)
assert_(np.isnan(out[1:]).all())
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean,
{None: 359.4178026893944,
0: np.array([353.0, 6.0, 3.0, 355.5, 9.5,
349.5]),
1: np.array([0.16769015, 358.66510252])}),
(stats.circvar,
{None: 0.008396678483192477,
0: np.array([1.9997969, 0.4999873, 0.4999873,
6.1230956, 0.1249992, 0.1249992]
)*(np.pi/180)**2,
1: np.array([0.006455174270186603,
0.01016767581393285])}),
(stats.circstd,
{None: 7.440570778057074,
0: np.array([2.00020313, 1.00002539, 1.00002539,
3.50108929, 0.50000317,
0.50000317]),
1: np.array([6.52070212, 8.19138093])})])
def test_nan_omit_array(self, test_func, expected):
x = np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, 9, 349, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
for axis in expected.keys():
out = test_func(x, high=360, nan_policy='omit', axis=axis)
if axis is None:
assert_allclose(out, expected[axis], rtol=1e-7)
else:
assert_allclose(out[:-1], expected[axis], rtol=1e-7)
assert_(np.isnan(out[-1]))
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 0.006455174270186603),
(stats.circstd, 6.520702116)])
def test_nan_omit(self, test_func, expected):
x = [355, 5, 2, 359, 10, 350, np.nan]
assert_allclose(test_func(x, high=360, nan_policy='omit'),
expected, rtol=1e-7)
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_omit_all(self, test_func):
x = [np.nan, np.nan, np.nan, np.nan, np.nan]
assert_(np.isnan(test_func(x, nan_policy='omit')))
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_omit_all_axis(self, test_func):
x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]])
out = test_func(x, nan_policy='omit', axis=1)
assert_(np.isnan(out).all())
assert_(len(out) == 2)
@pytest.mark.parametrize("x",
[[355, 5, 2, 359, 10, 350, np.nan],
np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, np.nan, 9, 349]])])
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_raise(self, test_func, x):
assert_raises(ValueError, test_func, x, high=360, nan_policy='raise')
@pytest.mark.parametrize("x",
[[355, 5, 2, 359, 10, 350, np.nan],
np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, np.nan, 9, 349]])])
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_bad_nan_policy(self, test_func, x):
assert_raises(ValueError, test_func, x, high=360, nan_policy='foobar')
def test_circmean_scalar(self):
x = 1.
M1 = x
M2 = stats.circmean(x)
assert_allclose(M2, M1, rtol=1e-5)
def test_circmean_range(self):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
assert_(m < np.pi)
assert_(m > -np.pi)
def test_circfuncs_uint8(self):
# regression test for gh-7255: overflow when working with
# numpy uint8 data type
x = np.array([150, 10], dtype='uint8')
assert_equal(stats.circmean(x, high=180), 170.0)
assert_allclose(stats.circvar(x, high=180), 0.2339555554617, rtol=1e-7)
assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7)
class TestMedianTest:
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
        # This would result in a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
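        # Row 0 counts values above the grand median per sample; row 1 the
        # rest, so with ties="below" the tied 5 in `y` lands in row 1.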
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
assert_equal(t, np.array([[0, 2], [2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
@pytest.mark.parametrize("correction", [False, True])
def test_result(self, correction):
x = [1, 2, 3]
y = [1, 2, 3]
res = stats.median_test(x, y, correction=correction)
assert_equal((res.statistic, res.pvalue, res.median, res.table), res)
class TestDirectionalStats:
# Reference implementations are not available
def test_directional_stats_correctness(self):
# Data from Fisher: Dispersion on a sphere, 1953 and
# Mardia and Jupp, Directional Statistics.
decl = -np.deg2rad(np.array([343.2, 62., 36.9, 27., 359.,
5.7, 50.4, 357.6, 44.]))
incl = -np.deg2rad(np.array([66.1, 68.7, 70.1, 82.1, 79.5,
73., 69.3, 58.8, 51.4]))
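        # Convert the (declination, inclination) pairs to unit vectors:
        # (cos(incl)*cos(decl), cos(incl)*sin(decl), sin(incl)).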
data = np.stack((np.cos(incl) * np.cos(decl),
np.cos(incl) * np.sin(decl),
np.sin(incl)),
axis=1)
dirstats = stats.directional_stats(data)
directional_mean = dirstats.mean_direction
mean_rounded = np.round(directional_mean, 4)
reference_mean = np.array([0.2984, -0.1346, -0.9449])
assert_allclose(mean_rounded, reference_mean)
@pytest.mark.parametrize('angles, ref', [
([-np.pi/2, np.pi/2], 1.),
([0, 2*np.pi], 0.)
])
def test_directional_stats_2d_special_cases(self, angles, ref):
if callable(ref):
ref = ref(angles)
data = np.stack([np.cos(angles), np.sin(angles)], axis=1)
res = 1 - stats.directional_stats(data).mean_resultant_length
assert_allclose(res, ref)
def test_directional_stats_2d(self):
# Test that for circular data directional_stats
# yields the same result as circmean/circvar
rng = np.random.default_rng(0xec9a6899d5a2830e0d1af479dbe1fd0c)
testdata = 2 * np.pi * rng.random((1000, ))
testdata_vector = np.stack((np.cos(testdata),
np.sin(testdata)),
axis=1)
dirstats = stats.directional_stats(testdata_vector)
directional_mean = dirstats.mean_direction
directional_mean_angle = np.arctan2(directional_mean[1],
directional_mean[0])
directional_mean_angle = directional_mean_angle % (2*np.pi)
circmean = stats.circmean(testdata)
assert_allclose(circmean, directional_mean_angle)
directional_var = 1 - dirstats.mean_resultant_length
circular_var = stats.circvar(testdata)
assert_allclose(directional_var, circular_var)
def test_directional_mean_higher_dim(self):
# test that directional_stats works for higher dimensions
# here a 4D array is reduced over axis = 2
data = np.array([[0.8660254, 0.5, 0.],
[0.8660254, -0.5, 0.]])
full_array = np.tile(data, (2, 2, 2, 1))
expected = np.array([[[1., 0., 0.],
[1., 0., 0.]],
[[1., 0., 0.],
[1., 0., 0.]]])
dirstats = stats.directional_stats(full_array, axis=2)
assert_allclose(expected, dirstats.mean_direction)
def test_directional_stats_list_ndarray_input(self):
# test that list and numpy array inputs yield same results
data = [[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0]]
data_array = np.asarray(data)
res = stats.directional_stats(data)
ref = stats.directional_stats(data_array)
assert_allclose(res.mean_direction, ref.mean_direction)
        assert_allclose(res.mean_resultant_length,
                        ref.mean_resultant_length)
def test_directional_stats_1d_error(self):
# test that one-dimensional data raises ValueError
data = np.ones((5, ))
message = (r"samples must at least be two-dimensional. "
r"Instead samples has shape: (5,)")
with pytest.raises(ValueError, match=re.escape(message)):
stats.directional_stats(data)
def test_directional_stats_normalize(self):
# test that directional stats calculations yield same results
# for unnormalized input with normalize=True and normalized
# input with normalize=False
data = np.array([[0.8660254, 0.5, 0.],
[1.7320508, -1., 0.]])
res = stats.directional_stats(data, normalize=True)
normalized_data = data / np.linalg.norm(data, axis=-1,
keepdims=True)
ref = stats.directional_stats(normalized_data,
normalize=False)
assert_allclose(res.mean_direction, ref.mean_direction)
assert_allclose(res.mean_resultant_length,
ref.mean_resultant_length)
class TestFDRControl:
def test_input_validation(self):
message = "`ps` must include only numbers between 0 and 1"
with pytest.raises(ValueError, match=message):
stats.false_discovery_control([-1, 0.5, 0.7])
with pytest.raises(ValueError, match=message):
stats.false_discovery_control([0.5, 0.7, 2])
with pytest.raises(ValueError, match=message):
stats.false_discovery_control([0.5, 0.7, np.nan])
message = "Unrecognized `method` 'YAK'"
with pytest.raises(ValueError, match=message):
stats.false_discovery_control([0.5, 0.7, 0.9], method='YAK')
message = "`axis` must be an integer or `None`"
with pytest.raises(ValueError, match=message):
stats.false_discovery_control([0.5, 0.7, 0.9], axis=1.5)
with pytest.raises(ValueError, match=message):
stats.false_discovery_control([0.5, 0.7, 0.9], axis=(1, 2))
def test_against_TileStats(self):
# See reference [3] of false_discovery_control
ps = [0.005, 0.009, 0.019, 0.022, 0.051, 0.101, 0.361, 0.387]
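        # BH adjusts p_(i) to min over j >= i of p_(j)*m/j; e.g. here
        # 0.005*8/1 = 0.040 but 0.009*8/2 = 0.036, so the two smallest
        # p-values are both adjusted down to 0.036.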
res = stats.false_discovery_control(ps)
ref = [0.036, 0.036, 0.044, 0.044, 0.082, 0.135, 0.387, 0.387]
assert_allclose(res, ref, atol=1e-3)
@pytest.mark.parametrize("case",
[([0.24617028, 0.01140030, 0.05652047, 0.06841983,
0.07989886, 0.01841490, 0.17540784, 0.06841983,
0.06841983, 0.25464082], 'bh'),
([0.72102493, 0.03339112, 0.16554665, 0.20039952,
0.23402122, 0.05393666, 0.51376399, 0.20039952,
0.20039952, 0.74583488], 'by')])
def test_against_R(self, case):
# Test against p.adjust, e.g.
# p = c(0.22155325, 0.00114003,..., 0.0364813 , 0.25464082)
# p.adjust(p, "BY")
ref, method = case
rng = np.random.default_rng(6134137338861652935)
ps = stats.loguniform.rvs(1e-3, 0.5, size=10, random_state=rng)
ps[3] = ps[7] # force a tie
res = stats.false_discovery_control(ps, method=method)
assert_allclose(res, ref, atol=1e-6)
def test_axis_None(self):
rng = np.random.default_rng(6134137338861652935)
ps = stats.loguniform.rvs(1e-3, 0.5, size=(3, 4, 5), random_state=rng)
res = stats.false_discovery_control(ps, axis=None)
ref = stats.false_discovery_control(ps.ravel())
assert_equal(res, ref)
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_axis(self, axis):
rng = np.random.default_rng(6134137338861652935)
ps = stats.loguniform.rvs(1e-3, 0.5, size=(3, 4, 5), random_state=rng)
res = stats.false_discovery_control(ps, axis=axis)
ref = np.apply_along_axis(stats.false_discovery_control, axis, ps)
assert_equal(res, ref)
def test_edge_cases(self):
assert_array_equal(stats.false_discovery_control([0.25]), [0.25])
assert_array_equal(stats.false_discovery_control(0.25), 0.25)
assert_array_equal(stats.false_discovery_control([]), [])
| 118674
| 41.70421
| 92
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_relative_risk.py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from scipy.stats.contingency import relative_risk
# Test just the calculation of the relative risk, including edge
# cases that result in a relative risk of 0, inf or nan.
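# The relative risk is the ratio of incidence proportions; e.g. for the
# first case below, (1/4) / (3/8) = 0.25/0.375 = 2/3.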
@pytest.mark.parametrize(
'exposed_cases, exposed_total, control_cases, control_total, expected_rr',
[(1, 4, 3, 8, 0.25 / 0.375),
(0, 10, 5, 20, 0),
(0, 10, 0, 20, np.nan),
(5, 15, 0, 20, np.inf)]
)
def test_relative_risk(exposed_cases, exposed_total,
control_cases, control_total, expected_rr):
result = relative_risk(exposed_cases, exposed_total,
control_cases, control_total)
assert_allclose(result.relative_risk, expected_rr, rtol=1e-13)
def test_relative_risk_confidence_interval():
result = relative_risk(exposed_cases=16, exposed_total=128,
control_cases=24, control_total=256)
rr = result.relative_risk
ci = result.confidence_interval(confidence_level=0.95)
# The corresponding calculation in R using the epitools package.
#
# > library(epitools)
# > c <- matrix(c(232, 112, 24, 16), nrow=2)
# > result <- riskratio(c)
# > result$measure
# risk ratio with 95% C.I.
# Predictor estimate lower upper
# Exposed1 1.000000 NA NA
# Exposed2 1.333333 0.7347317 2.419628
#
# The last line is the result that we want.
assert_allclose(rr, 4/3)
assert_allclose((ci.low, ci.high), (0.7347317, 2.419628), rtol=5e-7)
def test_relative_risk_ci_conflevel0():
result = relative_risk(exposed_cases=4, exposed_total=12,
control_cases=5, control_total=30)
rr = result.relative_risk
assert_allclose(rr, 2.0, rtol=1e-14)
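    # The interval comes from a normal approximation to log(rr), so a
    # confidence level of 0 gives a zero-width interval at rr itself.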
ci = result.confidence_interval(0)
assert_allclose((ci.low, ci.high), (2.0, 2.0), rtol=1e-12)
def test_relative_risk_ci_conflevel1():
result = relative_risk(exposed_cases=4, exposed_total=12,
control_cases=5, control_total=30)
ci = result.confidence_interval(1)
assert_equal((ci.low, ci.high), (0, np.inf))
def test_relative_risk_ci_edge_cases_00():
result = relative_risk(exposed_cases=0, exposed_total=12,
control_cases=0, control_total=30)
assert_equal(result.relative_risk, np.nan)
ci = result.confidence_interval()
assert_equal((ci.low, ci.high), (np.nan, np.nan))
def test_relative_risk_ci_edge_cases_01():
result = relative_risk(exposed_cases=0, exposed_total=12,
control_cases=1, control_total=30)
assert_equal(result.relative_risk, 0)
ci = result.confidence_interval()
assert_equal((ci.low, ci.high), (0.0, np.nan))
def test_relative_risk_ci_edge_cases_10():
result = relative_risk(exposed_cases=1, exposed_total=12,
control_cases=0, control_total=30)
assert_equal(result.relative_risk, np.inf)
ci = result.confidence_interval()
assert_equal((ci.low, ci.high), (np.nan, np.inf))
@pytest.mark.parametrize('ec, et, cc, ct', [(0, 0, 10, 20),
(-1, 10, 1, 5),
(1, 10, 0, 0),
(1, 10, -1, 4)])
def test_relative_risk_bad_value(ec, et, cc, ct):
with pytest.raises(ValueError, match="must be an integer not less than"):
relative_risk(ec, et, cc, ct)
def test_relative_risk_bad_type():
with pytest.raises(TypeError, match="must be an integer"):
relative_risk(1, 10, 2.0, 40)
| 3646
| 36.989583
| 78
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_tukeylambda_stats.py
|
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from scipy.stats._tukeylambda_stats import (tukeylambda_variance,
tukeylambda_kurtosis)
def test_tukeylambda_stats_known_exact():
"""Compare results with some known exact formulas."""
# Some exact values of the Tukey Lambda variance and kurtosis:
    #   lambda     var                 kurtosis
    #   0          pi**2/3             6/5     (logistic distribution)
    #   0.5        4 - pi              (5/3 - pi/2)/(pi/4 - 1)**2 - 3
    #   1          1/3                 -6/5    (uniform distribution on (-1, 1))
    #   2          1/12                -6/5    (uniform distribution on (-1/2, 1/2))
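    # Sanity check of the table: for lambda = 1 the distribution is uniform
    # on (-1, 1), so var = (1 - (-1))**2 / 12 = 1/3.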
# lambda = 0
var = tukeylambda_variance(0)
assert_allclose(var, np.pi**2 / 3, atol=1e-12)
kurt = tukeylambda_kurtosis(0)
assert_allclose(kurt, 1.2, atol=1e-10)
# lambda = 0.5
var = tukeylambda_variance(0.5)
assert_allclose(var, 4 - np.pi, atol=1e-12)
kurt = tukeylambda_kurtosis(0.5)
desired = (5./3 - np.pi/2) / (np.pi/4 - 1)**2 - 3
assert_allclose(kurt, desired, atol=1e-10)
# lambda = 1
var = tukeylambda_variance(1)
assert_allclose(var, 1.0 / 3, atol=1e-12)
kurt = tukeylambda_kurtosis(1)
assert_allclose(kurt, -1.2, atol=1e-10)
# lambda = 2
var = tukeylambda_variance(2)
assert_allclose(var, 1.0 / 12, atol=1e-12)
kurt = tukeylambda_kurtosis(2)
assert_allclose(kurt, -1.2, atol=1e-10)
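# A hedged sketch of the closed form behind these checks (the standard
# result for the Tukey lambda variance, not necessarily scipy's internal
# implementation): for lam > -0.5, lam != 0,
#     Var = (2/lam**2) * (1/(1 + 2*lam) - B(lam + 1, lam + 1)),
# where B is the beta function.
def _tukeylambda_var_sketch(lam):
    # hypothetical helper, for illustration only
    from scipy.special import beta
    return (2.0 / lam**2) * (1.0 / (1 + 2*lam) - beta(lam + 1, lam + 1))
# e.g. _tukeylambda_var_sketch(1) == 1/3 and
# _tukeylambda_var_sketch(0.5) == 4 - pi, as asserted above.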
def test_tukeylambda_stats_mpmath():
"""Compare results with some values that were computed using mpmath."""
a10 = dict(atol=1e-10, rtol=0)
a12 = dict(atol=1e-12, rtol=0)
data = [
# lambda variance kurtosis
[-0.1, 4.78050217874253547, 3.78559520346454510],
[-0.0649, 4.16428023599895777, 2.52019675947435718],
[-0.05, 3.93672267890775277, 2.13129793057777277],
[-0.001, 3.30128380390964882, 1.21452460083542988],
[0.001, 3.27850775649572176, 1.18560634779287585],
[0.03125, 2.95927803254615800, 0.804487555161819980],
[0.05, 2.78281053405464501, 0.611604043886644327],
[0.0649, 2.65282386754100551, 0.476834119532774540],
[1.2, 0.242153920578588346, -1.23428047169049726],
[10.0, 0.00095237579757703597, 2.37810697355144933],
[20.0, 0.00012195121951131043, 7.37654321002709531],
]
for lam, var_expected, kurt_expected in data:
var = tukeylambda_variance(lam)
assert_allclose(var, var_expected, **a12)
kurt = tukeylambda_kurtosis(lam)
assert_allclose(kurt, kurt_expected, **a10)
# Test with vector arguments (most of the other tests are for single
# values).
lam, var_expected, kurt_expected = zip(*data)
var = tukeylambda_variance(lam)
assert_allclose(var, var_expected, **a12)
kurt = tukeylambda_kurtosis(lam)
assert_allclose(kurt, kurt_expected, **a10)
def test_tukeylambda_stats_invalid():
"""Test values of lambda outside the domains of the functions."""
lam = [-1.0, -0.5]
var = tukeylambda_variance(lam)
assert_equal(var, np.array([np.nan, np.inf]))
lam = [-1.0, -0.25]
kurt = tukeylambda_kurtosis(lam)
assert_equal(kurt, np.array([np.nan, np.inf]))
| 3,231
| 36.581395
| 75
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_sampling.py
|
import threading
import pickle
import pytest
from copy import deepcopy
import platform
import sys
import math
import numpy as np
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy.stats.sampling import (
TransformedDensityRejection,
DiscreteAliasUrn,
DiscreteGuideTable,
NumericalInversePolynomial,
NumericalInverseHermite,
RatioUniforms,
SimpleRatioUniforms,
UNURANError
)
from pytest import raises as assert_raises
from scipy import stats
from scipy import special
from scipy.stats import chisquare, cramervonmises
from scipy.stats._distr_params import distdiscrete, distcont
from scipy._lib._util import check_random_state
# common test data: this data can be shared between all the tests.
# Normal distribution shared between all the continuous methods
class StandardNormal:
def pdf(self, x):
# normalization constant needed for NumericalInverseHermite
return 1./np.sqrt(2.*np.pi) * np.exp(-0.5 * x*x)
def dpdf(self, x):
return 1./np.sqrt(2.*np.pi) * -x * np.exp(-0.5 * x*x)
def cdf(self, x):
return special.ndtr(x)
all_methods = [
("TransformedDensityRejection", {"dist": StandardNormal()}),
("DiscreteAliasUrn", {"dist": [0.02, 0.18, 0.8]}),
("DiscreteGuideTable", {"dist": [0.02, 0.18, 0.8]}),
("NumericalInversePolynomial", {"dist": StandardNormal()}),
("NumericalInverseHermite", {"dist": StandardNormal()}),
("SimpleRatioUniforms", {"dist": StandardNormal(), "mode": 0})
]
if (sys.implementation.name == 'pypy'
and sys.implementation.version < (7, 3, 10)):
# changed in PyPy for v7.3.10
floaterr = r"unsupported operand type for float\(\): 'list'"
else:
floaterr = r"must be real number, not list"
# Make sure an internal error occurs in UNU.RAN when invalid callbacks are
# passed. Moreover, different generators throw different error messages.
# So, in case of an `UNURANError`, we do not validate the error message.
bad_pdfs_common = [
# Negative PDF
(lambda x: -x, UNURANError, r"..."),
# Returning wrong type
(lambda x: [], TypeError, floaterr),
# Undefined name inside the function
(lambda x: foo, NameError, r"name 'foo' is not defined"), # type: ignore[name-defined] # noqa
# Infinite value returned => Overflow error.
(lambda x: np.inf, UNURANError, r"..."),
# NaN value => internal error in UNU.RAN
(lambda x: np.nan, UNURANError, r"..."),
# signature of PDF wrong
(lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
]
# same approach for dpdf
bad_dpdf_common = [
# Infinite value returned.
(lambda x: np.inf, UNURANError, r"..."),
# NaN value => internal error in UNU.RAN
(lambda x: np.nan, UNURANError, r"..."),
# Returning wrong type
(lambda x: [], TypeError, floaterr),
# Undefined name inside the function
(lambda x: foo, NameError, r"name 'foo' is not defined"), # type: ignore[name-defined] # noqa
# signature of dPDF wrong
(lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
]
# same approach for logpdf
bad_logpdfs_common = [
# Returning wrong type
(lambda x: [], TypeError, floaterr),
# Undefined name inside the function
(lambda x: foo, NameError, r"name 'foo' is not defined"), # type: ignore[name-defined] # noqa
# Infinite value returned => Overflow error.
(lambda x: np.inf, UNURANError, r"..."),
# NaN value => internal error in UNU.RAN
(lambda x: np.nan, UNURANError, r"..."),
# signature of logpdf wrong
(lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
]
bad_pv_common = [
([], r"must contain at least one element"),
([[1.0, 0.0]], r"wrong number of dimensions \(expected 1, got 2\)"),
([0.2, 0.4, np.nan, 0.8], r"must contain only finite / non-nan values"),
([0.2, 0.4, np.inf, 0.8], r"must contain only finite / non-nan values"),
([0.0, 0.0], r"must contain at least one non-zero value"),
]
# size of the domains is incorrect
bad_sized_domains = [
# > 2 elements in the domain
((1, 2, 3), ValueError, r"must be a length 2 tuple"),
# empty domain
((), ValueError, r"must be a length 2 tuple")
]
# domain values are incorrect
bad_domains = [
((2, 1), UNURANError, r"left >= right"),
((1, 1), UNURANError, r"left >= right"),
]
# infinite and nan values present in domain.
inf_nan_domains = [
# left >= right
((10, 10), UNURANError, r"left >= right"),
((np.inf, np.inf), UNURANError, r"left >= right"),
((-np.inf, -np.inf), UNURANError, r"left >= right"),
((np.inf, -np.inf), UNURANError, r"left >= right"),
# Also include nans in some of the domains.
((-np.inf, np.nan), ValueError, r"only non-nan values"),
((np.nan, np.inf), ValueError, r"only non-nan values")
]
# `nan` values present in domain. Some distributions don't support
# infinite tails, so don't mix the nan values with infinities.
nan_domains = [
((0, np.nan), ValueError, r"only non-nan values"),
((np.nan, np.nan), ValueError, r"only non-nan values")
]
# all the methods should throw errors for nan, bad sized, and bad valued
# domains.
@pytest.mark.parametrize("domain, err, msg",
bad_domains + bad_sized_domains +
nan_domains) # type: ignore[operator]
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_bad_domain(domain, err, msg, method, kwargs):
Method = getattr(stats.sampling, method)
with pytest.raises(err, match=msg):
Method(**kwargs, domain=domain)
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_random_state(method, kwargs):
Method = getattr(stats.sampling, method)
# simple seed that works for any version of NumPy
seed = 123
rng1 = Method(**kwargs, random_state=seed)
rng2 = Method(**kwargs, random_state=seed)
assert_equal(rng1.rvs(100), rng2.rvs(100))
# global seed
np.random.seed(123)
rng1 = Method(**kwargs)
rvs1 = rng1.rvs(100)
np.random.seed(None)
rng2 = Method(**kwargs, random_state=123)
rvs2 = rng2.rvs(100)
assert_equal(rvs1, rvs2)
# Generator seed for new NumPy
# when a RandomState is given, it should take the bitgen_t
# member of the class and create a Generator instance.
seed1 = np.random.RandomState(np.random.MT19937(123))
seed2 = np.random.Generator(np.random.MT19937(123))
rng1 = Method(**kwargs, random_state=seed1)
rng2 = Method(**kwargs, random_state=seed2)
assert_equal(rng1.rvs(100), rng2.rvs(100))
def test_set_random_state():
rng1 = TransformedDensityRejection(StandardNormal(), random_state=123)
rng2 = TransformedDensityRejection(StandardNormal())
rng2.set_random_state(123)
assert_equal(rng1.rvs(100), rng2.rvs(100))
rng = TransformedDensityRejection(StandardNormal(), random_state=123)
rvs1 = rng.rvs(100)
rng.set_random_state(123)
rvs2 = rng.rvs(100)
assert_equal(rvs1, rvs2)
def test_threading_behaviour():
# Test if the API is thread-safe.
# This verifies if the lock mechanism and the use of `PyErr_Occurred`
# is correct.
errors = {"err1": None, "err2": None}
class Distribution:
def __init__(self, pdf_msg):
self.pdf_msg = pdf_msg
def pdf(self, x):
if 49.9 < x < 50.0:
raise ValueError(self.pdf_msg)
return x
def dpdf(self, x):
return 1
def func1():
dist = Distribution('foo')
rng = TransformedDensityRejection(dist, domain=(10, 100),
random_state=12)
try:
rng.rvs(100000)
except ValueError as e:
errors['err1'] = e.args[0]
def func2():
dist = Distribution('bar')
rng = TransformedDensityRejection(dist, domain=(10, 100),
random_state=2)
try:
rng.rvs(100000)
except ValueError as e:
errors['err2'] = e.args[0]
t1 = threading.Thread(target=func1)
t2 = threading.Thread(target=func2)
t1.start()
t2.start()
t1.join()
t2.join()
assert errors['err1'] == 'foo'
assert errors['err2'] == 'bar'
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_pickle(method, kwargs):
Method = getattr(stats.sampling, method)
rng1 = Method(**kwargs, random_state=123)
obj = pickle.dumps(rng1)
rng2 = pickle.loads(obj)
assert_equal(rng1.rvs(100), rng2.rvs(100))
@pytest.mark.parametrize("size", [None, 0, (0, ), 1, (10, 3), (2, 3, 4, 5),
(0, 0), (0, 1)])
def test_rvs_size(size):
# As the `rvs` method is present in the base class and shared between
# all the classes, we can just test with one of the methods.
rng = TransformedDensityRejection(StandardNormal())
if size is None:
assert np.isscalar(rng.rvs(size))
else:
if np.isscalar(size):
size = (size, )
assert rng.rvs(size).shape == size
def test_with_scipy_distribution():
# test if the setup works with SciPy's rv_frozen distributions
dist = stats.norm()
urng = np.random.default_rng(0)
rng = NumericalInverseHermite(dist, random_state=urng)
u = np.linspace(0, 1, num=100)
check_cont_samples(rng, dist, dist.stats())
assert_allclose(dist.ppf(u), rng.ppf(u))
# test if it works with `loc` and `scale`
dist = stats.norm(loc=10., scale=5.)
rng = NumericalInverseHermite(dist, random_state=urng)
check_cont_samples(rng, dist, dist.stats())
assert_allclose(dist.ppf(u), rng.ppf(u))
# check for discrete distributions
dist = stats.binom(10, 0.2)
rng = DiscreteAliasUrn(dist, random_state=urng)
domain = dist.support()
pv = dist.pmf(np.arange(domain[0], domain[1]+1))
check_discr_samples(rng, pv, dist.stats())
def check_cont_samples(rng, dist, mv_ex):
rvs = rng.rvs(100000)
mv = rvs.mean(), rvs.var()
# test the moments only if the variance is finite
if np.isfinite(mv_ex[1]):
assert_allclose(mv, mv_ex, rtol=1e-7, atol=1e-1)
# Cramer Von Mises test for goodness-of-fit
rvs = rng.rvs(500)
dist.cdf = np.vectorize(dist.cdf)
pval = cramervonmises(rvs, dist.cdf).pvalue
assert pval > 0.1
def check_discr_samples(rng, pv, mv_ex):
rvs = rng.rvs(100000)
# test if the first few moments match
mv = rvs.mean(), rvs.var()
assert_allclose(mv, mv_ex, rtol=1e-3, atol=1e-1)
# normalize
pv = pv / pv.sum()
# chi-squared test for goodness-of-fit
obs_freqs = np.zeros_like(pv)
_, freqs = np.unique(rvs, return_counts=True)
freqs = freqs / freqs.sum()
obs_freqs[:freqs.size] = freqs
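    # note: the assignment above assumes the sampled values form a
    # contiguous block starting at the first support point; support points
    # that were never drawn (typically in the far tail) keep an observed
    # frequency of 0.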
pval = chisquare(obs_freqs, pv).pvalue
assert pval > 0.1
def test_warning_center_not_in_domain():
# UNURAN will warn if the center provided or the one computed w/o the
# domain is outside of the domain
msg = "102 : center moved into domain of distribution"
with pytest.warns(RuntimeWarning, match=msg):
NumericalInversePolynomial(StandardNormal(), center=0, domain=(3, 5))
with pytest.warns(RuntimeWarning, match=msg):
NumericalInversePolynomial(StandardNormal(), domain=(3, 5))
@pytest.mark.parametrize('method', ["SimpleRatioUniforms",
"NumericalInversePolynomial",
"TransformedDensityRejection"])
def test_error_mode_not_in_domain(method):
# UNURAN raises an error if the mode is not in the domain
# the behavior is different compared to the case that center is not in the
# domain. mode is supposed to be the exact value, center can be an
# approximate value
Method = getattr(stats.sampling, method)
msg = "17 : mode not in domain"
with pytest.raises(UNURANError, match=msg):
Method(StandardNormal(), mode=0, domain=(3, 5))
@pytest.mark.parametrize('method', ["NumericalInverseHermite",
"NumericalInversePolynomial"])
class TestQRVS:
def test_input_validation(self, method):
match = "`qmc_engine` must be an instance of..."
with pytest.raises(ValueError, match=match):
Method = getattr(stats.sampling, method)
gen = Method(StandardNormal())
gen.qrvs(qmc_engine=0)

Method = getattr(stats.sampling, method)
gen = Method(StandardNormal())
match = "`d` must be consistent with dimension of `qmc_engine`."
with pytest.raises(ValueError, match=match):
gen.qrvs(d=3, qmc_engine=stats.qmc.Halton(2))
qrngs = [None, stats.qmc.Sobol(1, seed=0), stats.qmc.Halton(3, seed=0)]
# `size=None` should not add anything to the shape, `size=1` should
sizes = [(None, tuple()), (1, (1,)), (4, (4,)),
((4,), (4,)), ((2, 4), (2, 4))] # type: ignore
# Neither `d=None` nor `d=1` should add anything to the shape
ds = [(None, tuple()), (1, tuple()), (3, (3,))]
@pytest.mark.parametrize('qrng', qrngs)
@pytest.mark.parametrize('size_in, size_out', sizes)
@pytest.mark.parametrize('d_in, d_out', ds)
def test_QRVS_shape_consistency(self, qrng, size_in, size_out,
d_in, d_out, method):
w32 = sys.platform == "win32" and platform.architecture()[0] == "32bit"
if w32 and method == "NumericalInversePolynomial":
pytest.xfail("NumericalInversePolynomial.qrvs fails for Win "
"32-bit")
dist = StandardNormal()
Method = getattr(stats.sampling, method)
gen = Method(dist)
# If d and qrng.d are inconsistent, an error is raised
if d_in is not None and qrng is not None and qrng.d != d_in:
match = "`d` must be consistent with dimension of `qmc_engine`."
with pytest.raises(ValueError, match=match):
gen.qrvs(size_in, d=d_in, qmc_engine=qrng)
return
# Sometimes d is really determined by qrng
if d_in is None and qrng is not None and qrng.d != 1:
d_out = (qrng.d,)
shape_expected = size_out + d_out
qrng2 = deepcopy(qrng)
qrvs = gen.qrvs(size=size_in, d=d_in, qmc_engine=qrng)
if size_in is not None:
assert qrvs.shape == shape_expected
if qrng2 is not None:
uniform = qrng2.random(np.prod(size_in) or 1)
qrvs2 = stats.norm.ppf(uniform).reshape(shape_expected)
assert_allclose(qrvs, qrvs2, atol=1e-12)
def test_QRVS_size_tuple(self, method):
# QMCEngine samples are always of shape (n, d). When `size` is a tuple,
# we set `n = prod(size)` in the call to qmc_engine.random, transform
# the sample, and reshape it to the final dimensions. When we reshape,
# we need to be careful, because the _columns_ of the sample returned
# by a QMCEngine are "independent"-ish, but the elements within the
# columns are not. We need to make sure that this doesn't get mixed up
# by reshaping: qrvs[..., i] should remain "independent"-ish of
# qrvs[..., i+1], but the elements within qrvs[..., i] should be
# transformed from the same low-discrepancy sequence.
dist = StandardNormal()
Method = getattr(stats.sampling, method)
gen = Method(dist)
size = (3, 4)
d = 5
qrng = stats.qmc.Halton(d, seed=0)
qrng2 = stats.qmc.Halton(d, seed=0)
uniform = qrng2.random(np.prod(size))
qrvs = gen.qrvs(size=size, d=d, qmc_engine=qrng)
qrvs2 = stats.norm.ppf(uniform)
for i in range(d):
sample = qrvs[..., i]
sample2 = qrvs2[:, i].reshape(size)
assert_allclose(sample, sample2, atol=1e-12)
class TestTransformedDensityRejection:
# Simple Custom Distribution
class dist0:
def pdf(self, x):
return 3/4 * (1-x*x)
def dpdf(self, x):
return 3/4 * (-2*x)
def cdf(self, x):
return 3/4 * (x - x**3/3 + 2/3)
def support(self):
return -1, 1
# Standard Normal Distribution
class dist1:
def pdf(self, x):
return stats.norm._pdf(x / 0.1)
def dpdf(self, x):
return -x / 0.01 * stats.norm._pdf(x / 0.1)
def cdf(self, x):
return stats.norm._cdf(x / 0.1)
# pdf with piecewise linear function as transformed density
# with T = -1/sqrt with shift. Taken from UNU.RAN test suite
# (from file t_tdr_ps.c)
class dist2:
def __init__(self, shift):
self.shift = shift
def pdf(self, x):
x -= self.shift
y = 1. / (abs(x) + 1.)
return 0.5 * y * y
def dpdf(self, x):
x -= self.shift
y = 1. / (abs(x) + 1.)
y = y * y * y
return y if (x < 0.) else -y
def cdf(self, x):
x -= self.shift
if x <= 0.:
return 0.5 / (1. - x)
else:
return 1. - 0.5 / (1. + x)
dists = [dist0(), dist1(), dist2(0.), dist2(10000.)]
# exact mean and variance of the distributions in the list dists
mv0 = [0., 4./15.]
mv1 = [0., 0.01]
mv2 = [0., np.inf]
mv3 = [10000., np.inf]
mvs = [mv0, mv1, mv2, mv3]
@pytest.mark.parametrize("dist, mv_ex",
zip(dists, mvs))
def test_basic(self, dist, mv_ex):
with suppress_warnings() as sup:
# filter the warnings thrown by UNU.RAN
sup.filter(RuntimeWarning)
rng = TransformedDensityRejection(dist, random_state=42)
check_cont_samples(rng, dist, mv_ex)
# PDF 0 everywhere => bad construction points
bad_pdfs = [(lambda x: 0, UNURANError, r"50 : bad construction points.")]
bad_pdfs += bad_pdfs_common # type: ignore[arg-type]
@pytest.mark.parametrize("pdf, err, msg", bad_pdfs)
def test_bad_pdf(self, pdf, err, msg):
class dist:
pass
dist.pdf = pdf
dist.dpdf = lambda x: 1 # an arbitrary dPDF
with pytest.raises(err, match=msg):
TransformedDensityRejection(dist)
@pytest.mark.parametrize("dpdf, err, msg", bad_dpdf_common)
def test_bad_dpdf(self, dpdf, err, msg):
class dist:
pass
dist.pdf = lambda x: x
dist.dpdf = dpdf
with pytest.raises(err, match=msg):
TransformedDensityRejection(dist, domain=(1, 10))
# test domains with inf + nan in them. need to write a custom test for
# this because not all methods support infinite tails.
@pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
def test_inf_nan_domains(self, domain, err, msg):
with pytest.raises(err, match=msg):
TransformedDensityRejection(StandardNormal(), domain=domain)
@pytest.mark.parametrize("construction_points", [-1, 0, 0.1])
def test_bad_construction_points_scalar(self, construction_points):
with pytest.raises(ValueError, match=r"`construction_points` must be "
r"a positive integer."):
TransformedDensityRejection(
StandardNormal(), construction_points=construction_points
)
def test_bad_construction_points_array(self):
# empty array
construction_points = []
with pytest.raises(ValueError, match=r"`construction_points` must "
r"either be a "
r"scalar or a non-empty array."):
TransformedDensityRejection(
StandardNormal(), construction_points=construction_points
)
# construction_points not monotonically increasing
construction_points = [1, 1, 1, 1, 1, 1]
with pytest.warns(RuntimeWarning, match=r"33 : starting points not "
r"strictly monotonically "
r"increasing"):
TransformedDensityRejection(
StandardNormal(), construction_points=construction_points
)
# construction_points containing nans
construction_points = [np.nan, np.nan, np.nan]
with pytest.raises(UNURANError, match=r"50 : bad construction "
r"points."):
TransformedDensityRejection(
StandardNormal(), construction_points=construction_points
)
# construction_points out of domain
construction_points = [-10, 10]
with pytest.warns(RuntimeWarning, match=r"50 : starting point out of "
r"domain"):
TransformedDensityRejection(
StandardNormal(), domain=(-3, 3),
construction_points=construction_points
)
@pytest.mark.parametrize("c", [-1., np.nan, np.inf, 0.1, 1.])
def test_bad_c(self, c):
msg = r"`c` must either be -0.5 or 0."
with pytest.raises(ValueError, match=msg):
            TransformedDensityRejection(StandardNormal(), c=c)
u = [np.linspace(0, 1, num=1000), [], [[]], [np.nan],
[-np.inf, np.nan, np.inf], 0,
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]]
@pytest.mark.parametrize("u", u)
def test_ppf_hat(self, u):
# Increase the `max_squeeze_hat_ratio` so the ppf_hat is more
# accurate.
rng = TransformedDensityRejection(StandardNormal(),
max_squeeze_hat_ratio=0.9999)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in greater")
sup.filter(RuntimeWarning, "invalid value encountered in "
"greater_equal")
sup.filter(RuntimeWarning, "invalid value encountered in less")
sup.filter(RuntimeWarning, "invalid value encountered in "
"less_equal")
res = rng.ppf_hat(u)
expected = stats.norm.ppf(u)
assert_allclose(res, expected, rtol=1e-3, atol=1e-5)
assert res.shape == expected.shape
def test_bad_dist(self):
# Empty distribution
class dist:
...
msg = r"`pdf` required but not found."
with pytest.raises(ValueError, match=msg):
TransformedDensityRejection(dist)
# dPDF not present in dist
class dist:
pdf = lambda x: 1-x*x # noqa: E731
msg = r"`dpdf` required but not found."
with pytest.raises(ValueError, match=msg):
TransformedDensityRejection(dist)
class TestDiscreteAliasUrn:
# DAU fails on these probably because of large domains and small
# computation errors in PMF. Mean/SD match but chi-squared test fails.
basic_fail_dists = {
        'nchypergeom_fisher',  # numerical errors on tails
        'nchypergeom_wallenius',  # numerical errors on tails
'randint' # fails on 32-bit ubuntu
}
@pytest.mark.parametrize("distname, params", distdiscrete)
def test_basic(self, distname, params):
if distname in self.basic_fail_dists:
msg = ("DAU fails on these probably because of large domains "
"and small computation errors in PMF.")
pytest.skip(msg)
if not isinstance(distname, str):
dist = distname
else:
dist = getattr(stats, distname)
dist = dist(*params)
domain = dist.support()
if not np.isfinite(domain[1] - domain[0]):
# DAU only works with finite domain. So, skip the distributions
# with infinite tails.
pytest.skip("DAU only works with a finite domain.")
k = np.arange(domain[0], domain[1]+1)
pv = dist.pmf(k)
mv_ex = dist.stats('mv')
rng = DiscreteAliasUrn(dist, random_state=42)
check_discr_samples(rng, pv, mv_ex)
# Can't use bad_pmf_common here as we evaluate PMF early on to avoid
# unhelpful errors from UNU.RAN.
bad_pmf = [
# inf returned
(lambda x: np.inf, ValueError,
r"must contain only finite / non-nan values"),
# nan returned
(lambda x: np.nan, ValueError,
r"must contain only finite / non-nan values"),
# all zeros
(lambda x: 0.0, ValueError,
r"must contain at least one non-zero value"),
# Undefined name inside the function
(lambda x: foo, NameError, # type: ignore[name-defined] # noqa
r"name 'foo' is not defined"),
# Returning wrong type.
(lambda x: [], ValueError,
r"setting an array element with a sequence."),
# probabilities < 0
(lambda x: -x, UNURANError,
r"50 : probability < 0"),
# signature of PMF wrong
(lambda: 1.0, TypeError,
r"takes 0 positional arguments but 1 was given")
]
@pytest.mark.parametrize("pmf, err, msg", bad_pmf)
def test_bad_pmf(self, pmf, err, msg):
class dist:
pass
dist.pmf = pmf
with pytest.raises(err, match=msg):
DiscreteAliasUrn(dist, domain=(1, 10))
@pytest.mark.parametrize("pv", [[0.18, 0.02, 0.8],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
def test_sampling_with_pv(self, pv):
pv = np.asarray(pv, dtype=np.float64)
rng = DiscreteAliasUrn(pv, random_state=123)
rng.rvs(100_000)
pv = pv / pv.sum()
variates = np.arange(0, len(pv))
# test if the first few moments match
m_expected = np.average(variates, weights=pv)
v_expected = np.average((variates - m_expected) ** 2, weights=pv)
mv_expected = m_expected, v_expected
check_discr_samples(rng, pv, mv_expected)
@pytest.mark.parametrize("pv, msg", bad_pv_common)
def test_bad_pv(self, pv, msg):
with pytest.raises(ValueError, match=msg):
DiscreteAliasUrn(pv)
# DAU doesn't support infinite tails. So, it should throw an error when
# inf is present in the domain.
inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
(0, np.inf), (-np.inf, 0)]
@pytest.mark.parametrize("domain", inf_domain)
def test_inf_domain(self, domain):
with pytest.raises(ValueError, match=r"must be finite"):
DiscreteAliasUrn(stats.binom(10, 0.2), domain=domain)
def test_bad_urn_factor(self):
with pytest.warns(RuntimeWarning, match=r"relative urn size < 1."):
DiscreteAliasUrn([0.5, 0.5], urn_factor=-1)
def test_bad_args(self):
msg = (r"`domain` must be provided when the "
r"probability vector is not available.")
class dist:
def pmf(self, x):
return x
with pytest.raises(ValueError, match=msg):
DiscreteAliasUrn(dist)
class TestNumericalInversePolynomial:
# Simple Custom Distribution
class dist0:
def pdf(self, x):
return 3/4 * (1-x*x)
def cdf(self, x):
return 3/4 * (x - x**3/3 + 2/3)
def support(self):
return -1, 1
# Standard Normal Distribution
class dist1:
def pdf(self, x):
return stats.norm._pdf(x / 0.1)
def cdf(self, x):
return stats.norm._cdf(x / 0.1)
# Sin 2 distribution
# / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 1
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
class dist2:
def pdf(self, x):
return 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))
def cdf(self, x):
return (0.05*(x + 1) +
0.9*(1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) /
(4.*np.pi))
def support(self):
return -1, 1
# Sin 10 distribution
    # / 0.2 * (0.05 + 0.45*(1 +sin(2 Pi x))) if |x| <= 5
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
class dist3:
def pdf(self, x):
return 0.2 * (0.05 + 0.45 * (1 + np.sin(2*np.pi*x)))
def cdf(self, x):
return x/10. + 0.5 + 0.09/(2*np.pi) * (np.cos(10*np.pi) -
np.cos(2*np.pi*x))
def support(self):
return -5, 5
dists = [dist0(), dist1(), dist2(), dist3()]
# exact mean and variance of the distributions in the list dists
mv0 = [0., 4./15.]
mv1 = [0., 0.01]
mv2 = [-0.45/np.pi, 2/3*0.5 - 0.45**2/np.pi**2]
mv3 = [-0.45/np.pi, 0.2 * 250/3 * 0.5 - 0.45**2/np.pi**2]
mvs = [mv0, mv1, mv2, mv3]
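    # A hedged sketch of where mv2/mv3 come from (standard moment
    # integrals, not part of the tested API): for dist2,
    #   E[X]   = 0.45 * int_{-1}^{1} x*sin(2*pi*x) dx = -0.45/pi
    #   E[X^2] = 0.5 * int_{-1}^{1} x**2 dx = 1/3,
    # so Var = 2/3*0.5 - 0.45**2/pi**2; dist3 scales the same integrals
    # to the support (-5, 5).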
@pytest.mark.parametrize("dist, mv_ex",
zip(dists, mvs))
def test_basic(self, dist, mv_ex):
rng = NumericalInversePolynomial(dist, random_state=42)
check_cont_samples(rng, dist, mv_ex)
@pytest.mark.xslow
@pytest.mark.parametrize("distname, params", distcont)
def test_basic_all_scipy_dists(self, distname, params):
very_slow_dists = ['anglit', 'gausshyper', 'kappa4',
'ksone', 'kstwo', 'levy_l',
'levy_stable', 'studentized_range',
'trapezoid', 'triang', 'vonmises']
# for these distributions, some assertions fail due to minor
# numerical differences. They can be avoided either by changing
# the seed or by increasing the u_resolution.
fail_dists = ['chi2', 'fatiguelife', 'gibrat',
'halfgennorm', 'lognorm', 'ncf',
'ncx2', 'pareto', 't']
# for these distributions, skip the check for agreement between sample
# moments and true moments. We cannot expect them to pass due to the
# high variance of sample moments.
skip_sample_moment_check = ['rel_breitwigner']
if distname in very_slow_dists:
pytest.skip(f"PINV too slow for {distname}")
if distname in fail_dists:
pytest.skip(f"PINV fails for {distname}")
dist = (getattr(stats, distname)
if isinstance(distname, str)
else distname)
dist = dist(*params)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
rng = NumericalInversePolynomial(dist, random_state=42)
if distname in skip_sample_moment_check:
return
check_cont_samples(rng, dist, [dist.mean(), dist.var()])
@pytest.mark.parametrize("pdf, err, msg", bad_pdfs_common)
def test_bad_pdf(self, pdf, err, msg):
class dist:
pass
dist.pdf = pdf
with pytest.raises(err, match=msg):
NumericalInversePolynomial(dist, domain=[0, 5])
@pytest.mark.parametrize("logpdf, err, msg", bad_logpdfs_common)
def test_bad_logpdf(self, logpdf, err, msg):
class dist:
pass
dist.logpdf = logpdf
with pytest.raises(err, match=msg):
NumericalInversePolynomial(dist, domain=[0, 5])
# test domains with inf + nan in them. need to write a custom test for
# this because not all methods support infinite tails.
@pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
def test_inf_nan_domains(self, domain, err, msg):
with pytest.raises(err, match=msg):
NumericalInversePolynomial(StandardNormal(), domain=domain)
u = [
# test if quantile 0 and 1 return -inf and inf respectively and check
# the correctness of the PPF for equidistant points between 0 and 1.
np.linspace(0, 1, num=10000),
# test the PPF method for empty arrays
[], [[]],
# test if nans and infs return nan result.
[np.nan], [-np.inf, np.nan, np.inf],
# test if a scalar is returned for a scalar input.
0,
# test for arrays with nans, values greater than 1 and less than 0,
# and some valid values.
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
]
@pytest.mark.parametrize("u", u)
def test_ppf(self, u):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in greater")
sup.filter(RuntimeWarning, "invalid value encountered in "
"greater_equal")
sup.filter(RuntimeWarning, "invalid value encountered in less")
sup.filter(RuntimeWarning, "invalid value encountered in "
"less_equal")
res = rng.ppf(u)
expected = stats.norm.ppf(u)
assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
assert res.shape == expected.shape
x = [np.linspace(-10, 10, num=10000), [], [[]], [np.nan],
[-np.inf, np.nan, np.inf], 0,
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-np.inf, 3, 4]]]
@pytest.mark.parametrize("x", x)
def test_cdf(self, x):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in greater")
sup.filter(RuntimeWarning, "invalid value encountered in "
"greater_equal")
sup.filter(RuntimeWarning, "invalid value encountered in less")
sup.filter(RuntimeWarning, "invalid value encountered in "
"less_equal")
res = rng.cdf(x)
expected = stats.norm.cdf(x)
assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
assert res.shape == expected.shape
def test_u_error(self):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, u_resolution=1e-10)
max_error, mae = rng.u_error()
assert max_error < 1e-10
assert mae <= max_error
rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
max_error, mae = rng.u_error()
assert max_error < 1e-14
assert mae <= max_error
bad_orders = [1, 4.5, 20, np.inf, np.nan]
bad_u_resolution = [1e-20, 1e-1, np.inf, np.nan]
@pytest.mark.parametrize("order", bad_orders)
def test_bad_orders(self, order):
dist = StandardNormal()
msg = r"`order` must be an integer in the range \[3, 17\]."
with pytest.raises(ValueError, match=msg):
NumericalInversePolynomial(dist, order=order)
@pytest.mark.parametrize("u_resolution", bad_u_resolution)
def test_bad_u_resolution(self, u_resolution):
msg = r"`u_resolution` must be between 1e-15 and 1e-5."
with pytest.raises(ValueError, match=msg):
NumericalInversePolynomial(StandardNormal(),
u_resolution=u_resolution)
def test_bad_args(self):
class BadDist:
def cdf(self, x):
return stats.norm._cdf(x)
dist = BadDist()
msg = r"Either of the methods `pdf` or `logpdf` must be specified"
with pytest.raises(ValueError, match=msg):
rng = NumericalInversePolynomial(dist)
dist = StandardNormal()
rng = NumericalInversePolynomial(dist)
msg = r"`sample_size` must be greater than or equal to 1000."
with pytest.raises(ValueError, match=msg):
rng.u_error(10)
class Distribution:
def pdf(self, x):
return np.exp(-0.5 * x*x)
dist = Distribution()
rng = NumericalInversePolynomial(dist)
msg = r"Exact CDF required but not found."
with pytest.raises(ValueError, match=msg):
rng.u_error()
def test_logpdf_pdf_consistency(self):
# 1. check that PINV works with pdf and logpdf only
# 2. check that generated ppf is the same (up to a small tolerance)
class MyDist:
pass
        # create generator from dist with only pdf
dist_pdf = MyDist()
dist_pdf.pdf = lambda x: math.exp(-x*x/2)
rng1 = NumericalInversePolynomial(dist_pdf)
# create dist with only logpdf
dist_logpdf = MyDist()
dist_logpdf.logpdf = lambda x: -x*x/2
rng2 = NumericalInversePolynomial(dist_logpdf)
q = np.linspace(1e-5, 1-1e-5, num=100)
assert_allclose(rng1.ppf(q), rng2.ppf(q))
class TestNumericalInverseHermite:
# / (1 +sin(2 Pi x))/2 if |x| <= 1
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_hinv.c)
class dist0:
def pdf(self, x):
return 0.5*(1. + np.sin(2.*np.pi*x))
def dpdf(self, x):
return np.pi*np.cos(2.*np.pi*x)
def cdf(self, x):
return (1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) / (4.*np.pi)
def support(self):
return -1, 1
    # / Max(sin(2 Pi x), 0)*Pi/2 if -1 < x <= 0.5
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_hinv.c)
class dist1:
def pdf(self, x):
if (x <= -0.5):
return np.sin((2. * np.pi) * x) * 0.5 * np.pi
if (x < 0.):
return 0.
if (x <= 0.5):
return np.sin((2. * np.pi) * x) * 0.5 * np.pi
def dpdf(self, x):
if (x <= -0.5):
return np.cos((2. * np.pi) * x) * np.pi * np.pi
if (x < 0.):
return 0.
if (x <= 0.5):
return np.cos((2. * np.pi) * x) * np.pi * np.pi
def cdf(self, x):
if (x <= -0.5):
return 0.25 * (1 - np.cos((2. * np.pi) * x))
if (x < 0.):
return 0.5
if (x <= 0.5):
return 0.75 - 0.25 * np.cos((2. * np.pi) * x)
def support(self):
return -1, 0.5
dists = [dist0(), dist1()]
# exact mean and variance of the distributions in the list dists
mv0 = [-1/(2*np.pi), 1/3 - 1/(4*np.pi*np.pi)]
mv1 = [-1/4, 3/8-1/(2*np.pi*np.pi) - 1/16]
mvs = [mv0, mv1]
@pytest.mark.parametrize("dist, mv_ex",
zip(dists, mvs))
@pytest.mark.parametrize("order", [3, 5])
def test_basic(self, dist, mv_ex, order):
rng = NumericalInverseHermite(dist, order=order, random_state=42)
check_cont_samples(rng, dist, mv_ex)
# test domains with inf + nan in them. need to write a custom test for
# this because not all methods support infinite tails.
@pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
def test_inf_nan_domains(self, domain, err, msg):
with pytest.raises(err, match=msg):
NumericalInverseHermite(StandardNormal(), domain=domain)
def basic_test_all_scipy_dists(self, distname, shapes):
slow_dists = {'ksone', 'kstwo', 'levy_stable', 'skewnorm'}
fail_dists = {'beta', 'gausshyper', 'geninvgauss', 'ncf', 'nct',
'norminvgauss', 'genhyperbolic', 'studentized_range',
'vonmises', 'kappa4', 'invgauss', 'wald'}
if distname in slow_dists:
pytest.skip("Distribution is too slow")
if distname in fail_dists:
# specific reasons documented in gh-13319
# https://github.com/scipy/scipy/pull/13319#discussion_r626188955
pytest.xfail("Fails - usually due to inaccurate CDF/PDF")
np.random.seed(0)
dist = getattr(stats, distname)(*shapes)
fni = NumericalInverseHermite(dist)
x = np.random.rand(10)
p_tol = np.max(np.abs(dist.ppf(x)-fni.ppf(x))/np.abs(dist.ppf(x)))
u_tol = np.max(np.abs(dist.cdf(fni.ppf(x)) - x))
assert p_tol < 1e-8
assert u_tol < 1e-12
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.xslow
@pytest.mark.parametrize(("distname", "shapes"), distcont)
def test_basic_all_scipy_dists(self, distname, shapes):
# if distname == "truncnorm":
# pytest.skip("Tested separately")
self.basic_test_all_scipy_dists(distname, shapes)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_basic_truncnorm_gh17155(self):
self.basic_test_all_scipy_dists("truncnorm", (0.1, 2))
def test_input_validation(self):
match = r"`order` must be either 1, 3, or 5."
with pytest.raises(ValueError, match=match):
NumericalInverseHermite(StandardNormal(), order=2)
match = "`cdf` required but not found"
with pytest.raises(ValueError, match=match):
NumericalInverseHermite("norm")
match = "could not convert string to float"
with pytest.raises(ValueError, match=match):
NumericalInverseHermite(StandardNormal(),
u_resolution='ekki')
rngs = [None, 0, np.random.RandomState(0)]
rngs.append(np.random.default_rng(0)) # type: ignore
sizes = [(None, tuple()), (8, (8,)), ((4, 5, 6), (4, 5, 6))]
@pytest.mark.parametrize('rng', rngs)
@pytest.mark.parametrize('size_in, size_out', sizes)
def test_RVS(self, rng, size_in, size_out):
dist = StandardNormal()
fni = NumericalInverseHermite(dist)
rng2 = deepcopy(rng)
rvs = fni.rvs(size=size_in, random_state=rng)
if size_in is not None:
assert rvs.shape == size_out
if rng2 is not None:
rng2 = check_random_state(rng2)
uniform = rng2.uniform(size=size_in)
rvs2 = stats.norm.ppf(uniform)
assert_allclose(rvs, rvs2)
def test_inaccurate_CDF(self):
# CDF function with inaccurate tail cannot be inverted; see gh-13319
# https://github.com/scipy/scipy/pull/13319#discussion_r626188955
shapes = (2.3098496451481823, 0.6268795430096368)
match = ("98 : one or more intervals very short; possibly due to "
"numerical problems with a pole or very flat tail")
# fails with default tol
with pytest.warns(RuntimeWarning, match=match):
NumericalInverseHermite(stats.beta(*shapes))
# no error with coarser tol
NumericalInverseHermite(stats.beta(*shapes), u_resolution=1e-8)
def test_custom_distribution(self):
dist1 = StandardNormal()
fni1 = NumericalInverseHermite(dist1)
dist2 = stats.norm()
fni2 = NumericalInverseHermite(dist2)
assert_allclose(fni1.rvs(random_state=0), fni2.rvs(random_state=0))
u = [
# check the correctness of the PPF for equidistant points between
# 0.02 and 0.98.
np.linspace(0., 1., num=10000),
# test the PPF method for empty arrays
[], [[]],
# test if nans and infs return nan result.
[np.nan], [-np.inf, np.nan, np.inf],
# test if a scalar is returned for a scalar input.
0,
# test for arrays with nans, values greater than 1 and less than 0,
# and some valid values.
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
]
@pytest.mark.parametrize("u", u)
def test_ppf(self, u):
dist = StandardNormal()
rng = NumericalInverseHermite(dist, u_resolution=1e-12)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in greater")
sup.filter(RuntimeWarning, "invalid value encountered in "
"greater_equal")
sup.filter(RuntimeWarning, "invalid value encountered in less")
sup.filter(RuntimeWarning, "invalid value encountered in "
"less_equal")
res = rng.ppf(u)
expected = stats.norm.ppf(u)
assert_allclose(res, expected, rtol=1e-9, atol=3e-10)
assert res.shape == expected.shape
def test_u_error(self):
dist = StandardNormal()
rng = NumericalInverseHermite(dist, u_resolution=1e-10)
max_error, mae = rng.u_error()
assert max_error < 1e-10
assert mae <= max_error
with suppress_warnings() as sup:
# ignore warning about u-resolution being too small.
sup.filter(RuntimeWarning)
rng = NumericalInverseHermite(dist, u_resolution=1e-14)
max_error, mae = rng.u_error()
assert max_error < 1e-14
assert mae <= max_error
class TestDiscreteGuideTable:
basic_fail_dists = {
'nchypergeom_fisher', # numerical errors on tails
'nchypergeom_wallenius', # numerical errors on tails
'randint' # fails on 32-bit ubuntu
}
def test_guide_factor_gt3_raises_warning(self):
pv = [0.1, 0.3, 0.6]
urng = np.random.default_rng()
with pytest.warns(RuntimeWarning):
DiscreteGuideTable(pv, random_state=urng, guide_factor=7)
def test_guide_factor_zero_raises_warning(self):
pv = [0.1, 0.3, 0.6]
urng = np.random.default_rng()
with pytest.warns(RuntimeWarning):
DiscreteGuideTable(pv, random_state=urng, guide_factor=0)
def test_negative_guide_factor_raises_warning(self):
        # The UNU.RAN wrapper raises this warning automatically, and the
        # message is already informative, so here we only check that a
        # warning is raised.
pv = [0.1, 0.3, 0.6]
urng = np.random.default_rng()
with pytest.warns(RuntimeWarning):
DiscreteGuideTable(pv, random_state=urng, guide_factor=-1)
@pytest.mark.parametrize("distname, params", distdiscrete)
def test_basic(self, distname, params):
if distname in self.basic_fail_dists:
msg = ("DGT fails on these probably because of large domains "
"and small computation errors in PMF.")
pytest.skip(msg)
if not isinstance(distname, str):
dist = distname
else:
dist = getattr(stats, distname)
dist = dist(*params)
domain = dist.support()
if not np.isfinite(domain[1] - domain[0]):
# DGT only works with finite domain. So, skip the distributions
# with infinite tails.
pytest.skip("DGT only works with a finite domain.")
k = np.arange(domain[0], domain[1]+1)
pv = dist.pmf(k)
mv_ex = dist.stats('mv')
rng = DiscreteGuideTable(dist, random_state=42)
check_discr_samples(rng, pv, mv_ex)
u = [
        # check the correctness of the PPF for equidistant points between
        # 0 and 1.
np.linspace(0, 1, num=10000),
# test the PPF method for empty arrays
[], [[]],
# test if nans and infs return nan result.
[np.nan], [-np.inf, np.nan, np.inf],
# test if a scalar is returned for a scalar input.
0,
# test for arrays with nans, values greater than 1 and less than 0,
# and some valid values.
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
]
@pytest.mark.parametrize('u', u)
def test_ppf(self, u):
n, p = 4, 0.1
dist = stats.binom(n, p)
rng = DiscreteGuideTable(dist, random_state=42)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in greater")
sup.filter(RuntimeWarning, "invalid value encountered in "
"greater_equal")
sup.filter(RuntimeWarning, "invalid value encountered in less")
sup.filter(RuntimeWarning, "invalid value encountered in "
"less_equal")
res = rng.ppf(u)
expected = stats.binom.ppf(u, n, p)
assert_equal(res.shape, expected.shape)
assert_equal(res, expected)
@pytest.mark.parametrize("pv, msg", bad_pv_common)
def test_bad_pv(self, pv, msg):
with pytest.raises(ValueError, match=msg):
DiscreteGuideTable(pv)
# DGT doesn't support infinite tails. So, it should throw an error when
# inf is present in the domain.
inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
(0, np.inf), (-np.inf, 0)]
@pytest.mark.parametrize("domain", inf_domain)
def test_inf_domain(self, domain):
with pytest.raises(ValueError, match=r"must be finite"):
DiscreteGuideTable(stats.binom(10, 0.2), domain=domain)
class TestSimpleRatioUniforms:
# pdf with piecewise linear function as transformed density
# with T = -1/sqrt with shift. Taken from UNU.RAN test suite
# (from file t_srou.c)
class dist:
def __init__(self, shift):
self.shift = shift
self.mode = shift
def pdf(self, x):
x -= self.shift
y = 1. / (abs(x) + 1.)
return 0.5 * y * y
def cdf(self, x):
x -= self.shift
if x <= 0.:
return 0.5 / (1. - x)
else:
return 1. - 0.5 / (1. + x)
dists = [dist(0.), dist(10000.)]
# exact mean and variance of the distributions in the list dists
mv1 = [0., np.inf]
mv2 = [10000., np.inf]
mvs = [mv1, mv2]
@pytest.mark.parametrize("dist, mv_ex",
zip(dists, mvs))
def test_basic(self, dist, mv_ex):
rng = SimpleRatioUniforms(dist, mode=dist.mode, random_state=42)
check_cont_samples(rng, dist, mv_ex)
rng = SimpleRatioUniforms(dist, mode=dist.mode,
cdf_at_mode=dist.cdf(dist.mode),
random_state=42)
check_cont_samples(rng, dist, mv_ex)
# test domains with inf + nan in them. need to write a custom test for
# this because not all methods support infinite tails.
@pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
def test_inf_nan_domains(self, domain, err, msg):
with pytest.raises(err, match=msg):
SimpleRatioUniforms(StandardNormal(), domain=domain)
def test_bad_args(self):
# pdf_area < 0
with pytest.raises(ValueError, match=r"`pdf_area` must be > 0"):
SimpleRatioUniforms(StandardNormal(), mode=0, pdf_area=-1)
class TestRatioUniforms:
""" Tests for rvs_ratio_uniforms.
"""
def test_rv_generation(self):
# use KS test to check distribution of rvs
# normal distribution
f = stats.norm.pdf
v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
u = np.sqrt(f(0))
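        # a hedged note on where these bounds come from (the standard
        # ratio-of-uniforms envelope, assuming the N(0, 1) pdf f):
        #   umax = sup_x sqrt(f(x)) = sqrt(f(0)), and since sqrt(f(x))
        #   is proportional to exp(-x**2/4),
        #   d/dx [x*exp(-x**2/4)] = 0 at x = sqrt(2), giving
        #   vmax = sqrt(2)*sqrt(f(sqrt(2))) and vmin = -vmax by symmetry.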
gen = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12345)
assert_equal(stats.kstest(gen.rvs(2500), 'norm')[1] > 0.25, True)
# exponential distribution
gen = RatioUniforms(lambda x: np.exp(-x), umax=1,
vmin=0, vmax=2*np.exp(-1), random_state=12345)
assert_equal(stats.kstest(gen.rvs(1000), 'expon')[1] > 0.25, True)
def test_shape(self):
# test shape of return value depending on size parameter
f = stats.norm.pdf
v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
u = np.sqrt(f(0))
gen1 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
gen2 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
gen3 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
r1, r2, r3 = gen1.rvs(3), gen2.rvs((3,)), gen3.rvs((3, 1))
assert_equal(r1, r2)
assert_equal(r2, r3.flatten())
assert_equal(r1.shape, (3,))
assert_equal(r3.shape, (3, 1))
gen4 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12)
gen5 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12)
r4, r5 = gen4.rvs(size=(3, 3, 3)), gen5.rvs(size=27)
assert_equal(r4.flatten(), r5)
assert_equal(r4.shape, (3, 3, 3))
gen6 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
gen7 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
gen8 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
r6, r7, r8 = gen6.rvs(), gen7.rvs(1), gen8.rvs((1,))
assert_equal(r6, r7)
assert_equal(r7, r8)
def test_random_state(self):
f = stats.norm.pdf
v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
umax = np.sqrt(f(0))
gen1 = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=1234)
r1 = gen1.rvs(10)
np.random.seed(1234)
gen2 = RatioUniforms(f, umax=umax, vmin=-v, vmax=v)
r2 = gen2.rvs(10)
assert_equal(r1, r2)
def test_exceptions(self):
f = stats.norm.pdf
# need vmin < vmax
with assert_raises(ValueError, match="vmin must be smaller than vmax"):
RatioUniforms(pdf=f, umax=1, vmin=3, vmax=1)
with assert_raises(ValueError, match="vmin must be smaller than vmax"):
RatioUniforms(pdf=f, umax=1, vmin=1, vmax=1)
# need umax > 0
with assert_raises(ValueError, match="umax must be positive"):
RatioUniforms(pdf=f, umax=-1, vmin=1, vmax=3)
with assert_raises(ValueError, match="umax must be positive"):
RatioUniforms(pdf=f, umax=0, vmin=1, vmax=3)
| 54,156
| 36.635163
| 99
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_axis_nan_policy.py
|
# Many scipy.stats functions support `axis` and `nan_policy` parameters.
# When the two are combined, it can be tricky to get all the behavior just
# right. This file contains a suite of common tests for scipy.stats functions
# that support `axis` and `nan_policy` and additional tests for some associated
# functions in stats._util.
from itertools import product, combinations_with_replacement, permutations
import re
import pickle
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy import stats
from scipy.stats._axis_nan_policy import _masked_arrays_2_sentinel_arrays
def unpack_ttest_result(res):
low, high = res.confidence_interval()
return (res.statistic, res.pvalue, res.df, res._standard_error,
res._estimate, low, high)
def _get_ttest_ci(ttest):
# get a function that returns the CI bounds of provided `ttest`
def ttest_ci(*args, **kwargs):
res = ttest(*args, **kwargs)
return res.confidence_interval()
return ttest_ci
axis_nan_policy_cases = [
# function, args, kwds, number of samples, number of outputs,
# ... paired, unpacker function
# args, kwds typically aren't needed; just showing that they work
(stats.kruskal, tuple(), dict(), 3, 2, False, None), # 4 samples is slow
(stats.ranksums, ('less',), dict(), 2, 2, False, None),
(stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, 2, False, None),
(stats.wilcoxon, ('pratt',), {'mode': 'auto'}, 2, 2, True,
lambda res: (res.statistic, res.pvalue)),
(stats.wilcoxon, tuple(), dict(), 1, 2, True,
lambda res: (res.statistic, res.pvalue)),
(stats.wilcoxon, tuple(), {'mode': 'approx'}, 1, 3, True,
lambda res: (res.statistic, res.pvalue, res.zstatistic)),
(stats.gmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.hmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.pmean, (1.42,), dict(), 1, 1, False, lambda x: (x,)),
(stats.sem, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.iqr, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.kurtosis, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.skew, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.kstat, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.kstatvar, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.moment, tuple(), dict(), 1, 1, False, lambda x: (x,)),
(stats.moment, tuple(), dict(moment=[1, 2]), 1, 2, False, None),
(stats.jarque_bera, tuple(), dict(), 1, 2, False, None),
(stats.ttest_1samp, (np.array([0]),), dict(), 1, 7, False,
unpack_ttest_result),
(stats.ttest_rel, tuple(), dict(), 2, 7, True, unpack_ttest_result),
(stats.ttest_ind, tuple(), dict(), 2, 7, False, unpack_ttest_result),
(_get_ttest_ci(stats.ttest_1samp), (0,), dict(), 1, 2, False, None),
(_get_ttest_ci(stats.ttest_rel), tuple(), dict(), 2, 2, True, None),
(_get_ttest_ci(stats.ttest_ind), tuple(), dict(), 2, 2, False, None),
(stats.mode, tuple(), dict(), 1, 2, True, lambda x: (x.mode, x.count))
]
# If the message is one of those expected, put nans in
# appropriate places of `statistics` and `pvalues`
too_small_messages = {"The input contains nan", # for nan_policy="raise"
"Degrees of freedom <= 0 for slice",
"x and y should have at least 5 elements",
"Data must be at least length 3",
"The sample must contain at least two",
"x and y must contain at least two",
"division by zero",
"Mean of empty slice",
"Data passed to ks_2samp must not be empty",
"Not enough test observations",
"Not enough other observations",
"At least one observation is required",
"zero-size array to reduction operation maximum",
"`x` and `y` must be of nonzero size.",
"The exact distribution of the Wilcoxon test",
"Data input must not be empty"}
# If the message is one of these, results of the function may be inaccurate,
# but NaNs are not to be placed
inaccuracy_messages = {"Precision loss occurred in moment calculation",
"Sample size too small for normal approximation."}
# For some functions, nan_policy='propagate' should not just return NaNs
override_propagate_funcs = {stats.mode}
def _mixed_data_generator(n_samples, n_repetitions, axis, rng,
paired=False):
# generate random samples to check the response of hypothesis tests to
# samples with different (but broadcastable) shapes and various
# nan patterns (e.g. all nans, some nans, no nans) along axis-slices
data = []
for i in range(n_samples):
n_patterns = 6 # number of distinct nan patterns
n_obs = 20 if paired else 20 + i # observations per axis-slice
x = np.ones((n_repetitions, n_patterns, n_obs)) * np.nan
for j in range(n_repetitions):
samples = x[j, :, :]
# case 0: axis-slice with all nans (0 reals)
# cases 1-3: axis-slice with 1-3 reals (the rest nans)
# case 4: axis-slice with mostly (all but two) reals
# case 5: axis slice with all reals
for k, n_reals in enumerate([0, 1, 2, 3, n_obs-2, n_obs]):
                # for cases 1-3, need paired nans to be in the same place
indices = rng.permutation(n_obs)[:n_reals]
samples[k, indices] = rng.random(size=n_reals)
# permute the axis-slices just to show that order doesn't matter
samples[:] = rng.permutation(samples, axis=0)
# For multi-sample tests, we want to test broadcasting and check
# that nan policy works correctly for each nan pattern for each input.
        # This takes care of both simultaneously.
new_shape = [n_repetitions] + [1]*n_samples + [n_obs]
new_shape[1 + i] = 6
x = x.reshape(new_shape)
x = np.moveaxis(x, -1, axis)
data.append(x)
return data
def _homogeneous_data_generator(n_samples, n_repetitions, axis, rng,
paired=False, all_nans=True):
# generate random samples to check the response of hypothesis tests to
# samples with different (but broadcastable) shapes and homogeneous
# data (all nans or all finite)
data = []
for i in range(n_samples):
n_obs = 20 if paired else 20 + i # observations per axis-slice
shape = [n_repetitions] + [1]*n_samples + [n_obs]
shape[1 + i] = 2
x = np.ones(shape) * np.nan if all_nans else rng.random(shape)
x = np.moveaxis(x, -1, axis)
data.append(x)
return data
def nan_policy_1d(hypotest, data1d, unpacker, *args, n_outputs=2,
nan_policy='raise', paired=False, _no_deco=True, **kwds):
# Reference implementation for how `nan_policy` should work for 1d samples
if nan_policy == 'raise':
for sample in data1d:
if np.any(np.isnan(sample)):
raise ValueError("The input contains nan values")
elif (nan_policy == 'propagate'
and hypotest not in override_propagate_funcs):
# For all hypothesis tests tested, returning nans is the right thing.
# But many hypothesis tests don't propagate correctly (e.g. they treat
# np.nan the same as np.inf, which doesn't make sense when ranks are
# involved) so override that behavior here.
for sample in data1d:
if np.any(np.isnan(sample)):
return np.full(n_outputs, np.nan)
elif nan_policy == 'omit':
# manually omit nans (or pairs in which at least one element is nan)
if not paired:
data1d = [sample[~np.isnan(sample)] for sample in data1d]
else:
nan_mask = np.isnan(data1d[0])
for sample in data1d[1:]:
nan_mask = np.logical_or(nan_mask, np.isnan(sample))
data1d = [sample[~nan_mask] for sample in data1d]
return unpacker(hypotest(*data1d, *args, _no_deco=_no_deco, **kwds))
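# A hedged usage sketch of the reference implementation above: with nans
# omitted from a single 1-d sample,
#     nan_policy_1d(stats.gmean, [x], lambda res: (res,),
#                   nan_policy='omit')
# should match stats.gmean(x[~np.isnan(x)]).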
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
"paired", "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("axis"), (1,))
@pytest.mark.parametrize(("data_generator"), ("mixed",))
def test_axis_nan_policy_fast(hypotest, args, kwds, n_samples, n_outputs,
paired, unpacker, nan_policy, axis,
data_generator):
_axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
unpacker, nan_policy, axis, data_generator)
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
"paired", "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("axis"), range(-3, 3))
@pytest.mark.parametrize(("data_generator"),
("all_nans", "all_finite", "mixed"))
def test_axis_nan_policy_full(hypotest, args, kwds, n_samples, n_outputs,
paired, unpacker, nan_policy, axis,
data_generator):
_axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
unpacker, nan_policy, axis, data_generator)
def _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
unpacker, nan_policy, axis, data_generator):
# Tests the 1D and vectorized behavior of hypothesis tests against a
# reference implementation (nan_policy_1d with np.ndenumerate)
# Some hypothesis tests return a non-iterable that needs an `unpacker` to
# extract the statistic and p-value. For those that don't:
if not unpacker:
def unpacker(res):
return res
rng = np.random.default_rng(0)
# Generate multi-dimensional test data with all important combinations
# of patterns of nans along `axis`
n_repetitions = 3 # number of repetitions of each pattern
data_gen_kwds = {'n_samples': n_samples, 'n_repetitions': n_repetitions,
'axis': axis, 'rng': rng, 'paired': paired}
if data_generator == 'mixed':
inherent_size = 6 # number of distinct types of patterns
data = _mixed_data_generator(**data_gen_kwds)
elif data_generator == 'all_nans':
inherent_size = 2 # hard-coded in _homogeneous_data_generator
data_gen_kwds['all_nans'] = True
data = _homogeneous_data_generator(**data_gen_kwds)
elif data_generator == 'all_finite':
inherent_size = 2 # hard-coded in _homogeneous_data_generator
data_gen_kwds['all_nans'] = False
data = _homogeneous_data_generator(**data_gen_kwds)
output_shape = [n_repetitions] + [inherent_size]*n_samples
# To generate reference behavior to compare against, loop over the axis-
# slices in data. Make indexing easier by moving `axis` to the end and
# broadcasting all samples to the same shape.
data_b = [np.moveaxis(sample, axis, -1) for sample in data]
data_b = [np.broadcast_to(sample, output_shape + [sample.shape[-1]])
for sample in data_b]
statistics = np.zeros(output_shape)
pvalues = np.zeros(output_shape)
for i, _ in np.ndenumerate(statistics):
data1d = [sample[i] for sample in data_b]
with np.errstate(divide='ignore', invalid='ignore'):
try:
res1d = nan_policy_1d(hypotest, data1d, unpacker, *args,
n_outputs=n_outputs,
nan_policy=nan_policy,
paired=paired, _no_deco=True, **kwds)
# Eventually we'll check the results of a single, vectorized
# call of `hypotest` against the arrays `statistics` and
# `pvalues` populated using the reference `nan_policy_1d`.
# But while we're at it, check the results of a 1D call to
# `hypotest` against the reference `nan_policy_1d`.
res1db = unpacker(hypotest(*data1d, *args,
nan_policy=nan_policy, **kwds))
assert_equal(res1db[0], res1d[0])
if len(res1db) == 2:
assert_equal(res1db[1], res1d[1])
# When there is not enough data in 1D samples, many existing
                # hypothesis tests raise errors instead of returning nans.
# For vectorized calls, we put nans in the corresponding elements
# of the output.
except (RuntimeWarning, UserWarning, ValueError,
ZeroDivisionError) as e:
# whatever it is, make sure same error is raised by both
# `nan_policy_1d` and `hypotest`
with pytest.raises(type(e), match=re.escape(str(e))):
nan_policy_1d(hypotest, data1d, unpacker, *args,
n_outputs=n_outputs, nan_policy=nan_policy,
paired=paired, _no_deco=True, **kwds)
with pytest.raises(type(e), match=re.escape(str(e))):
hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)
if any([str(e).startswith(message)
for message in too_small_messages]):
res1d = np.full(n_outputs, np.nan)
elif any([str(e).startswith(message)
for message in inaccuracy_messages]):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sup.filter(UserWarning)
res1d = nan_policy_1d(hypotest, data1d, unpacker,
*args, n_outputs=n_outputs,
nan_policy=nan_policy,
paired=paired, _no_deco=True,
**kwds)
else:
raise e
statistics[i] = res1d[0]
if len(res1d) == 2:
pvalues[i] = res1d[1]
# Perform a vectorized call to the hypothesis test.
# If `nan_policy == 'raise'`, check that it raises the appropriate error.
    # If not, compare the output against `statistics` and `pvalues`
    if nan_policy == 'raise' and data_generator != "all_finite":
message = 'The input contains nan values'
with pytest.raises(ValueError, match=message):
hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)
else:
with suppress_warnings() as sup, \
np.errstate(divide='ignore', invalid='ignore'):
sup.filter(RuntimeWarning, "Precision loss occurred in moment")
sup.filter(UserWarning, "Sample size too small for normal "
"approximation.")
res = unpacker(hypotest(*data, axis=axis, nan_policy=nan_policy,
*args, **kwds))
assert_allclose(res[0], statistics, rtol=1e-15)
assert_equal(res[0].dtype, statistics.dtype)
if len(res) == 2:
assert_allclose(res[1], pvalues, rtol=1e-15)
assert_equal(res[1].dtype, pvalues.dtype)


@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
"paired", "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("data_generator"),
("all_nans", "all_finite", "mixed", "empty"))
def test_axis_nan_policy_axis_is_None(hypotest, args, kwds, n_samples,
n_outputs, paired, unpacker, nan_policy,
data_generator):
# check for correct behavior when `axis=None`
if not unpacker:
def unpacker(res):
return res
rng = np.random.default_rng(0)
if data_generator == "empty":
data = [rng.random((2, 0)) for i in range(n_samples)]
else:
data = [rng.random((2, 20)) for i in range(n_samples)]
if data_generator == "mixed":
masks = [rng.random((2, 20)) > 0.9 for i in range(n_samples)]
for sample, mask in zip(data, masks):
sample[mask] = np.nan
elif data_generator == "all_nans":
data = [sample * np.nan for sample in data]
data_raveled = [sample.ravel() for sample in data]
if nan_policy == 'raise' and data_generator not in {"all_finite", "empty"}:
message = 'The input contains nan values'
# check for correct behavior whether or not data is 1d to begin with
with pytest.raises(ValueError, match=message):
hypotest(*data, axis=None, nan_policy=nan_policy,
*args, **kwds)
with pytest.raises(ValueError, match=message):
hypotest(*data_raveled, axis=None, nan_policy=nan_policy,
*args, **kwds)
else:
# behavior of reference implementation with 1d input, hypotest with 1d
# input, and hypotest with Nd input should match, whether that means
# that outputs are equal or they raise the same exception
ea_str, eb_str, ec_str = None, None, None
with np.errstate(divide='ignore', invalid='ignore'):
try:
res1da = nan_policy_1d(hypotest, data_raveled, unpacker, *args,
n_outputs=n_outputs,
nan_policy=nan_policy, paired=paired,
_no_deco=True, **kwds)
except (RuntimeWarning, ValueError, ZeroDivisionError) as ea:
ea_str = str(ea)
try:
res1db = unpacker(hypotest(*data_raveled, *args,
nan_policy=nan_policy, **kwds))
except (RuntimeWarning, ValueError, ZeroDivisionError) as eb:
eb_str = str(eb)
try:
res1dc = unpacker(hypotest(*data, *args, axis=None,
nan_policy=nan_policy, **kwds))
except (RuntimeWarning, ValueError, ZeroDivisionError) as ec:
ec_str = str(ec)
if ea_str or eb_str or ec_str:
assert any([str(ea_str).startswith(message)
for message in too_small_messages])
assert ea_str == eb_str == ec_str
else:
assert_equal(res1db, res1da)
assert_equal(res1dc, res1da)
for item in list(res1da) + list(res1db) + list(res1dc):
# Most functions naturally return NumPy numbers, which
# are drop-in replacements for the Python versions but with
# desirable attributes. Make sure this is consistent.
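                # (e.g. `np.float64(0.5)` behaves like the float 0.5 but also
                # has `dtype`, `ndim`, etc.; `np.issubdtype` checks `dtype`.)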
assert np.issubdtype(item.dtype, np.number)


# Test keepdims for:
# - single-output and multi-output functions (gmean and mannwhitneyu)
# - Axis negative, positive, None, and tuple
# - 1D with no NaNs
# - 1D with NaN propagation
# - Zero-sized output
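# (Example: with `keepdims=True`, each reduced axis is kept with length one,
# so a statistic computed over `axis=1` of shape-(2, 3, 4) samples has shape
# (2, 1, 4); with `keepdims=False` it would be (2, 4).)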
@pytest.mark.parametrize("nan_policy", ("omit", "propagate"))
@pytest.mark.parametrize(
("hypotest", "args", "kwds", "n_samples", "unpacker"),
((stats.gmean, tuple(), dict(), 1, lambda x: (x,)),
(stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, None))
)
@pytest.mark.parametrize(
("sample_shape", "axis_cases"),
(((2, 3, 3, 4), (None, 0, -1, (0, 2), (1, -1), (3, 1, 2, 0))),
((10, ), (0, -1)),
((20, 0), (0, 1)))
)
def test_keepdims(hypotest, args, kwds, n_samples, unpacker,
sample_shape, axis_cases, nan_policy):
# test if keepdims parameter works correctly
if not unpacker:
def unpacker(res):
return res
rng = np.random.default_rng(0)
data = [rng.random(sample_shape) for _ in range(n_samples)]
nan_data = [sample.copy() for sample in data]
nan_mask = [rng.random(sample_shape) < 0.2 for _ in range(n_samples)]
for sample, mask in zip(nan_data, nan_mask):
sample[mask] = np.nan
for axis in axis_cases:
expected_shape = list(sample_shape)
if axis is None:
            expected_shape = [1] * len(sample_shape)
else:
if isinstance(axis, int):
expected_shape[axis] = 1
else:
for ax in axis:
expected_shape[ax] = 1
expected_shape = tuple(expected_shape)
res = unpacker(hypotest(*data, *args, axis=axis, keepdims=True,
**kwds))
res_base = unpacker(hypotest(*data, *args, axis=axis, keepdims=False,
**kwds))
nan_res = unpacker(hypotest(*nan_data, *args, axis=axis,
keepdims=True, nan_policy=nan_policy,
**kwds))
nan_res_base = unpacker(hypotest(*nan_data, *args, axis=axis,
keepdims=False,
nan_policy=nan_policy, **kwds))
for r, r_base, rn, rn_base in zip(res, res_base, nan_res,
nan_res_base):
assert r.shape == expected_shape
r = np.squeeze(r, axis=axis)
assert_equal(r, r_base)
assert rn.shape == expected_shape
rn = np.squeeze(rn, axis=axis)
assert_equal(rn, rn_base)
@pytest.mark.parametrize(("fun", "nsamp"),
[(stats.kstat, 1),
(stats.kstatvar, 1)])
def test_hypotest_back_compat_no_axis(fun, nsamp):
m, n = 8, 9
rng = np.random.default_rng(0)
x = rng.random((nsamp, m, n))
res = fun(*x)
res2 = fun(*x, _no_deco=True)
res3 = fun([xi.ravel() for xi in x])
assert_equal(res, res2)
assert_equal(res, res3)
@pytest.mark.parametrize(("axis"), (0, 1, 2))
def test_axis_nan_policy_decorated_positional_axis(axis):
# Test for correct behavior of function decorated with
# _axis_nan_policy_decorator whether `axis` is provided as positional or
# keyword argument
shape = (8, 9, 10)
rng = np.random.default_rng(0)
x = rng.random(shape)
y = rng.random(shape)
res1 = stats.mannwhitneyu(x, y, True, 'two-sided', axis)
res2 = stats.mannwhitneyu(x, y, True, 'two-sided', axis=axis)
assert_equal(res1, res2)
message = "mannwhitneyu() got multiple values for argument 'axis'"
with pytest.raises(TypeError, match=re.escape(message)):
stats.mannwhitneyu(x, y, True, 'two-sided', axis, axis=axis)


def test_axis_nan_policy_decorated_positional_args():
# Test for correct behavior of function decorated with
# _axis_nan_policy_decorator when function accepts *args
shape = (3, 8, 9, 10)
rng = np.random.default_rng(0)
x = rng.random(shape)
x[0, 0, 0, 0] = np.nan
stats.kruskal(*x)
message = "kruskal() got an unexpected keyword argument 'samples'"
with pytest.raises(TypeError, match=re.escape(message)):
stats.kruskal(samples=x)
with pytest.raises(TypeError, match=re.escape(message)):
stats.kruskal(*x, samples=x)


def test_axis_nan_policy_decorated_keyword_samples():
# Test for correct behavior of function decorated with
# _axis_nan_policy_decorator whether samples are provided as positional or
# keyword arguments
shape = (2, 8, 9, 10)
rng = np.random.default_rng(0)
x = rng.random(shape)
x[0, 0, 0, 0] = np.nan
res1 = stats.mannwhitneyu(*x)
res2 = stats.mannwhitneyu(x=x[0], y=x[1])
assert_equal(res1, res2)
message = "mannwhitneyu() got multiple values for argument"
with pytest.raises(TypeError, match=re.escape(message)):
stats.mannwhitneyu(*x, x=x[0], y=x[1])
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
"paired", "unpacker"), axis_nan_policy_cases)
def test_axis_nan_policy_decorated_pickled(hypotest, args, kwds, n_samples,
n_outputs, paired, unpacker):
if "ttest_ci" in hypotest.__name__:
pytest.skip("Can't pickle functions defined within functions.")
rng = np.random.default_rng(0)
# Some hypothesis tests return a non-iterable that needs an `unpacker` to
# extract the statistic and p-value. For those that don't:
if not unpacker:
def unpacker(res):
return res
data = rng.uniform(size=(n_samples, 2, 30))
pickled_hypotest = pickle.dumps(hypotest)
unpickled_hypotest = pickle.loads(pickled_hypotest)
res1 = unpacker(hypotest(*data, *args, axis=-1, **kwds))
res2 = unpacker(unpickled_hypotest(*data, *args, axis=-1, **kwds))
assert_allclose(res1, res2, rtol=1e-12)


def test_check_empty_inputs():
# Test that _check_empty_inputs is doing its job, at least for single-
# sample inputs. (Multi-sample functionality is tested below.)
# If the input sample is not empty, it should return None.
# If the input sample is empty, it should return an array of NaNs or an
# empty array of appropriate shape. np.mean is used as a reference for the
# output because, like the statistics calculated by these functions,
# it works along and "consumes" `axis` but preserves the other axes.
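    # (e.g. `np.mean(np.zeros((2, 0, 3)), axis=1)` warns "Mean of empty
    # slice" and returns a shape-(2, 3) array of NaNs.)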
for i in range(5):
for combo in combinations_with_replacement([0, 1, 2], i):
for axis in range(len(combo)):
samples = (np.zeros(combo),)
output = stats._axis_nan_policy._check_empty_inputs(samples,
axis)
if output is not None:
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice.")
sup.filter(RuntimeWarning, "invalid value encountered")
reference = samples[0].mean(axis=axis)
np.testing.assert_equal(output, reference)


def _check_arrays_broadcastable(arrays, axis):
# https://numpy.org/doc/stable/user/basics.broadcasting.html
# "When operating on two arrays, NumPy compares their shapes element-wise.
# It starts with the trailing (i.e. rightmost) dimensions and works its
# way left.
# Two dimensions are compatible when
# 1. they are equal, or
# 2. one of them is 1
# ...
# Arrays do not need to have the same number of dimensions."
# (Clarification: if the arrays are compatible according to the criteria
# above and an array runs out of dimensions, it is still compatible.)
# Below, we follow the rules above except ignoring `axis`
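    # (Illustration: shapes (3, 1, 5) and (4, 5) are compatible and broadcast
    # to (3, 4, 5); shapes (3, 2, 5) and (4, 5) are not, since 2 != 4 and
    # neither is 1.)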
n_dims = max([arr.ndim for arr in arrays])
if axis is not None:
# convert to negative axis
axis = (-n_dims + axis) if axis >= 0 else axis
for dim in range(1, n_dims+1): # we'll index from -1 to -n_dims, inclusive
if -dim == axis:
continue # ignore lengths along `axis`
dim_lengths = set()
for arr in arrays:
if dim <= arr.ndim and arr.shape[-dim] != 1:
dim_lengths.add(arr.shape[-dim])
if len(dim_lengths) > 1:
return False
return True


@pytest.mark.slow
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
"paired", "unpacker"), axis_nan_policy_cases)
def test_empty(hypotest, args, kwds, n_samples, n_outputs, paired, unpacker):
# test for correct output shape when at least one input is empty
if hypotest in override_propagate_funcs:
reason = "Doesn't follow the usual pattern. Tested separately."
pytest.skip(reason=reason)
if unpacker is None:
unpacker = lambda res: (res[0], res[1]) # noqa: E731
def small_data_generator(n_samples, n_dims):
def small_sample_generator(n_dims):
# return all possible "small" arrays in up to n_dim dimensions
for i in n_dims:
# "small" means with size along dimension either 0 or 1
for combo in combinations_with_replacement([0, 1, 2], i):
yield np.zeros(combo)
# yield all possible combinations of small samples
gens = [small_sample_generator(n_dims) for i in range(n_samples)]
yield from product(*gens)
n_dims = [2, 3]
for samples in small_data_generator(n_samples, n_dims):
# this test is only for arrays of zero size
if not any(sample.size == 0 for sample in samples):
continue
max_axis = max(sample.ndim for sample in samples)
# need to test for all valid values of `axis` parameter, too
for axis in range(-max_axis, max_axis):
try:
# After broadcasting, all arrays are the same shape, so
# the shape of the output should be the same as a single-
# sample statistic. Use np.mean as a reference.
concat = stats._stats_py._broadcast_concatenate(samples, axis)
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice.")
sup.filter(RuntimeWarning, "invalid value encountered")
expected = np.mean(concat, axis=axis) * np.nan
res = hypotest(*samples, *args, axis=axis, **kwds)
res = unpacker(res)
for i in range(n_outputs):
assert_equal(res[i], expected)
except ValueError:
# confirm that the arrays truly are not broadcastable
assert not _check_arrays_broadcastable(samples, axis)
                # confirm that _both_ `_broadcast_concatenate` and `hypotest`
                # raise this error
message = "Array shapes are incompatible for broadcasting."
with pytest.raises(ValueError, match=message):
stats._stats_py._broadcast_concatenate(samples, axis)
with pytest.raises(ValueError, match=message):
hypotest(*samples, *args, axis=axis, **kwds)


def test_masked_array_2_sentinel_array():
# prepare arrays
np.random.seed(0)
A = np.random.rand(10, 11, 12)
B = np.random.rand(12)
mask = A < 0.5
A = np.ma.masked_array(A, mask)
# set arbitrary elements to special values
# (these values might have been considered for use as sentinel values)
max_float = np.finfo(np.float64).max
max_float2 = np.nextafter(max_float, -np.inf)
max_float3 = np.nextafter(max_float2, -np.inf)
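    # (`np.nextafter(x, -np.inf)` is the largest representable float strictly
    # less than `x`, so these are the three largest float64 values.)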
A[3, 4, 1] = np.nan
A[4, 5, 2] = np.inf
A[5, 6, 3] = max_float
B[8] = np.nan
B[7] = np.inf
B[6] = max_float2
# convert masked A to array with sentinel value, don't modify B
out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([A, B])
A_out, B_out = out_arrays
# check that good sentinel value was chosen (according to intended logic)
assert (sentinel != max_float) and (sentinel != max_float2)
assert sentinel == max_float3
# check that output arrays are as intended
A_reference = A.data
A_reference[A.mask] = sentinel
np.testing.assert_array_equal(A_out, A_reference)
assert B_out is B


def test_masked_dtype():
# When _masked_arrays_2_sentinel_arrays was first added, it always
# upcast the arrays to np.float64. After gh16662, check expected promotion
# and that the expected sentinel is found.
# these are important because the max of the promoted dtype is the first
# candidate to be the sentinel value
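    # (e.g. `np.result_type(np.int16, np.int8)` is int16, so the first
    # sentinel candidate for an int16/int8 pair is `np.iinfo(np.int16).max`.)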
max16 = np.iinfo(np.int16).max
max128c = np.finfo(np.complex128).max
# a is a regular array, b has masked elements, and c has no masked elements
a = np.array([1, 2, max16], dtype=np.int16)
b = np.ma.array([1, 2, 1], dtype=np.int8, mask=[0, 1, 0])
c = np.ma.array([1, 2, 1], dtype=np.complex128, mask=[0, 0, 0])
# check integer masked -> sentinel conversion
out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a, b])
a_out, b_out = out_arrays
assert sentinel == max16-1 # not max16 because max16 was in the data
assert b_out.dtype == np.int16 # check expected promotion
assert_allclose(b_out, [b[0], sentinel, b[-1]]) # check sentinel placement
assert a_out is a # not a masked array, so left untouched
assert not isinstance(b_out, np.ma.MaskedArray) # b became regular array
# similarly with complex
out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([b, c])
b_out, c_out = out_arrays
assert sentinel == max128c # max128c was not in the data
assert b_out.dtype == np.complex128 # b got promoted
assert_allclose(b_out, [b[0], sentinel, b[-1]]) # check sentinel placement
assert not isinstance(b_out, np.ma.MaskedArray) # b became regular array
assert not isinstance(c_out, np.ma.MaskedArray) # c became regular array
# Also, check edge case when a sentinel value cannot be found in the data
min8, max8 = np.iinfo(np.int8).min, np.iinfo(np.int8).max
a = np.arange(min8, max8+1, dtype=np.int8) # use all possible values
mask1 = np.zeros_like(a, dtype=bool)
mask0 = np.zeros_like(a, dtype=bool)
# a masked value can be used as the sentinel
mask1[1] = True
a1 = np.ma.array(a, mask=mask1)
out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a1])
assert sentinel == min8+1
    # unless it's the smallest possible; skipped for simplicity (see code)
mask0[0] = True
a0 = np.ma.array(a, mask=mask0)
message = "This function replaces masked elements with sentinel..."
with pytest.raises(ValueError, match=message):
_masked_arrays_2_sentinel_arrays([a0])
# test that dtype is preserved in functions
a = np.ma.array([1, 2, 3], mask=[0, 1, 0], dtype=np.float32)
assert stats.gmean(a).dtype == np.float32


def test_masked_stat_1d():
# basic test of _axis_nan_policy_factory with 1D masked sample
males = [19, 22, 16, 29, 24]
females = [20, 11, 17, 12]
res = stats.mannwhitneyu(males, females)
# same result when extra nan is omitted
females2 = [20, 11, 17, np.nan, 12]
res2 = stats.mannwhitneyu(males, females2, nan_policy='omit')
np.testing.assert_array_equal(res2, res)
# same result when extra element is masked
females3 = [20, 11, 17, 1000, 12]
mask3 = [False, False, False, True, False]
females3 = np.ma.masked_array(females3, mask=mask3)
res3 = stats.mannwhitneyu(males, females3)
np.testing.assert_array_equal(res3, res)
# same result when extra nan is omitted and additional element is masked
females4 = [20, 11, 17, np.nan, 1000, 12]
mask4 = [False, False, False, False, True, False]
females4 = np.ma.masked_array(females4, mask=mask4)
res4 = stats.mannwhitneyu(males, females4, nan_policy='omit')
np.testing.assert_array_equal(res4, res)
# same result when extra elements, including nan, are masked
females5 = [20, 11, 17, np.nan, 1000, 12]
mask5 = [False, False, False, True, True, False]
females5 = np.ma.masked_array(females5, mask=mask5)
res5 = stats.mannwhitneyu(males, females5, nan_policy='propagate')
res6 = stats.mannwhitneyu(males, females5, nan_policy='raise')
np.testing.assert_array_equal(res5, res)
np.testing.assert_array_equal(res6, res)
@pytest.mark.parametrize(("axis"), range(-3, 3))
def test_masked_stat_3d(axis):
# basic test of _axis_nan_policy_factory with 3D masked sample
np.random.seed(0)
a = np.random.rand(3, 4, 5)
b = np.random.rand(4, 5)
c = np.random.rand(4, 1)
mask_a = a < 0.1
mask_c = [False, False, False, True]
a_masked = np.ma.masked_array(a, mask=mask_a)
c_masked = np.ma.masked_array(c, mask=mask_c)
a_nans = a.copy()
a_nans[mask_a] = np.nan
c_nans = c.copy()
c_nans[mask_c] = np.nan
res = stats.kruskal(a_nans, b, c_nans, nan_policy='omit', axis=axis)
res2 = stats.kruskal(a_masked, b, c_masked, axis=axis)
np.testing.assert_array_equal(res, res2)


def test_mixed_mask_nan_1():
# targeted test of _axis_nan_policy_factory with 2D masked sample:
# omitting samples with masks and nan_policy='omit' are equivalent
# also checks paired-sample sentinel value removal
m, n = 3, 20
axis = -1
np.random.seed(0)
a = np.random.rand(m, n)
b = np.random.rand(m, n)
mask_a1 = np.random.rand(m, n) < 0.2
mask_a2 = np.random.rand(m, n) < 0.1
mask_b1 = np.random.rand(m, n) < 0.15
mask_b2 = np.random.rand(m, n) < 0.15
mask_a1[2, :] = True
a_nans = a.copy()
b_nans = b.copy()
a_nans[mask_a1 | mask_a2] = np.nan
b_nans[mask_b1 | mask_b2] = np.nan
a_masked1 = np.ma.masked_array(a, mask=mask_a1)
b_masked1 = np.ma.masked_array(b, mask=mask_b1)
a_masked1[mask_a2] = np.nan
b_masked1[mask_b2] = np.nan
a_masked2 = np.ma.masked_array(a, mask=mask_a2)
b_masked2 = np.ma.masked_array(b, mask=mask_b2)
a_masked2[mask_a1] = np.nan
b_masked2[mask_b1] = np.nan
a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2))
b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2))
res = stats.wilcoxon(a_nans, b_nans, nan_policy='omit', axis=axis)
res1 = stats.wilcoxon(a_masked1, b_masked1, nan_policy='omit', axis=axis)
res2 = stats.wilcoxon(a_masked2, b_masked2, nan_policy='omit', axis=axis)
res3 = stats.wilcoxon(a_masked3, b_masked3, nan_policy='raise', axis=axis)
res4 = stats.wilcoxon(a_masked3, b_masked3,
nan_policy='propagate', axis=axis)
np.testing.assert_array_equal(res1, res)
np.testing.assert_array_equal(res2, res)
np.testing.assert_array_equal(res3, res)
np.testing.assert_array_equal(res4, res)


def test_mixed_mask_nan_2():
# targeted test of _axis_nan_policy_factory with 2D masked sample:
# check for expected interaction between masks and nans
# Cases here are
# [mixed nan/mask, all nans, all masked,
# unmasked nan, masked nan, unmasked non-nan]
a = [[1, np.nan, 2], [np.nan, np.nan, np.nan], [1, 2, 3],
[1, np.nan, 3], [1, np.nan, 3], [1, 2, 3]]
mask = [[1, 0, 1], [0, 0, 0], [1, 1, 1],
[0, 0, 0], [0, 1, 0], [0, 0, 0]]
a_masked = np.ma.masked_array(a, mask=mask)
b = [[4, 5, 6]]
ref1 = stats.ranksums([1, 3], [4, 5, 6])
ref2 = stats.ranksums([1, 2, 3], [4, 5, 6])
# nan_policy = 'omit'
# all elements are removed from first three rows
# middle element is removed from fourth and fifth rows
# no elements removed from last row
res = stats.ranksums(a_masked, b, nan_policy='omit', axis=-1)
stat_ref = [np.nan, np.nan, np.nan,
ref1.statistic, ref1.statistic, ref2.statistic]
p_ref = [np.nan, np.nan, np.nan,
ref1.pvalue, ref1.pvalue, ref2.pvalue]
np.testing.assert_array_equal(res.statistic, stat_ref)
np.testing.assert_array_equal(res.pvalue, p_ref)
# nan_policy = 'propagate'
# nans propagate in first, second, and fourth row
# all elements are removed by mask from third row
# middle element is removed from fifth row
# no elements removed from last row
res = stats.ranksums(a_masked, b, nan_policy='propagate', axis=-1)
stat_ref = [np.nan, np.nan, np.nan,
np.nan, ref1.statistic, ref2.statistic]
p_ref = [np.nan, np.nan, np.nan,
np.nan, ref1.pvalue, ref2.pvalue]
np.testing.assert_array_equal(res.statistic, stat_ref)
np.testing.assert_array_equal(res.pvalue, p_ref)


def test_axis_None_vs_tuple():
# `axis` `None` should be equivalent to tuple with all axes
shape = (3, 8, 9, 10)
rng = np.random.default_rng(0)
x = rng.random(shape)
res = stats.kruskal(*x, axis=None)
res2 = stats.kruskal(*x, axis=(0, 1, 2))
np.testing.assert_array_equal(res, res2)


def test_axis_None_vs_tuple_with_broadcasting():
# `axis` `None` should be equivalent to tuple with all axes,
# which should be equivalent to raveling the arrays before passing them
rng = np.random.default_rng(0)
x = rng.random((5, 1))
y = rng.random((1, 5))
x2, y2 = np.broadcast_arrays(x, y)
res0 = stats.mannwhitneyu(x.ravel(), y.ravel())
res1 = stats.mannwhitneyu(x, y, axis=None)
res2 = stats.mannwhitneyu(x, y, axis=(0, 1))
res3 = stats.mannwhitneyu(x2.ravel(), y2.ravel())
assert res1 == res0
assert res2 == res0
assert res3 != res0
@pytest.mark.parametrize(("axis"),
list(permutations(range(-3, 3), 2)) + [(-4, 1)])
def test_other_axis_tuples(axis):
    # Check that _axis_nan_policy_factory treats all `axis` tuples as expected
rng = np.random.default_rng(0)
shape_x = (4, 5, 6)
shape_y = (1, 6)
x = rng.random(shape_x)
y = rng.random(shape_y)
axis_original = axis
# convert axis elements to positive
axis = tuple([(i if i >= 0 else 3 + i) for i in axis])
axis = sorted(axis)
if len(set(axis)) != len(axis):
message = "`axis` must contain only distinct elements"
with pytest.raises(np.AxisError, match=re.escape(message)):
stats.mannwhitneyu(x, y, axis=axis_original)
return
if axis[0] < 0 or axis[-1] > 2:
message = "`axis` is out of bounds for array of dimension 3"
with pytest.raises(np.AxisError, match=re.escape(message)):
stats.mannwhitneyu(x, y, axis=axis_original)
return
res = stats.mannwhitneyu(x, y, axis=axis_original)
# reference behavior
not_axis = {0, 1, 2} - set(axis) # which axis is not part of `axis`
not_axis = next(iter(not_axis)) # take it out of the set
x2 = x
shape_y_broadcasted = [1, 1, 6]
shape_y_broadcasted[not_axis] = shape_x[not_axis]
y2 = np.broadcast_to(y, shape_y_broadcasted)
m = x2.shape[not_axis]
x2 = np.moveaxis(x2, axis, (1, 2))
y2 = np.moveaxis(y2, axis, (1, 2))
x2 = np.reshape(x2, (m, -1))
y2 = np.reshape(y2, (m, -1))
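    # (e.g. for `axis=(0, 1)`: `not_axis` is 2 and m = 6, so x2 goes
    # (4, 5, 6) -> (6, 4, 5) -> (6, 20); the reduction at `axis=1` then
    # consumes 20 = 4*5 elements per row.)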
res2 = stats.mannwhitneyu(x2, y2, axis=1)
np.testing.assert_array_equal(res, res2)
@pytest.mark.parametrize(("weighted_fun_name"), ["gmean", "hmean", "pmean"])
def test_mean_mixed_mask_nan_weights(weighted_fun_name):
# targeted test of _axis_nan_policy_factory with 2D masked sample:
# omitting samples with masks and nan_policy='omit' are equivalent
# also checks paired-sample sentinel value removal
if weighted_fun_name == 'pmean':
def weighted_fun(a, **kwargs):
return stats.pmean(a, p=0.42, **kwargs)
else:
weighted_fun = getattr(stats, weighted_fun_name)
m, n = 3, 20
axis = -1
rng = np.random.default_rng(6541968121)
a = rng.uniform(size=(m, n))
b = rng.uniform(size=(m, n))
mask_a1 = rng.uniform(size=(m, n)) < 0.2
mask_a2 = rng.uniform(size=(m, n)) < 0.1
mask_b1 = rng.uniform(size=(m, n)) < 0.15
mask_b2 = rng.uniform(size=(m, n)) < 0.15
mask_a1[2, :] = True
a_nans = a.copy()
b_nans = b.copy()
a_nans[mask_a1 | mask_a2] = np.nan
b_nans[mask_b1 | mask_b2] = np.nan
a_masked1 = np.ma.masked_array(a, mask=mask_a1)
b_masked1 = np.ma.masked_array(b, mask=mask_b1)
a_masked1[mask_a2] = np.nan
b_masked1[mask_b2] = np.nan
a_masked2 = np.ma.masked_array(a, mask=mask_a2)
b_masked2 = np.ma.masked_array(b, mask=mask_b2)
a_masked2[mask_a1] = np.nan
b_masked2[mask_b1] = np.nan
a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2))
b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2))
mask_all = (mask_a1 | mask_a2 | mask_b1 | mask_b2)
a_masked4 = np.ma.masked_array(a, mask=mask_all)
b_masked4 = np.ma.masked_array(b, mask=mask_all)
with np.testing.suppress_warnings() as sup:
message = 'invalid value encountered'
sup.filter(RuntimeWarning, message)
res = weighted_fun(a_nans, weights=b_nans,
nan_policy='omit', axis=axis)
res1 = weighted_fun(a_masked1, weights=b_masked1,
nan_policy='omit', axis=axis)
res2 = weighted_fun(a_masked2, weights=b_masked2,
nan_policy='omit', axis=axis)
res3 = weighted_fun(a_masked3, weights=b_masked3,
nan_policy='raise', axis=axis)
res4 = weighted_fun(a_masked3, weights=b_masked3,
nan_policy='propagate', axis=axis)
# Would test with a_masked3/b_masked3, but there is a bug in np.average
# that causes a bug in _no_deco mean with masked weights. Would use
# np.ma.average, but that causes other problems. See numpy/numpy#7330.
if weighted_fun_name not in {'pmean', 'gmean'}:
weighted_fun_ma = getattr(stats.mstats, weighted_fun_name)
res5 = weighted_fun_ma(a_masked4, weights=b_masked4,
axis=axis, _no_deco=True)
np.testing.assert_array_equal(res1, res)
np.testing.assert_array_equal(res2, res)
np.testing.assert_array_equal(res3, res)
np.testing.assert_array_equal(res4, res)
if weighted_fun_name not in {'pmean', 'gmean'}:
# _no_deco mean returns masked array, last element was masked
np.testing.assert_allclose(res5.compressed(), res[~np.isnan(res)])


def test_raise_invalid_args_g17713():
# other cases are handled in:
# test_axis_nan_policy_decorated_positional_axis - multiple values for arg
# test_axis_nan_policy_decorated_positional_args - unexpected kwd arg
message = "got an unexpected keyword argument"
with pytest.raises(TypeError, match=message):
stats.gmean([1, 2, 3], invalid_arg=True)
message = " got multiple values for argument"
with pytest.raises(TypeError, match=message):
stats.gmean([1, 2, 3], a=True)
message = "missing 1 required positional argument"
with pytest.raises(TypeError, match=message):
stats.gmean()
message = "takes from 1 to 4 positional arguments but 5 were given"
with pytest.raises(TypeError, match=message):
stats.gmean([1, 2, 3], 0, float, [1, 1, 1], 10)


@pytest.mark.parametrize(
'dtype',
(list(np.typecodes['Float']
+ np.typecodes['Integer']
+ np.typecodes['Complex'])))
def test_array_like_input(dtype):
# Check that `_axis_nan_policy`-decorated functions work with custom
# containers that are coercible to numeric arrays
class ArrLike():
def __init__(self, x):
self._x = x
def __array__(self):
            # use the stored data, not the enclosing test's local `x`
            return np.asarray(self._x, dtype=dtype)
x = [1]*2 + [3, 4, 5]
res = stats.mode(ArrLike(x))
assert res.mode == 1
assert res.count == 2
| 47,628 | 41.487957 | 79 | py |