diff --git a/.gitattributes b/.gitattributes index b626fc80a349ade22e6d083b4ab636aa73fa4ed0..4cc0f25b9a84fb9506fe83aa88bcdcabec09e2b8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1440,3 +1440,4 @@ vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vglm/bin/python filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e730bcaee92f26ab211123fd32238a4ec18675f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82269618ca3158bed8098a2e1a4fdd2725dfa6cdf520df1c090d03d95c82debd +size 166337 diff --git a/parrot/lib/python3.10/site-packages/numpy/lib/tests/data/win64python2.npy b/parrot/lib/python3.10/site-packages/numpy/lib/tests/data/win64python2.npy new file mode 100644 index 0000000000000000000000000000000000000000..46edfc4d6869ed05b8438bc5bfa5d77d9ec3e9ba --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/lib/tests/data/win64python2.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a039c807558149ad5fa7ad12436c69d49c5e194cf617b92785f8cb60ec63297 +size 96 diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..48fb00161901a9d052ff0cdbd0481c3ecbfc4d20 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ea3cc03e3cd14fb02d346c0058af25d17688b37 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21cbc930c793265bcaac4e1a326738e0f5ce4521 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e55e616e3b16a2171e9c08bbb7728eb30832bf7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c50b2cff577e8751375955e3b1a7c38d6355ae6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5e66e2e8e11579a19a175e55ed898a05ea82e50 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cee777519faf0c0be9fa0e0af2c5e6bd476751ac Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a370f91b3bd4437ea4808617a45e4ba0df8a03b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cbb8306278249aa0770b5bf5a3cfc5f484a2bd3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6d1041a2ddd86935518ce05a1b5ae5434a34553f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b9a1ad7ffc11c6e0ed9c8e6d4c01161df2ac6cb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71602a800723c04e1083735cf2bdc21538507ff5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4503df7963f1cdf47756707d375a051faf815ced Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0117ceabc6cc5e21eb768cee2d64c111e0d36992 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE 
b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5f2b90a026aaecbdc090b3d3234954ab29fce8ae --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Quansight-Labs +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..91afdcedb180599a41758cdd8c03416cf6c20d76 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py @@ -0,0 +1,116 @@ +""" +.. note: + If you are looking for overrides for NumPy-specific methods, see the + documentation for :obj:`unumpy`. This page explains how to write + back-ends and multimethods. + +``uarray`` is built around a back-end protocol, and overridable multimethods. +It is necessary to define multimethods for back-ends to be able to override them. +See the documentation of :obj:`generate_multimethod` on how to write multimethods. + + + +Let's start with the simplest: + +``__ua_domain__`` defines the back-end *domain*. The domain consists of period- +separated string consisting of the modules you extend plus the submodule. For +example, if a submodule ``module2.submodule`` extends ``module1`` +(i.e., it exposes dispatchables marked as types available in ``module1``), +then the domain string should be ``"module1.module2.submodule"``. + + +For the purpose of this demonstration, we'll be creating an object and setting +its attributes directly. However, note that you can use a module or your own type +as a backend as well. + +>>> class Backend: pass +>>> be = Backend() +>>> be.__ua_domain__ = "ua_examples" + +It might be useful at this point to sidetrack to the documentation of +:obj:`generate_multimethod` to find out how to generate a multimethod +overridable by :obj:`uarray`. Needless to say, writing a backend and +creating multimethods are mostly orthogonal activities, and knowing +one doesn't necessarily require knowledge of the other, although it +is certainly helpful. We expect core API designers/specifiers to write the +multimethods, and implementors to override them. 
But, as is often the case, +similar people write both. + +Without further ado, here's an example multimethod: + +>>> import uarray as ua +>>> from uarray import Dispatchable +>>> def override_me(a, b): +... return Dispatchable(a, int), +>>> def override_replacer(args, kwargs, dispatchables): +... return (dispatchables[0], args[1]), {} +>>> overridden_me = ua.generate_multimethod( +... override_me, override_replacer, "ua_examples" +... ) + +Next comes the part about overriding the multimethod. This requires +the ``__ua_function__`` protocol, and the ``__ua_convert__`` +protocol. The ``__ua_function__`` protocol has the signature +``(method, args, kwargs)`` where ``method`` is the passed +multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables`` +is the list of converted dispatchables passed in. + +>>> def __ua_function__(method, args, kwargs): +... return method.__name__, args, kwargs +>>> be.__ua_function__ = __ua_function__ + +The other protocol of interest is the ``__ua_convert__`` protocol. It has the +signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion +between the formats should ideally be an ``O(1)`` operation, but it means that +no memory copying should be involved, only views of the existing data. + +>>> def __ua_convert__(dispatchables, coerce): +... for d in dispatchables: +... if d.type is int: +... if coerce and d.coercible: +... yield str(d.value) +... else: +... yield d.value +>>> be.__ua_convert__ = __ua_convert__ + +Now that we have defined the backend, the next thing to do is to call the multimethod. + +>>> with ua.set_backend(be): +... overridden_me(1, "2") +('override_me', (1, '2'), {}) + +Note that the marked type has no effect on the actual type of the passed object. +We can also coerce the type of the input. + +>>> with ua.set_backend(be, coerce=True): +... overridden_me(1, "2") +... 
overridden_me(1.0, "2") +('override_me', ('1', '2'), {}) +('override_me', ('1.0', '2'), {}) + +Another feature is that if you remove ``__ua_convert__``, the arguments are not +converted at all and it's up to the backend to handle that. + +>>> del be.__ua_convert__ +>>> with ua.set_backend(be): +... overridden_me(1, "2") +('override_me', (1, '2'), {}) + +You also have the option to return ``NotImplemented``, in which case processing moves on +to the next back-end, which in this case, doesn't exist. The same applies to +``__ua_convert__``. + +>>> be.__ua_function__ = lambda *a, **kw: NotImplemented +>>> with ua.set_backend(be): +... overridden_me(1, "2") +Traceback (most recent call last): + ... +uarray.BackendNotImplementedError: ... + +The last possibility is if we don't have ``__ua_convert__``, in which case the job is +left up to ``__ua_function__``, but putting things back into arrays after conversion +will not be possible. +""" + +from ._backend import * +__version__ = '0.8.8.dev0+aa94c5a4.scipy' diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6de6d80195fe87ec77624175ca2809e1c9e3fbd5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0be1208e5a6124a0b49ada5b73549885c99558d3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py 
b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..67da7d35ccea8ad26bd471b16e9400071a821cc0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py @@ -0,0 +1,704 @@ +import typing +import types +import inspect +import functools +from . import _uarray +import copyreg +import pickle +import contextlib + +from ._uarray import ( # type: ignore + BackendNotImplementedError, + _Function, + _SkipBackendContext, + _SetBackendContext, + _BackendState, +) + +__all__ = [ + "set_backend", + "set_global_backend", + "skip_backend", + "register_backend", + "determine_backend", + "determine_backend_multi", + "clear_backends", + "create_multimethod", + "generate_multimethod", + "_Function", + "BackendNotImplementedError", + "Dispatchable", + "wrap_single_convertor", + "wrap_single_convertor_instance", + "all_of_type", + "mark_as", + "set_state", + "get_state", + "reset_state", + "_BackendState", + "_SkipBackendContext", + "_SetBackendContext", +] + +ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]] +ArgumentReplacerType = typing.Callable[ + [tuple, dict, tuple], tuple[tuple, dict] +] + +def unpickle_function(mod_name, qname, self_): + import importlib + + try: + module = importlib.import_module(mod_name) + qname = qname.split(".") + func = module + for q in qname: + func = getattr(func, q) + + if self_ is not None: + func = types.MethodType(func, self_) + + return func + except (ImportError, AttributeError) as e: + from pickle import UnpicklingError + + raise UnpicklingError from e + + +def pickle_function(func): + mod_name = getattr(func, "__module__", None) + qname = getattr(func, "__qualname__", None) + self_ = getattr(func, "__self__", None) + + try: + test = unpickle_function(mod_name, qname, self_) + except pickle.UnpicklingError: + test = None + + if test is not func: + raise pickle.PicklingError( + f"Can't pickle {func}: it's not the 
same object as {test}" + ) + + return unpickle_function, (mod_name, qname, self_) + + +def pickle_state(state): + return _uarray._BackendState._unpickle, state._pickle() + + +def pickle_set_backend_context(ctx): + return _SetBackendContext, ctx._pickle() + + +def pickle_skip_backend_context(ctx): + return _SkipBackendContext, ctx._pickle() + + +copyreg.pickle(_Function, pickle_function) +copyreg.pickle(_uarray._BackendState, pickle_state) +copyreg.pickle(_SetBackendContext, pickle_set_backend_context) +copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context) + + +def get_state(): + """ + Returns an opaque object containing the current state of all the backends. + + Can be used for synchronization between threads/processes. + + See Also + -------- + set_state + Sets the state returned by this function. + """ + return _uarray.get_state() + + +@contextlib.contextmanager +def reset_state(): + """ + Returns a context manager that resets all state once exited. + + See Also + -------- + set_state + Context manager that sets the backend state. + get_state + Gets a state to be set by this context manager. + """ + with set_state(get_state()): + yield + + +@contextlib.contextmanager +def set_state(state): + """ + A context manager that sets the state of the backends to one returned by :obj:`get_state`. + + See Also + -------- + get_state + Gets a state to be set by this context manager. + """ # noqa: E501 + old_state = get_state() + _uarray.set_state(state) + try: + yield + finally: + _uarray.set_state(old_state, True) + + +def create_multimethod(*args, **kwargs): + """ + Creates a decorator for generating multimethods. + + This function creates a decorator that can be used with an argument + extractor in order to generate a multimethod. Other than for the + argument extractor, all arguments are passed on to + :obj:`generate_multimethod`. + + See Also + -------- + generate_multimethod + Generates a multimethod. 
+ """ + + def wrapper(a): + return generate_multimethod(a, *args, **kwargs) + + return wrapper + + +def generate_multimethod( + argument_extractor: ArgumentExtractorType, + argument_replacer: ArgumentReplacerType, + domain: str, + default: typing.Optional[typing.Callable] = None, +): + """ + Generates a multimethod. + + Parameters + ---------- + argument_extractor : ArgumentExtractorType + A callable which extracts the dispatchable arguments. Extracted arguments + should be marked by the :obj:`Dispatchable` class. It has the same signature + as the desired multimethod. + argument_replacer : ArgumentReplacerType + A callable with the signature (args, kwargs, dispatchables), which should also + return an (args, kwargs) pair with the dispatchables replaced inside the + args/kwargs. + domain : str + A string value indicating the domain of this multimethod. + default: Optional[Callable], optional + The default implementation of this multimethod, where ``None`` (the default) + specifies there is no default implementation. + + Examples + -------- + In this example, ``a`` is to be dispatched over, so we return it, while marking it + as an ``int``. + The trailing comma is needed because the args have to be returned as an iterable. + + >>> def override_me(a, b): + ... return Dispatchable(a, int), + + Next, we define the argument replacer that replaces the dispatchables inside + args/kwargs with the supplied ones. + + >>> def override_replacer(args, kwargs, dispatchables): + ... return (dispatchables[0], args[1]), {} + + Next, we define the multimethod. + + >>> overridden_me = generate_multimethod( + ... override_me, override_replacer, "ua_examples" + ... ) + + Notice that there's no default implementation, unless you supply one. + + >>> overridden_me(1, "a") + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + >>> overridden_me2 = generate_multimethod( + ... override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y) + ... 
) + >>> overridden_me2(1, "a") + (1, 'a') + + See Also + -------- + uarray + See the module documentation for how to override the method by creating + backends. + """ + kw_defaults, arg_defaults, opts = get_defaults(argument_extractor) + ua_func = _Function( + argument_extractor, + argument_replacer, + domain, + arg_defaults, + kw_defaults, + default, + ) + + return functools.update_wrapper(ua_func, argument_extractor) + + +def set_backend(backend, coerce=False, only=False): + """ + A context manager that sets the preferred backend. + + Parameters + ---------- + backend + The backend to set. + coerce + Whether or not to coerce to a specific backend's types. Implies ``only``. + only + Whether or not this should be the last backend to try. + + See Also + -------- + skip_backend: A context manager that allows skipping of backends. + set_global_backend: Set a single, global backend for a domain. + """ + try: + return backend.__ua_cache__["set", coerce, only] + except AttributeError: + backend.__ua_cache__ = {} + except KeyError: + pass + + ctx = _SetBackendContext(backend, coerce, only) + backend.__ua_cache__["set", coerce, only] = ctx + return ctx + + +def skip_backend(backend): + """ + A context manager that allows one to skip a given backend from processing + entirely. This allows one to use another backend's code in a library that + is also a consumer of the same backend. + + Parameters + ---------- + backend + The backend to skip. + + See Also + -------- + set_backend: A context manager that allows setting of backends. + set_global_backend: Set a single, global backend for a domain. 
+ """ + try: + return backend.__ua_cache__["skip"] + except AttributeError: + backend.__ua_cache__ = {} + except KeyError: + pass + + ctx = _SkipBackendContext(backend) + backend.__ua_cache__["skip"] = ctx + return ctx + + +def get_defaults(f): + sig = inspect.signature(f) + kw_defaults = {} + arg_defaults = [] + opts = set() + for k, v in sig.parameters.items(): + if v.default is not inspect.Parameter.empty: + kw_defaults[k] = v.default + if v.kind in ( + inspect.Parameter.POSITIONAL_ONLY, + inspect.Parameter.POSITIONAL_OR_KEYWORD, + ): + arg_defaults.append(v.default) + opts.add(k) + + return kw_defaults, tuple(arg_defaults), opts + + +def set_global_backend(backend, coerce=False, only=False, *, try_last=False): + """ + This utility method replaces the default backend for permanent use. It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. This will be the first tried + backend outside the :obj:`set_backend` context manager. + + Note that this method is not thread-safe. + + .. warning:: + We caution library authors against using this function in + their code. We do *not* support this use-case. This function + is meant to be used only by users themselves, or by a reference + implementation, if one exists. + + Parameters + ---------- + backend + The backend to register. + coerce : bool + Whether to coerce input types when trying this backend. + only : bool + If ``True``, no more backends will be tried if this fails. + Implied by ``coerce=True``. + try_last : bool + If ``True``, the global backend is tried after registered backends. + + See Also + -------- + set_backend: A context manager that allows setting of backends. + skip_backend: A context manager that allows skipping of backends. + """ + _uarray.set_global_backend(backend, coerce, only, try_last) + + +def register_backend(backend): + """ + This utility method sets registers backend for permanent use. 
It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. + + Note that this method is not thread-safe. + + Parameters + ---------- + backend + The backend to register. + """ + _uarray.register_backend(backend) + + +def clear_backends(domain, registered=True, globals=False): + """ + This utility method clears registered backends. + + .. warning:: + We caution library authors against using this function in + their code. We do *not* support this use-case. This function + is meant to be used only by users themselves. + + .. warning:: + Do NOT use this method inside a multimethod call, or the + program is likely to crash. + + Parameters + ---------- + domain : Optional[str] + The domain for which to de-register backends. ``None`` means + de-register for all domains. + registered : bool + Whether or not to clear registered backends. See :obj:`register_backend`. + globals : bool + Whether or not to clear global backends. See :obj:`set_global_backend`. + + See Also + -------- + register_backend : Register a backend globally. + set_global_backend : Set a global backend. + """ + _uarray.clear_backends(domain, registered, globals) + + +class Dispatchable: + """ + A utility class which marks an argument with a specific dispatch type. + + + Attributes + ---------- + value + The value of the Dispatchable. + + type + The type of the Dispatchable. + + Examples + -------- + >>> x = Dispatchable(1, str) + >>> x + , value=1> + + See Also + -------- + all_of_type + Marks all unmarked parameters of a function. + + mark_as + Allows one to create a utility function to mark as a given type. 
+ """ + + def __init__(self, value, dispatch_type, coercible=True): + self.value = value + self.type = dispatch_type + self.coercible = coercible + + def __getitem__(self, index): + return (self.type, self.value)[index] + + def __str__(self): + return f"<{type(self).__name__}: type={self.type!r}, value={self.value!r}>" + + __repr__ = __str__ + + +def mark_as(dispatch_type): + """ + Creates a utility function to mark something as a specific type. + + Examples + -------- + >>> mark_int = mark_as(int) + >>> mark_int(1) + , value=1> + """ + return functools.partial(Dispatchable, dispatch_type=dispatch_type) + + +def all_of_type(arg_type): + """ + Marks all unmarked arguments as a given type. + + Examples + -------- + >>> @all_of_type(str) + ... def f(a, b): + ... return a, Dispatchable(b, int) + >>> f('a', 1) + (, value='a'>, + , value=1>) + """ + + def outer(func): + @functools.wraps(func) + def inner(*args, **kwargs): + extracted_args = func(*args, **kwargs) + return tuple( + Dispatchable(arg, arg_type) + if not isinstance(arg, Dispatchable) + else arg + for arg in extracted_args + ) + + return inner + + return outer + + +def wrap_single_convertor(convert_single): + """ + Wraps a ``__ua_convert__`` defined for a single element to all elements. + If any of them return ``NotImplemented``, the operation is assumed to be + undefined. + + Accepts a signature of (value, type, coerce). + """ + + @functools.wraps(convert_single) + def __ua_convert__(dispatchables, coerce): + converted = [] + for d in dispatchables: + c = convert_single(d.value, d.type, coerce and d.coercible) + + if c is NotImplemented: + return NotImplemented + + converted.append(c) + + return converted + + return __ua_convert__ + + +def wrap_single_convertor_instance(convert_single): + """ + Wraps a ``__ua_convert__`` defined for a single element to all elements. + If any of them return ``NotImplemented``, the operation is assumed to be + undefined. + + Accepts a signature of (value, type, coerce). 
+ """ + + @functools.wraps(convert_single) + def __ua_convert__(self, dispatchables, coerce): + converted = [] + for d in dispatchables: + c = convert_single(self, d.value, d.type, coerce and d.coercible) + + if c is NotImplemented: + return NotImplemented + + converted.append(c) + + return converted + + return __ua_convert__ + + +def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False): + """Set the backend to the first active backend that supports ``value`` + + This is useful for functions that call multimethods without any dispatchable + arguments. You can use :func:`determine_backend` to ensure the same backend + is used everywhere in a block of multimethod calls. + + Parameters + ---------- + value + The value being tested + dispatch_type + The dispatch type associated with ``value``, aka + ":ref:`marking `". + domain: string + The domain to query for backends and set. + coerce: bool + Whether or not to allow coercion to the backend's types. Implies ``only``. + only: bool + Whether or not this should be the last backend to try. + + See Also + -------- + set_backend: For when you know which backend to set + + Notes + ----- + + Support is determined by the ``__ua_convert__`` protocol. Backends not + supporting the type must return ``NotImplemented`` from their + ``__ua_convert__`` if they don't support input of that type. + + Examples + -------- + + Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting + different types, ``TypeA`` and ``TypeB``. Neither supporting the other type: + + >>> with ua.set_backend(ex.BackendA): + ... ex.call_multimethod(ex.TypeB(), ex.TypeB()) + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + Now consider a multimethod that creates a new object of ``TypeA``, or + ``TypeB`` depending on the active backend. + + >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): + ... res = ex.creation_multimethod() + ... 
ex.call_multimethod(res, ex.TypeA()) + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + ``res`` is an object of ``TypeB`` because ``BackendB`` is set in the + innermost with statement. So, ``call_multimethod`` fails since the types + don't match. + + Instead, we need to first find a backend suitable for all of our objects. + + >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): + ... x = ex.TypeA() + ... with ua.determine_backend(x, "mark", domain="ua_examples"): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, x) + TypeA + + """ + dispatchables = (Dispatchable(value, dispatch_type, coerce),) + backend = _uarray.determine_backend(domain, dispatchables, coerce) + + return set_backend(backend, coerce=coerce, only=only) + + +def determine_backend_multi( + dispatchables, *, domain, only=True, coerce=False, **kwargs +): + """Set a backend supporting all ``dispatchables`` + + This is useful for functions that call multimethods without any dispatchable + arguments. You can use :func:`determine_backend_multi` to ensure the same + backend is used everywhere in a block of multimethod calls involving + multiple arrays. + + Parameters + ---------- + dispatchables: Sequence[Union[uarray.Dispatchable, Any]] + The dispatchables that must be supported + domain: string + The domain to query for backends and set. + coerce: bool + Whether or not to allow coercion to the backend's types. Implies ``only``. + only: bool + Whether or not this should be the last backend to try. + dispatch_type: Optional[Any] + The default dispatch type associated with ``dispatchables``, aka + ":ref:`marking `". + + See Also + -------- + determine_backend: For a single dispatch value + set_backend: For when you know which backend to set + + Notes + ----- + + Support is determined by the ``__ua_convert__`` protocol. 
Backends not + supporting the type must return ``NotImplemented`` from their + ``__ua_convert__`` if they don't support input of that type. + + Examples + -------- + + :func:`determine_backend` allows the backend to be set from a single + object. :func:`determine_backend_multi` allows multiple objects to be + checked simultaneously for support in the backend. Suppose we have a + ``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call, + and a ``BackendBC`` that doesn't support ``TypeA``. + + >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): + ... a, b = ex.TypeA(), ex.TypeB() + ... with ua.determine_backend_multi( + ... [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")], + ... domain="ua_examples" + ... ): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, a, b) + TypeA + + This won't call ``BackendBC`` because it doesn't support ``TypeA``. + + We can also use leave out the ``ua.Dispatchable`` if we specify the + default ``dispatch_type`` for the ``dispatchables`` argument. + + >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): + ... a, b = ex.TypeA(), ex.TypeB() + ... with ua.determine_backend_multi( + ... [a, b], dispatch_type="mark", domain="ua_examples" + ... ): + ... res = ex.creation_multimethod() + ... 
ex.call_multimethod(res, a, b) + TypeA + + """ + if "dispatch_type" in kwargs: + disp_type = kwargs.pop("dispatch_type") + dispatchables = tuple( + d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type) + for d in dispatchables + ) + else: + dispatchables = tuple(dispatchables) + if not all(isinstance(d, Dispatchable) for d in dispatchables): + raise TypeError("dispatchables must be instances of uarray.Dispatchable") + + if len(kwargs) != 0: + raise TypeError(f"Received unexpected keyword arguments: {kwargs}") + + backend = _uarray.determine_backend(domain, dispatchables, coerce) + + return set_backend(backend, coerce=coerce, only=only) diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd1b663d012d1e1113e71c29a81fc12732d2d50d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8e26ee42906a5466444056366760a4712ce681e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..763f2ca3174a75fbdfe9075c72e32300c399c5e3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc differ 
diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b28e5f25b653ce219d0486422b9a2e32388472c0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d6ec7bc58c7f29591ef636807e0a2a724d75638 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2505b9c407a4d275a838dbbf722e5e4d73aaf8db Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ece1e3522ef3109f0772ef71d2718aba2df4b6fc Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/__init__.py b/parrot/lib/python3.10/site-packages/scipy/stats/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..180f50a1756f71b579f083a726bfc81195c4da43 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/__init__.py @@ -0,0 +1,649 @@ +""" +.. _statsrefmanual: + +========================================== +Statistical functions (:mod:`scipy.stats`) +========================================== + +.. currentmodule:: scipy.stats + +This module contains a large number of probability distributions, +summary and frequency statistics, correlation functions and statistical +tests, masked statistics, kernel density estimation, quasi-Monte Carlo +functionality, and more. + +Statistics is a very large area, and there are topics that are out of scope +for SciPy and are covered by other packages. Some of the most important ones +are: + +- `statsmodels `__: + regression, linear models, time series analysis, extensions to topics + also covered by ``scipy.stats``. +- `Pandas `__: tabular data, time series + functionality, interfaces to other statistical languages. +- `PyMC `__: Bayesian statistical + modeling, probabilistic machine learning. +- `scikit-learn `__: classification, regression, + model selection. +- `Seaborn `__: statistical data visualization. +- `rpy2 `__: Python to R bridge. + + +Probability distributions +========================= + +Each univariate distribution is an instance of a subclass of `rv_continuous` +(`rv_discrete` for discrete distributions): + +.. autosummary:: + :toctree: generated/ + + rv_continuous + rv_discrete + rv_histogram + +Continuous distributions +------------------------ + +.. 
autosummary:: + :toctree: generated/ + + alpha -- Alpha + anglit -- Anglit + arcsine -- Arcsine + argus -- Argus + beta -- Beta + betaprime -- Beta Prime + bradford -- Bradford + burr -- Burr (Type III) + burr12 -- Burr (Type XII) + cauchy -- Cauchy + chi -- Chi + chi2 -- Chi-squared + cosine -- Cosine + crystalball -- Crystalball + dgamma -- Double Gamma + dweibull -- Double Weibull + erlang -- Erlang + expon -- Exponential + exponnorm -- Exponentially Modified Normal + exponweib -- Exponentiated Weibull + exponpow -- Exponential Power + f -- F (Snecdor F) + fatiguelife -- Fatigue Life (Birnbaum-Saunders) + fisk -- Fisk + foldcauchy -- Folded Cauchy + foldnorm -- Folded Normal + genlogistic -- Generalized Logistic + gennorm -- Generalized normal + genpareto -- Generalized Pareto + genexpon -- Generalized Exponential + genextreme -- Generalized Extreme Value + gausshyper -- Gauss Hypergeometric + gamma -- Gamma + gengamma -- Generalized gamma + genhalflogistic -- Generalized Half Logistic + genhyperbolic -- Generalized Hyperbolic + geninvgauss -- Generalized Inverse Gaussian + gibrat -- Gibrat + gompertz -- Gompertz (Truncated Gumbel) + gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I + gumbel_l -- Left Sided Gumbel, etc. + halfcauchy -- Half Cauchy + halflogistic -- Half Logistic + halfnorm -- Half Normal + halfgennorm -- Generalized Half Normal + hypsecant -- Hyperbolic Secant + invgamma -- Inverse Gamma + invgauss -- Inverse Gaussian + invweibull -- Inverse Weibull + irwinhall -- Irwin-Hall + jf_skew_t -- Jones and Faddy Skew-T + johnsonsb -- Johnson SB + johnsonsu -- Johnson SU + kappa4 -- Kappa 4 parameter + kappa3 -- Kappa 3 parameter + ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic + kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic + kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic. 
+ laplace -- Laplace + laplace_asymmetric -- Asymmetric Laplace + levy -- Levy + levy_l + levy_stable + logistic -- Logistic + loggamma -- Log-Gamma + loglaplace -- Log-Laplace (Log Double Exponential) + lognorm -- Log-Normal + loguniform -- Log-Uniform + lomax -- Lomax (Pareto of the second kind) + maxwell -- Maxwell + mielke -- Mielke's Beta-Kappa + moyal -- Moyal + nakagami -- Nakagami + ncx2 -- Non-central chi-squared + ncf -- Non-central F + nct -- Non-central Student's T + norm -- Normal (Gaussian) + norminvgauss -- Normal Inverse Gaussian + pareto -- Pareto + pearson3 -- Pearson type III + powerlaw -- Power-function + powerlognorm -- Power log normal + powernorm -- Power normal + rdist -- R-distribution + rayleigh -- Rayleigh + rel_breitwigner -- Relativistic Breit-Wigner + rice -- Rice + recipinvgauss -- Reciprocal Inverse Gaussian + semicircular -- Semicircular + skewcauchy -- Skew Cauchy + skewnorm -- Skew normal + studentized_range -- Studentized Range + t -- Student's T + trapezoid -- Trapezoidal + triang -- Triangular + truncexpon -- Truncated Exponential + truncnorm -- Truncated Normal + truncpareto -- Truncated Pareto + truncweibull_min -- Truncated minimum Weibull distribution + tukeylambda -- Tukey-Lambda + uniform -- Uniform + vonmises -- Von-Mises (Circular) + vonmises_line -- Von-Mises (Line) + wald -- Wald + weibull_min -- Minimum Weibull (see Frechet) + weibull_max -- Maximum Weibull (see Frechet) + wrapcauchy -- Wrapped Cauchy + +The ``fit`` method of the univariate continuous distributions uses +maximum likelihood estimation to fit the distribution to a data set. +The ``fit`` method can accept regular data or *censored data*. +Censored data is represented with instances of the `CensoredData` +class. + +.. autosummary:: + :toctree: generated/ + + CensoredData + + +Multivariate distributions +-------------------------- + +.. 
autosummary:: + :toctree: generated/ + + multivariate_normal -- Multivariate normal distribution + matrix_normal -- Matrix normal distribution + dirichlet -- Dirichlet + dirichlet_multinomial -- Dirichlet multinomial distribution + wishart -- Wishart + invwishart -- Inverse Wishart + multinomial -- Multinomial distribution + special_ortho_group -- SO(N) group + ortho_group -- O(N) group + unitary_group -- U(N) group + random_correlation -- random correlation matrices + multivariate_t -- Multivariate t-distribution + multivariate_hypergeom -- Multivariate hypergeometric distribution + random_table -- Distribution of random tables with given marginals + uniform_direction -- Uniform distribution on S(N-1) + vonmises_fisher -- Von Mises-Fisher distribution + +`scipy.stats.multivariate_normal` methods accept instances +of the following class to represent the covariance. + +.. autosummary:: + :toctree: generated/ + + Covariance -- Representation of a covariance matrix + + +Discrete distributions +---------------------- + +.. autosummary:: + :toctree: generated/ + + bernoulli -- Bernoulli + betabinom -- Beta-Binomial + betanbinom -- Beta-Negative Binomial + binom -- Binomial + boltzmann -- Boltzmann (Truncated Discrete Exponential) + dlaplace -- Discrete Laplacian + geom -- Geometric + hypergeom -- Hypergeometric + logser -- Logarithmic (Log-Series, Series) + nbinom -- Negative Binomial + nchypergeom_fisher -- Fisher's Noncentral Hypergeometric + nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric + nhypergeom -- Negative Hypergeometric + planck -- Planck (Discrete Exponential) + poisson -- Poisson + randint -- Discrete Uniform + skellam -- Skellam + yulesimon -- Yule-Simon + zipf -- Zipf (Zeta) + zipfian -- Zipfian + + +An overview of statistical functions is given below. Many of these functions +have a similar version in `scipy.stats.mstats` which work for masked arrays. + +Summary statistics +================== + +.. 
autosummary:: + :toctree: generated/ + + describe -- Descriptive statistics + gmean -- Geometric mean + hmean -- Harmonic mean + pmean -- Power mean + kurtosis -- Fisher or Pearson kurtosis + mode -- Modal value + moment -- Central moment + expectile -- Expectile + skew -- Skewness + kstat -- + kstatvar -- + tmean -- Truncated arithmetic mean + tvar -- Truncated variance + tmin -- + tmax -- + tstd -- + tsem -- + variation -- Coefficient of variation + find_repeats + rankdata + tiecorrect + trim_mean + gstd -- Geometric Standard Deviation + iqr + sem + bayes_mvs + mvsdist + entropy + differential_entropy + median_abs_deviation + +Frequency statistics +==================== + +.. autosummary:: + :toctree: generated/ + + cumfreq + percentileofscore + scoreatpercentile + relfreq + +.. autosummary:: + :toctree: generated/ + + binned_statistic -- Compute a binned statistic for a set of data. + binned_statistic_2d -- Compute a 2-D binned statistic for a set of data. + binned_statistic_dd -- Compute a d-D binned statistic for a set of data. + +.. _hypotests: + +Hypothesis Tests and related functions +====================================== +SciPy has many functions for performing hypothesis tests that return a +test statistic and a p-value, and several of them return confidence intervals +and/or other related information. + +The headings below are based on common uses of the functions within, but due to +the wide variety of statistical procedures, any attempt at coarse-grained +categorization will be imperfect. Also, note that tests within the same heading +are not interchangeable in general (e.g. many have different distributional +assumptions). + +One Sample Tests / Paired Sample Tests +-------------------------------------- +One sample tests are typically used to assess whether a single sample was +drawn from a specified distribution or a distribution with specified properties +(e.g. zero mean). + +.. 
autosummary:: + :toctree: generated/ + + ttest_1samp + binomtest + quantile_test + skewtest + kurtosistest + normaltest + jarque_bera + shapiro + anderson + cramervonmises + ks_1samp + goodness_of_fit + chisquare + power_divergence + +Paired sample tests are often used to assess whether two samples were drawn +from the same distribution; they differ from the independent sample tests below +in that each observation in one sample is treated as paired with a +closely-related observation in the other sample (e.g. when environmental +factors are controlled between observations within a pair but not among pairs). +They can also be interpreted or used as one-sample tests (e.g. tests on the +mean or median of *differences* between paired observations). + +.. autosummary:: + :toctree: generated/ + + ttest_rel + wilcoxon + +Association/Correlation Tests +----------------------------- + +These tests are often used to assess whether there is a relationship (e.g. +linear) between paired observations in multiple samples or among the +coordinates of multivariate observations. + +.. autosummary:: + :toctree: generated/ + + linregress + pearsonr + spearmanr + pointbiserialr + kendalltau + weightedtau + somersd + siegelslopes + theilslopes + page_trend_test + multiscale_graphcorr + +These association tests and are to work with samples in the form of contingency +tables. Supporting functions are available in `scipy.stats.contingency`. + +.. autosummary:: + :toctree: generated/ + + chi2_contingency + fisher_exact + barnard_exact + boschloo_exact + +Independent Sample Tests +------------------------ +Independent sample tests are typically used to assess whether multiple samples +were independently drawn from the same distribution or different distributions +with a shared property (e.g. equal means). + +Some tests are specifically for comparing two samples. + +.. 
autosummary:: + :toctree: generated/ + + ttest_ind_from_stats + poisson_means_test + ttest_ind + mannwhitneyu + bws_test + ranksums + brunnermunzel + mood + ansari + cramervonmises_2samp + epps_singleton_2samp + ks_2samp + kstest + +Others are generalized to multiple samples. + +.. autosummary:: + :toctree: generated/ + + f_oneway + tukey_hsd + dunnett + kruskal + alexandergovern + fligner + levene + bartlett + median_test + friedmanchisquare + anderson_ksamp + +Resampling and Monte Carlo Methods +---------------------------------- +The following functions can reproduce the p-value and confidence interval +results of most of the functions above, and often produce accurate results in a +wider variety of conditions. They can also be used to perform hypothesis tests +and generate confidence intervals for custom statistics. This flexibility comes +at the cost of greater computational requirements and stochastic results. + +.. autosummary:: + :toctree: generated/ + + monte_carlo_test + permutation_test + bootstrap + power + +Instances of the following object can be passed into some hypothesis test +functions to perform a resampling or Monte Carlo version of the hypothesis +test. + +.. autosummary:: + :toctree: generated/ + + MonteCarloMethod + PermutationMethod + BootstrapMethod + +Multiple Hypothesis Testing and Meta-Analysis +--------------------------------------------- +These functions are for assessing the results of individual tests as a whole. +Functions for performing specific multiple hypothesis tests (e.g. post hoc +tests) are listed above. + +.. autosummary:: + :toctree: generated/ + + combine_pvalues + false_discovery_control + + +The following functions are related to the tests above but do not belong in the +above categories. + +Quasi-Monte Carlo +================= + +.. toctree:: + :maxdepth: 4 + + stats.qmc + +Contingency Tables +================== + +.. 
toctree:: + :maxdepth: 4 + + stats.contingency + +Masked statistics functions +=========================== + +.. toctree:: + + stats.mstats + + +Other statistical functionality +=============================== + +Transformations +--------------- + +.. autosummary:: + :toctree: generated/ + + boxcox + boxcox_normmax + boxcox_llf + yeojohnson + yeojohnson_normmax + yeojohnson_llf + obrientransform + sigmaclip + trimboth + trim1 + zmap + zscore + gzscore + +Statistical distances +--------------------- + +.. autosummary:: + :toctree: generated/ + + wasserstein_distance + wasserstein_distance_nd + energy_distance + +Sampling +-------- + +.. toctree:: + :maxdepth: 4 + + stats.sampling + +Random variate generation / CDF Inversion +----------------------------------------- + +.. autosummary:: + :toctree: generated/ + + rvs_ratio_uniforms + +Fitting / Survival Analysis +--------------------------- + +.. autosummary:: + :toctree: generated/ + + fit + ecdf + logrank + +Directional statistical functions +--------------------------------- + +.. autosummary:: + :toctree: generated/ + + directional_stats + circmean + circvar + circstd + +Sensitivity Analysis +-------------------- + +.. autosummary:: + :toctree: generated/ + + sobol_indices + +Plot-tests +---------- + +.. autosummary:: + :toctree: generated/ + + ppcc_max + ppcc_plot + probplot + boxcox_normplot + yeojohnson_normplot + +Univariate and multivariate kernel density estimation +----------------------------------------------------- + +.. autosummary:: + :toctree: generated/ + + gaussian_kde + +Warnings / Errors used in :mod:`scipy.stats` +-------------------------------------------- + +.. autosummary:: + :toctree: generated/ + + DegenerateDataWarning + ConstantInputWarning + NearConstantInputWarning + FitError + +Result classes used in :mod:`scipy.stats` +----------------------------------------- + +.. 
warning:: + + These classes are private, but they are included here because instances + of them are returned by other statistical functions. User import and + instantiation is not supported. + +.. toctree:: + :maxdepth: 2 + + stats._result_classes + +""" # noqa: E501 + +from ._warnings_errors import (ConstantInputWarning, NearConstantInputWarning, + DegenerateDataWarning, FitError) +from ._stats_py import * +from ._variation import variation +from .distributions import * +from ._morestats import * +from ._multicomp import * +from ._binomtest import binomtest +from ._binned_statistic import * +from ._kde import gaussian_kde +from . import mstats +from . import qmc +from ._multivariate import * +from . import contingency +from .contingency import chi2_contingency +from ._censored_data import CensoredData +from ._resampling import (bootstrap, monte_carlo_test, permutation_test, power, + MonteCarloMethod, PermutationMethod, BootstrapMethod) +from ._entropy import * +from ._hypotests import * +from ._rvs_sampling import rvs_ratio_uniforms +from ._page_trend_test import page_trend_test +from ._mannwhitneyu import mannwhitneyu +from ._bws_test import bws_test +from ._fit import fit, goodness_of_fit +from ._covariance import Covariance +from ._sensitivity_analysis import * +from ._survival import * +from ._mgc import multiscale_graphcorr + + +# Deprecated namespaces, to be removed in v2.0.0 +from . import ( + biasedurn, kde, morestats, mstats_basic, mstats_extras, mvn, stats +) + + +__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders. 
+ +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py b/parrot/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..f6427c38eb636910f2e28e2aed1bd9d7ad71d36e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py @@ -0,0 +1,686 @@ +# Many scipy.stats functions support `axis` and `nan_policy` parameters. +# When the two are combined, it can be tricky to get all the behavior just +# right. This file contains utility functions useful for scipy.stats functions +# that support `axis` and `nan_policy`, including a decorator that +# automatically adds `axis` and `nan_policy` arguments to a function. + +import warnings +import numpy as np +from functools import wraps +from scipy._lib._docscrape import FunctionDoc, Parameter +from scipy._lib._util import _contains_nan, AxisError, _get_nan +from scipy._lib._array_api import array_namespace, is_numpy + +import inspect + +too_small_1d_not_omit = ( + "One or more sample arguments is too small; all " + "returned values will be NaN. " + "See documentation for sample size requirements.") + +too_small_1d_omit = ( + "After omitting NaNs, one or more sample arguments " + "is too small; all returned values will be NaN. " + "See documentation for sample size requirements.") + +too_small_nd_not_omit = ( + "All axis-slices of one or more sample arguments are " + "too small; all elements of returned arrays will be NaN. " + "See documentation for sample size requirements.") + +too_small_nd_omit = ( + "After omitting NaNs, one or more axis-slices of one " + "or more sample arguments is too small; corresponding " + "elements of returned arrays will be NaN. 
" + "See documentation for sample size requirements.") + +class SmallSampleWarning(RuntimeWarning): + pass + + +def _broadcast_arrays(arrays, axis=None, xp=None): + """ + Broadcast shapes of arrays, ignoring incompatibility of specified axes + """ + if not arrays: + return arrays + xp = array_namespace(*arrays) if xp is None else xp + arrays = [xp.asarray(arr) for arr in arrays] + shapes = [arr.shape for arr in arrays] + new_shapes = _broadcast_shapes(shapes, axis) + if axis is None: + new_shapes = [new_shapes]*len(arrays) + return [xp.broadcast_to(array, new_shape) + for array, new_shape in zip(arrays, new_shapes)] + + +def _broadcast_shapes(shapes, axis=None): + """ + Broadcast shapes, ignoring incompatibility of specified axes + """ + if not shapes: + return shapes + + # input validation + if axis is not None: + axis = np.atleast_1d(axis) + axis_int = axis.astype(int) + if not np.array_equal(axis_int, axis): + raise AxisError('`axis` must be an integer, a ' + 'tuple of integers, or `None`.') + axis = axis_int + + # First, ensure all shapes have same number of dimensions by prepending 1s. + n_dims = max([len(shape) for shape in shapes]) + new_shapes = np.ones((len(shapes), n_dims), dtype=int) + for row, shape in zip(new_shapes, shapes): + row[len(row)-len(shape):] = shape # can't use negative indices (-0:) + + # Remove the shape elements of the axes to be ignored, but remember them. + if axis is not None: + axis[axis < 0] = n_dims + axis[axis < 0] + axis = np.sort(axis) + if axis[-1] >= n_dims or axis[0] < 0: + message = (f"`axis` is out of bounds " + f"for array of dimension {n_dims}") + raise AxisError(message) + + if len(np.unique(axis)) != len(axis): + raise AxisError("`axis` must contain only distinct elements") + + removed_shapes = new_shapes[:, axis] + new_shapes = np.delete(new_shapes, axis, axis=1) + + # If arrays are broadcastable, shape elements that are 1 may be replaced + # with a corresponding non-1 shape element. 
Assuming arrays are + # broadcastable, that final shape element can be found with: + new_shape = np.max(new_shapes, axis=0) + # except in case of an empty array: + new_shape *= new_shapes.all(axis=0) + + # Among all arrays, there can only be one unique non-1 shape element. + # Therefore, if any non-1 shape element does not match what we found + # above, the arrays must not be broadcastable after all. + if np.any(~((new_shapes == 1) | (new_shapes == new_shape))): + raise ValueError("Array shapes are incompatible for broadcasting.") + + if axis is not None: + # Add back the shape elements that were ignored + new_axis = axis - np.arange(len(axis)) + new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape)) + for removed_shape in removed_shapes] + return new_shapes + else: + return tuple(new_shape) + + +def _broadcast_array_shapes_remove_axis(arrays, axis=None): + """ + Broadcast shapes of arrays, dropping specified axes + + Given a sequence of arrays `arrays` and an integer or tuple `axis`, find + the shape of the broadcast result after consuming/dropping `axis`. + In other words, return output shape of a typical hypothesis test on + `arrays` vectorized along `axis`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats._axis_nan_policy import _broadcast_array_shapes_remove_axis + >>> a = np.zeros((5, 2, 1)) + >>> b = np.zeros((9, 3)) + >>> _broadcast_array_shapes_remove_axis((a, b), 1) + (5, 3) + """ + # Note that here, `axis=None` means do not consume/drop any axes - _not_ + # ravel arrays before broadcasting. + shapes = [arr.shape for arr in arrays] + return _broadcast_shapes_remove_axis(shapes, axis) + + +def _broadcast_shapes_remove_axis(shapes, axis=None): + """ + Broadcast shapes, dropping specified axes + + Same as _broadcast_array_shapes_remove_axis, but given a sequence + of array shapes `shapes` instead of the arrays themselves. 
+ """ + shapes = _broadcast_shapes(shapes, axis) + shape = shapes[0] + if axis is not None: + shape = np.delete(shape, axis) + return tuple(shape) + + +def _broadcast_concatenate(arrays, axis, paired=False): + """Concatenate arrays along an axis with broadcasting.""" + arrays = _broadcast_arrays(arrays, axis if not paired else None) + res = np.concatenate(arrays, axis=axis) + return res + + +# TODO: add support for `axis` tuples +def _remove_nans(samples, paired): + "Remove nans from paired or unpaired 1D samples" + # potential optimization: don't copy arrays that don't contain nans + if not paired: + return [sample[~np.isnan(sample)] for sample in samples] + + # for paired samples, we need to remove the whole pair when any part + # has a nan + nans = np.isnan(samples[0]) + for sample in samples[1:]: + nans = nans | np.isnan(sample) + not_nans = ~nans + return [sample[not_nans] for sample in samples] + + +def _remove_sentinel(samples, paired, sentinel): + "Remove sentinel values from paired or unpaired 1D samples" + # could consolidate with `_remove_nans`, but it's not quite as simple as + # passing `sentinel=np.nan` because `(np.nan == np.nan) is False` + + # potential optimization: don't copy arrays that don't contain sentinel + if not paired: + return [sample[sample != sentinel] for sample in samples] + + # for paired samples, we need to remove the whole pair when any part + # has a nan + sentinels = (samples[0] == sentinel) + for sample in samples[1:]: + sentinels = sentinels | (sample == sentinel) + not_sentinels = ~sentinels + return [sample[not_sentinels] for sample in samples] + + +def _masked_arrays_2_sentinel_arrays(samples): + # masked arrays in `samples` are converted to regular arrays, and values + # corresponding with masked elements are replaced with a sentinel value + + # return without modifying arrays if none have a mask + has_mask = False + for sample in samples: + mask = getattr(sample, 'mask', False) + has_mask = has_mask or np.any(mask) + if 
not has_mask: + return samples, None # None means there is no sentinel value + + # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked) + # values are always omitted, but there are different nan policies. + dtype = np.result_type(*samples) + dtype = dtype if np.issubdtype(dtype, np.number) else np.float64 + for i in range(len(samples)): + # Things get more complicated if the arrays are of different types. + # We could have different sentinel values for each array, but + # the purpose of this code is convenience, not efficiency. + samples[i] = samples[i].astype(dtype, copy=False) + + inexact = np.issubdtype(dtype, np.inexact) + info = np.finfo if inexact else np.iinfo + max_possible, min_possible = info(dtype).max, info(dtype).min + nextafter = np.nextafter if inexact else (lambda x, _: x - 1) + + sentinel = max_possible + # For simplicity, min_possible/np.infs are not candidate sentinel values + while sentinel > min_possible: + for sample in samples: + if np.any(sample == sentinel): # choose a new sentinel value + sentinel = nextafter(sentinel, -np.inf) + break + else: # when sentinel value is OK, break the while loop + break + else: + message = ("This function replaces masked elements with sentinel " + "values, but the data contains all distinct values of this " + "data type. Consider promoting the dtype to `np.float64`.") + raise ValueError(message) + + # replace masked elements with sentinel value + out_samples = [] + for sample in samples: + mask = getattr(sample, 'mask', None) + if mask is not None: # turn all masked arrays into sentinel arrays + mask = np.broadcast_to(mask, sample.shape) + sample = sample.data.copy() if np.any(mask) else sample.data + sample = np.asarray(sample) # `sample.data` could be a memoryview? 
+ sample[mask] = sentinel + out_samples.append(sample) + + return out_samples, sentinel + + +def _check_empty_inputs(samples, axis): + """ + Check for empty sample; return appropriate output for a vectorized hypotest + """ + # if none of the samples are empty, we need to perform the test + if not any(sample.size == 0 for sample in samples): + return None + # otherwise, the statistic and p-value will be either empty arrays or + # arrays with NaNs. Produce the appropriate array and return it. + output_shape = _broadcast_array_shapes_remove_axis(samples, axis) + output = np.ones(output_shape) * _get_nan(*samples) + return output + + +def _add_reduced_axes(res, reduced_axes, keepdims): + """ + Add reduced axes back to all the arrays in the result object + if keepdims = True. + """ + return ([np.expand_dims(output, reduced_axes) + if not isinstance(output, int) else output for output in res] + if keepdims else res) + + +# Standard docstring / signature entries for `axis`, `nan_policy`, `keepdims` +_name = 'axis' +_desc = ( + """If an int, the axis of the input along which to compute the statistic. +The statistic of each axis-slice (e.g. row) of the input will appear in a +corresponding element of the output. +If ``None``, the input will be raveled before computing the statistic.""" + .split('\n')) + + +def _get_axis_params(default_axis=0, _name=_name, _desc=_desc): # bind NOW + _type = f"int or None, default: {default_axis}" + _axis_parameter_doc = Parameter(_name, _type, _desc) + _axis_parameter = inspect.Parameter(_name, + inspect.Parameter.KEYWORD_ONLY, + default=default_axis) + return _axis_parameter_doc, _axis_parameter + + +_name = 'nan_policy' +_type = "{'propagate', 'omit', 'raise'}" +_desc = ( + """Defines how to handle input NaNs. + +- ``propagate``: if a NaN is present in the axis slice (e.g. row) along + which the statistic is computed, the corresponding entry of the output + will be NaN. +- ``omit``: NaNs will be omitted when performing the calculation. 
+ If insufficient data remains in the axis slice along which the + statistic is computed, the corresponding entry of the output will be + NaN. +- ``raise``: if a NaN is present, a ``ValueError`` will be raised.""" + .split('\n')) +_nan_policy_parameter_doc = Parameter(_name, _type, _desc) +_nan_policy_parameter = inspect.Parameter(_name, + inspect.Parameter.KEYWORD_ONLY, + default='propagate') + +_name = 'keepdims' +_type = "bool, default: False" +_desc = ( + """If this is set to True, the axes which are reduced are left +in the result as dimensions with size one. With this option, +the result will broadcast correctly against the input array.""" + .split('\n')) +_keepdims_parameter_doc = Parameter(_name, _type, _desc) +_keepdims_parameter = inspect.Parameter(_name, + inspect.Parameter.KEYWORD_ONLY, + default=False) + +_standard_note_addition = ( + """\nBeginning in SciPy 1.9, ``np.matrix`` inputs (not recommended for new +code) are converted to ``np.ndarray`` before the calculation is performed. In +this case, the output will be a scalar or ``np.ndarray`` of appropriate shape +rather than a 2D ``np.matrix``. Similarly, while masked elements of masked +arrays are ignored, the output will be a scalar or ``np.ndarray`` rather than a +masked array with ``mask=False``.""").split('\n') + + +def _axis_nan_policy_factory(tuple_to_result, default_axis=0, + n_samples=1, paired=False, + result_to_tuple=None, too_small=0, + n_outputs=2, kwd_samples=[], override=None): + """Factory for a wrapper that adds axis/nan_policy params to a function. + + Parameters + ---------- + tuple_to_result : callable + Callable that returns an object of the type returned by the function + being wrapped (e.g. the namedtuple or dataclass returned by a + statistical test) provided the separate components (e.g. statistic, + pvalue). + default_axis : int, default: 0 + The default value of the axis argument. Standard is 0 except when + backwards compatibility demands otherwise (e.g. `None`). 
+ n_samples : int or callable, default: 1 + The number of data samples accepted by the function + (e.g. `mannwhitneyu`), a callable that accepts a dictionary of + parameters passed into the function and returns the number of data + samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number + of samples (e.g. `kruskal`). + paired : {False, True} + Whether the function being wrapped treats the samples as paired (i.e. + corresponding elements of each sample should be considered as different + components of the same sample.) + result_to_tuple : callable, optional + Function that unpacks the results of the function being wrapped into + a tuple. This is essentially the inverse of `tuple_to_result`. Default + is `None`, which is appropriate for statistical tests that return a + statistic, pvalue tuple (rather than, e.g., a non-iterable datalass). + too_small : int or callable, default: 0 + The largest unnacceptably small sample for the function being wrapped. + For example, some functions require samples of size two or more or they + raise an error. This argument prevents the error from being raised when + input is not 1D and instead places a NaN in the corresponding element + of the result. If callable, it must accept a list of samples, axis, + and a dictionary of keyword arguments passed to the wrapper function as + arguments and return a bool indicating weather the samples passed are + too small. + n_outputs : int or callable, default: 2 + The number of outputs produced by the function given 1d sample(s). For + example, hypothesis tests that return a namedtuple or result object + with attributes ``statistic`` and ``pvalue`` use the default + ``n_outputs=2``; summary statistics with scalar output use + ``n_outputs=1``. Alternatively, may be a callable that accepts a + dictionary of arguments passed into the wrapped function and returns + the number of outputs corresponding with those arguments. 
+ kwd_samples : sequence, default: [] + The names of keyword parameters that should be treated as samples. For + example, `gmean` accepts as its first argument a sample `a` but + also `weights` as a fourth, optional keyword argument. In this case, we + use `n_samples=1` and kwd_samples=['weights']. + override : dict, default: {'vectorization': False, 'nan_propagation': True} + Pass a dictionary with ``'vectorization': True`` to ensure that the + decorator overrides the function's behavior for multimensional input. + Use ``'nan_propagation': False`` to ensure that the decorator does not + override the function's behavior for ``nan_policy='propagate'``. + """ + # Specify which existing behaviors the decorator must override + temp = override or {} + override = {'vectorization': False, + 'nan_propagation': True} + override.update(temp) + + if result_to_tuple is None: + def result_to_tuple(res): + return res + + if not callable(too_small): + def is_too_small(samples, *ts_args, axis=-1, **ts_kwargs): + for sample in samples: + if sample.shape[axis] <= too_small: + return True + return False + else: + is_too_small = too_small + + def axis_nan_policy_decorator(hypotest_fun_in): + @wraps(hypotest_fun_in) + def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds): + + if _no_deco: # for testing, decorator does nothing + return hypotest_fun_in(*args, **kwds) + + # For now, skip the decorator entirely if using array API. In the future, + # we'll probably want to use it for `keepdims`, `axis` tuples, etc. 
+ if len(args) == 0: # extract sample from `kwds` if there are no `args` + used_kwd_samples = list(set(kwds).intersection(set(kwd_samples))) + temp = used_kwd_samples[:1] + else: + temp = args[0] + + if not is_numpy(array_namespace(temp)): + msg = ("Use of `nan_policy` and `keepdims` " + "is incompatible with non-NumPy arrays.") + if 'nan_policy' in kwds or 'keepdims' in kwds: + raise NotImplementedError(msg) + return hypotest_fun_in(*args, **kwds) + + # We need to be flexible about whether position or keyword + # arguments are used, but we need to make sure users don't pass + # both for the same parameter. To complicate matters, some + # functions accept samples with *args, and some functions already + # accept `axis` and `nan_policy` as positional arguments. + # The strategy is to make sure that there is no duplication + # between `args` and `kwds`, combine the two into `kwds`, then + # the samples, `nan_policy`, and `axis` from `kwds`, as they are + # dealt with separately. + + # Check for intersection between positional and keyword args + params = list(inspect.signature(hypotest_fun_in).parameters) + if n_samples is None: + # Give unique names to each positional sample argument + # Note that *args can't be provided as a keyword argument + params = [f"arg{i}" for i in range(len(args))] + params[1:] + + # raise if there are too many positional args + maxarg = (np.inf if inspect.getfullargspec(hypotest_fun_in).varargs + else len(inspect.getfullargspec(hypotest_fun_in).args)) + if len(args) > maxarg: # let the function raise the right error + hypotest_fun_in(*args, **kwds) + + # raise if multiple values passed for same parameter + d_args = dict(zip(params, args)) + intersection = set(d_args) & set(kwds) + if intersection: # let the function raise the right error + hypotest_fun_in(*args, **kwds) + + # Consolidate other positional and keyword args into `kwds` + kwds.update(d_args) + + # rename avoids UnboundLocalError + if callable(n_samples): + # Future refactoring 
idea: no need for callable n_samples. + # Just replace `n_samples` and `kwd_samples` with a single + # list of the names of all samples, and treat all of them + # as `kwd_samples` are treated below. + n_samp = n_samples(kwds) + else: + n_samp = n_samples or len(args) + + # get the number of outputs + n_out = n_outputs # rename to avoid UnboundLocalError + if callable(n_out): + n_out = n_out(kwds) + + # If necessary, rearrange function signature: accept other samples + # as positional args right after the first n_samp args + kwd_samp = [name for name in kwd_samples + if kwds.get(name, None) is not None] + n_kwd_samp = len(kwd_samp) + if not kwd_samp: + hypotest_fun_out = hypotest_fun_in + else: + def hypotest_fun_out(*samples, **kwds): + new_kwds = dict(zip(kwd_samp, samples[n_samp:])) + kwds.update(new_kwds) + return hypotest_fun_in(*samples[:n_samp], **kwds) + + # Extract the things we need here + try: # if something is missing + samples = [np.atleast_1d(kwds.pop(param)) + for param in (params[:n_samp] + kwd_samp)] + except KeyError: # let the function raise the right error + # might need to revisit this if required arg is not a "sample" + hypotest_fun_in(*args, **kwds) + vectorized = True if 'axis' in params else False + vectorized = vectorized and not override['vectorization'] + axis = kwds.pop('axis', default_axis) + nan_policy = kwds.pop('nan_policy', 'propagate') + keepdims = kwds.pop("keepdims", False) + del args # avoid the possibility of passing both `args` and `kwds` + + # convert masked arrays to regular arrays with sentinel values + samples, sentinel = _masked_arrays_2_sentinel_arrays(samples) + + # standardize to always work along last axis + reduced_axes = axis + if axis is None: + if samples: + # when axis=None, take the maximum of all dimensions since + # all the dimensions are reduced. 
+ n_dims = np.max([sample.ndim for sample in samples]) + reduced_axes = tuple(range(n_dims)) + samples = [np.asarray(sample.ravel()) for sample in samples] + else: + samples = _broadcast_arrays(samples, axis=axis) + axis = np.atleast_1d(axis) + n_axes = len(axis) + # move all axes in `axis` to the end to be raveled + samples = [np.moveaxis(sample, axis, range(-len(axis), 0)) + for sample in samples] + shapes = [sample.shape for sample in samples] + # New shape is unchanged for all axes _not_ in `axis` + # At the end, we append the product of the shapes of the axes + # in `axis`. Appending -1 doesn't work for zero-size arrays! + new_shapes = [shape[:-n_axes] + (np.prod(shape[-n_axes:]),) + for shape in shapes] + samples = [sample.reshape(new_shape) + for sample, new_shape in zip(samples, new_shapes)] + axis = -1 # work over the last axis + NaN = _get_nan(*samples) if samples else np.nan + + # if axis is not needed, just handle nan_policy and return + ndims = np.array([sample.ndim for sample in samples]) + if np.all(ndims <= 1): + # Addresses nan_policy == "raise" + if nan_policy != 'propagate' or override['nan_propagation']: + contains_nan = [_contains_nan(sample, nan_policy)[0] + for sample in samples] + else: + # Behave as though there are no NaNs (even if there are) + contains_nan = [False]*len(samples) + + # Addresses nan_policy == "propagate" + if any(contains_nan) and (nan_policy == 'propagate' + and override['nan_propagation']): + res = np.full(n_out, NaN) + res = _add_reduced_axes(res, reduced_axes, keepdims) + return tuple_to_result(*res) + + # Addresses nan_policy == "omit" + too_small_msg = too_small_1d_not_omit + if any(contains_nan) and nan_policy == 'omit': + # consider passing in contains_nan + samples = _remove_nans(samples, paired) + too_small_msg = too_small_1d_omit + + if sentinel: + samples = _remove_sentinel(samples, paired, sentinel) + + if is_too_small(samples, kwds): + warnings.warn(too_small_msg, SmallSampleWarning, stacklevel=2) + res = 
np.full(n_out, NaN) + res = _add_reduced_axes(res, reduced_axes, keepdims) + return tuple_to_result(*res) + + res = hypotest_fun_out(*samples, **kwds) + res = result_to_tuple(res) + res = _add_reduced_axes(res, reduced_axes, keepdims) + return tuple_to_result(*res) + + # check for empty input + empty_output = _check_empty_inputs(samples, axis) + # only return empty output if zero sized input is too small. + if ( + empty_output is not None + and (is_too_small(samples, kwds) or empty_output.size == 0) + ): + if is_too_small(samples, kwds) and empty_output.size != 0: + warnings.warn(too_small_nd_not_omit, SmallSampleWarning, + stacklevel=2) + res = [empty_output.copy() for i in range(n_out)] + res = _add_reduced_axes(res, reduced_axes, keepdims) + return tuple_to_result(*res) + + # otherwise, concatenate all samples along axis, remembering where + # each separate sample begins + lengths = np.array([sample.shape[axis] for sample in samples]) + split_indices = np.cumsum(lengths) + x = _broadcast_concatenate(samples, axis) + + # Addresses nan_policy == "raise" + if nan_policy != 'propagate' or override['nan_propagation']: + contains_nan, _ = _contains_nan(x, nan_policy) + else: + contains_nan = False # behave like there are no NaNs + + if vectorized and not contains_nan and not sentinel: + res = hypotest_fun_out(*samples, axis=axis, **kwds) + res = result_to_tuple(res) + res = _add_reduced_axes(res, reduced_axes, keepdims) + return tuple_to_result(*res) + + # Addresses nan_policy == "omit" + if contains_nan and nan_policy == 'omit': + def hypotest_fun(x): + samples = np.split(x, split_indices)[:n_samp+n_kwd_samp] + samples = _remove_nans(samples, paired) + if sentinel: + samples = _remove_sentinel(samples, paired, sentinel) + if is_too_small(samples, kwds): + warnings.warn(too_small_nd_omit, SmallSampleWarning, + stacklevel=4) + return np.full(n_out, NaN) + return result_to_tuple(hypotest_fun_out(*samples, **kwds)) + + # Addresses nan_policy == "propagate" + elif 
(contains_nan and nan_policy == 'propagate' + and override['nan_propagation']): + def hypotest_fun(x): + if np.isnan(x).any(): + return np.full(n_out, NaN) + + samples = np.split(x, split_indices)[:n_samp+n_kwd_samp] + if sentinel: + samples = _remove_sentinel(samples, paired, sentinel) + if is_too_small(samples, kwds): + return np.full(n_out, NaN) + return result_to_tuple(hypotest_fun_out(*samples, **kwds)) + + else: + def hypotest_fun(x): + samples = np.split(x, split_indices)[:n_samp+n_kwd_samp] + if sentinel: + samples = _remove_sentinel(samples, paired, sentinel) + if is_too_small(samples, kwds): + return np.full(n_out, NaN) + return result_to_tuple(hypotest_fun_out(*samples, **kwds)) + + x = np.moveaxis(x, axis, 0) + res = np.apply_along_axis(hypotest_fun, axis=0, arr=x) + res = _add_reduced_axes(res, reduced_axes, keepdims) + return tuple_to_result(*res) + + _axis_parameter_doc, _axis_parameter = _get_axis_params(default_axis) + doc = FunctionDoc(axis_nan_policy_wrapper) + parameter_names = [param.name for param in doc['Parameters']] + if 'axis' in parameter_names: + doc['Parameters'][parameter_names.index('axis')] = ( + _axis_parameter_doc) + else: + doc['Parameters'].append(_axis_parameter_doc) + if 'nan_policy' in parameter_names: + doc['Parameters'][parameter_names.index('nan_policy')] = ( + _nan_policy_parameter_doc) + else: + doc['Parameters'].append(_nan_policy_parameter_doc) + if 'keepdims' in parameter_names: + doc['Parameters'][parameter_names.index('keepdims')] = ( + _keepdims_parameter_doc) + else: + doc['Parameters'].append(_keepdims_parameter_doc) + doc['Notes'] += _standard_note_addition + doc = str(doc).split("\n", 1)[1] # remove signature + axis_nan_policy_wrapper.__doc__ = str(doc) + + sig = inspect.signature(axis_nan_policy_wrapper) + parameters = sig.parameters + parameter_list = list(parameters.values()) + if 'axis' not in parameters: + parameter_list.append(_axis_parameter) + if 'nan_policy' not in parameters: + 
parameter_list.append(_nan_policy_parameter) + if 'keepdims' not in parameters: + parameter_list.append(_keepdims_parameter) + sig = sig.replace(parameters=parameter_list) + axis_nan_policy_wrapper.__signature__ = sig + + return axis_nan_policy_wrapper + return axis_nan_policy_decorator diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd b/parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd new file mode 100644 index 0000000000000000000000000000000000000000..92785f08dbec30a4db286fcb85b42d7221e2228e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd @@ -0,0 +1,27 @@ +# Declare the class with cdef +cdef extern from "biasedurn/stocc.h" nogil: + cdef cppclass CFishersNCHypergeometric: + CFishersNCHypergeometric(int, int, int, double, double) except + + int mode() + double mean() + double variance() + double probability(int x) + double moments(double * mean, double * var) + + cdef cppclass CWalleniusNCHypergeometric: + CWalleniusNCHypergeometric() except + + CWalleniusNCHypergeometric(int, int, int, double, double) except + + int mode() + double mean() + double variance() + double probability(int x) + double moments(double * mean, double * var) + + cdef cppclass StochasticLib3: + StochasticLib3(int seed) except + + double Random() except + + void SetAccuracy(double accur) + int FishersNCHyp (int n, int m, int N, double odds) except + + int WalleniusNCHyp (int n, int m, int N, double odds) except + + double(*next_double)() + double(*next_normal)(const double m, const double s) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_binomtest.py b/parrot/lib/python3.10/site-packages/scipy/stats/_binomtest.py new file mode 100644 index 0000000000000000000000000000000000000000..bdf21117383374e730ab052fcbb0b5b7fca029c1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_binomtest.py @@ -0,0 +1,375 @@ +from math import sqrt +import numpy as np +from scipy._lib._util import _validate_int 
+from scipy.optimize import brentq +from scipy.special import ndtri +from ._discrete_distns import binom +from ._common import ConfidenceInterval + + +class BinomTestResult: + """ + Result of `scipy.stats.binomtest`. + + Attributes + ---------- + k : int + The number of successes (copied from `binomtest` input). + n : int + The number of trials (copied from `binomtest` input). + alternative : str + Indicates the alternative hypothesis specified in the input + to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``, + or ``'less'``. + statistic: float + The estimate of the proportion of successes. + pvalue : float + The p-value of the hypothesis test. + + """ + def __init__(self, k, n, alternative, statistic, pvalue): + self.k = k + self.n = n + self.alternative = alternative + self.statistic = statistic + self.pvalue = pvalue + + # add alias for backward compatibility + self.proportion_estimate = statistic + + def __repr__(self): + s = ("BinomTestResult(" + f"k={self.k}, " + f"n={self.n}, " + f"alternative={self.alternative!r}, " + f"statistic={self.statistic}, " + f"pvalue={self.pvalue})") + return s + + def proportion_ci(self, confidence_level=0.95, method='exact'): + """ + Compute the confidence interval for ``statistic``. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval + of the estimated proportion. Default is 0.95. + method : {'exact', 'wilson', 'wilsoncc'}, optional + Selects the method used to compute the confidence interval + for the estimate of the proportion: + + 'exact' : + Use the Clopper-Pearson exact method [1]_. + 'wilson' : + Wilson's method, without continuity correction ([2]_, [3]_). + 'wilsoncc' : + Wilson's method, with continuity correction ([2]_, [3]_). + + Default is ``'exact'``. + + Returns + ------- + ci : ``ConfidenceInterval`` object + The object has attributes ``low`` and ``high`` that hold the + lower and upper bounds of the confidence interval. 
+ + References + ---------- + .. [1] C. J. Clopper and E. S. Pearson, The use of confidence or + fiducial limits illustrated in the case of the binomial, + Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934). + .. [2] E. B. Wilson, Probable inference, the law of succession, and + statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212 + (1927). + .. [3] Robert G. Newcombe, Two-sided confidence intervals for the + single proportion: comparison of seven methods, Statistics + in Medicine, 17, pp 857-872 (1998). + + Examples + -------- + >>> from scipy.stats import binomtest + >>> result = binomtest(k=7, n=50, p=0.1) + >>> result.statistic + 0.14 + >>> result.proportion_ci() + ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846) + """ + if method not in ('exact', 'wilson', 'wilsoncc'): + raise ValueError(f"method ('{method}') must be one of 'exact', " + "'wilson' or 'wilsoncc'.") + if not (0 <= confidence_level <= 1): + raise ValueError(f'confidence_level ({confidence_level}) must be in ' + 'the interval [0, 1].') + if method == 'exact': + low, high = _binom_exact_conf_int(self.k, self.n, + confidence_level, + self.alternative) + else: + # method is 'wilson' or 'wilsoncc' + low, high = _binom_wilson_conf_int(self.k, self.n, + confidence_level, + self.alternative, + correction=method == 'wilsoncc') + return ConfidenceInterval(low=low, high=high) + + +def _findp(func): + try: + p = brentq(func, 0, 1) + except RuntimeError: + raise RuntimeError('numerical solver failed to converge when ' + 'computing the confidence limits') from None + except ValueError as exc: + raise ValueError('brentq raised a ValueError; report this to the ' + 'SciPy developers') from exc + return p + + +def _binom_exact_conf_int(k, n, confidence_level, alternative): + """ + Compute the estimate and confidence interval for the binomial test. 
+ + Returns proportion, prop_low, prop_high + """ + if alternative == 'two-sided': + alpha = (1 - confidence_level) / 2 + if k == 0: + plow = 0.0 + else: + plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha) + if k == n: + phigh = 1.0 + else: + phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha) + elif alternative == 'less': + alpha = 1 - confidence_level + plow = 0.0 + if k == n: + phigh = 1.0 + else: + phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha) + elif alternative == 'greater': + alpha = 1 - confidence_level + if k == 0: + plow = 0.0 + else: + plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha) + phigh = 1.0 + return plow, phigh + + +def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction): + # This function assumes that the arguments have already been validated. + # In particular, `alternative` must be one of 'two-sided', 'less' or + # 'greater'. + p = k / n + if alternative == 'two-sided': + z = ndtri(0.5 + 0.5*confidence_level) + else: + z = ndtri(confidence_level) + + # For reference, the formulas implemented here are from + # Newcombe (1998) (ref. [3] in the proportion_ci docstring). + denom = 2*(n + z**2) + center = (2*n*p + z**2)/denom + q = 1 - p + if correction: + if alternative == 'less' or k == 0: + lo = 0.0 + else: + dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom + lo = center - dlo + if alternative == 'greater' or k == n: + hi = 1.0 + else: + dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom + hi = center + dhi + else: + delta = z/denom * sqrt(4*n*p*q + z**2) + if alternative == 'less' or k == 0: + lo = 0.0 + else: + lo = center - delta + if alternative == 'greater' or k == n: + hi = 1.0 + else: + hi = center + delta + + return lo, hi + + +def binomtest(k, n, p=0.5, alternative='two-sided'): + """ + Perform a test that the probability of success is p. + + The binomial test [1]_ is a test of the null hypothesis that the + probability of success in a Bernoulli experiment is `p`. 
+ + Details of the test can be found in many texts on statistics, such + as section 24.5 of [2]_. + + Parameters + ---------- + k : int + The number of successes. + n : int + The number of trials. + p : float, optional + The hypothesized probability of success, i.e. the expected + proportion of successes. The value must be in the interval + ``0 <= p <= 1``. The default value is ``p = 0.5``. + alternative : {'two-sided', 'greater', 'less'}, optional + Indicates the alternative hypothesis. The default value is + 'two-sided'. + + Returns + ------- + result : `~scipy.stats._result_classes.BinomTestResult` instance + The return value is an object with the following attributes: + + k : int + The number of successes (copied from `binomtest` input). + n : int + The number of trials (copied from `binomtest` input). + alternative : str + Indicates the alternative hypothesis specified in the input + to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``, + or ``'less'``. + statistic : float + The estimate of the proportion of successes. + pvalue : float + The p-value of the hypothesis test. + + The object has the following methods: + + proportion_ci(confidence_level=0.95, method='exact') : + Compute the confidence interval for ``statistic``. + + Notes + ----- + .. versionadded:: 1.7.0 + + References + ---------- + .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test + .. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition), + Prentice Hall, Upper Saddle River, New Jersey USA (2010) + + Examples + -------- + >>> from scipy.stats import binomtest + + A car manufacturer claims that no more than 10% of their cars are unsafe. + 15 cars are inspected for safety, 3 were found to be unsafe. 
Test the + manufacturer's claim: + + >>> result = binomtest(3, n=15, p=0.1, alternative='greater') + >>> result.pvalue + 0.18406106910639114 + + The null hypothesis cannot be rejected at the 5% level of significance + because the returned p-value is greater than the critical value of 5%. + + The test statistic is equal to the estimated proportion, which is simply + ``3/15``: + + >>> result.statistic + 0.2 + + We can use the `proportion_ci()` method of the result to compute the + confidence interval of the estimate: + + >>> result.proportion_ci(confidence_level=0.95) + ConfidenceInterval(low=0.05684686759024681, high=1.0) + + """ + k = _validate_int(k, 'k', minimum=0) + n = _validate_int(n, 'n', minimum=1) + if k > n: + raise ValueError(f'k ({k}) must not be greater than n ({n}).') + + if not (0 <= p <= 1): + raise ValueError(f"p ({p}) must be in range [0,1]") + + if alternative not in ('two-sided', 'less', 'greater'): + raise ValueError(f"alternative ('{alternative}') not recognized; \n" + "must be 'two-sided', 'less' or 'greater'") + if alternative == 'less': + pval = binom.cdf(k, n, p) + elif alternative == 'greater': + pval = binom.sf(k-1, n, p) + else: + # alternative is 'two-sided' + d = binom.pmf(k, n, p) + rerr = 1 + 1e-7 + if k == p * n: + # special case as shortcut, would also be handled by `else` below + pval = 1. + elif k < p * n: + ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p), + -d*rerr, np.ceil(p * n), n) + # y is the number of terms between mode and n that are <= d*rerr. + # ix gave us the first term where a(ix) <= d*rerr < a(ix-1) + # if the first equality doesn't hold, y=n-ix. Otherwise, we + # need to include ix as well as the equality holds. Note that + # the equality will hold in very very rare situations due to rerr. 
+ y = n - ix + int(d*rerr == binom.pmf(ix, n, p)) + pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p) + else: + ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p), + d*rerr, 0, np.floor(p * n)) + # y is the number of terms between 0 and mode that are <= d*rerr. + # we need to add a 1 to account for the 0 index. + # For comparing this with old behavior, see + # tst_binary_srch_for_binom_tst method in test_morestats. + y = ix + 1 + pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p) + + pval = min(1.0, pval) + + result = BinomTestResult(k=k, n=n, alternative=alternative, + statistic=k/n, pvalue=pval) + return result + + +def _binary_search_for_binom_tst(a, d, lo, hi): + """ + Conducts an implicit binary search on a function specified by `a`. + + Meant to be used on the binomial PMF for the case of two-sided tests + to obtain the value on the other side of the mode where the tail + probability should be computed. The values on either side of + the mode are always in order, meaning binary search is applicable. + + Parameters + ---------- + a : callable + The function over which to perform binary search. Its values + for inputs lo and hi should be in ascending order. + d : float + The value to search. + lo : int + The lower end of range to search. + hi : int + The higher end of the range to search. 
+ + Returns + ------- + int + The index, i between lo and hi + such that a(i)<=d d: + hi = mid-1 + else: + return mid + if a(lo) <= d: + return lo + else: + return lo-1 diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_bws_test.py b/parrot/lib/python3.10/site-packages/scipy/stats/_bws_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6496ecfba798dc7ad719f784a57896e296590675 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_bws_test.py @@ -0,0 +1,177 @@ +import numpy as np +from functools import partial +from scipy import stats + + +def _bws_input_validation(x, y, alternative, method): + ''' Input validation and standardization for bws test''' + x, y = np.atleast_1d(x, y) + if x.ndim > 1 or y.ndim > 1: + raise ValueError('`x` and `y` must be exactly one-dimensional.') + if np.isnan(x).any() or np.isnan(y).any(): + raise ValueError('`x` and `y` must not contain NaNs.') + if np.size(x) == 0 or np.size(y) == 0: + raise ValueError('`x` and `y` must be of nonzero size.') + + z = stats.rankdata(np.concatenate((x, y))) + x, y = z[:len(x)], z[len(x):] + + alternatives = {'two-sided', 'less', 'greater'} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f'`alternative` must be one of {alternatives}.') + + method = stats.PermutationMethod() if method is None else method + if not isinstance(method, stats.PermutationMethod): + raise ValueError('`method` must be an instance of ' + '`scipy.stats.PermutationMethod`') + + return x, y, alternative, method + + +def _bws_statistic(x, y, alternative, axis): + '''Compute the BWS test statistic for two independent samples''' + # Public function currently does not accept `axis`, but `permutation_test` + # uses `axis` to make vectorized call. 
def bws_test(x, y, *, alternative="two-sided", method=None):
    r'''Perform the Baumgartner-Weiss-Schindler test on two independent samples.

    The BWS test is a nonparametric test of the null hypothesis that the
    distributions underlying samples `x` and `y` are identical.  Unlike the
    Kolmogorov-Smirnov, Wilcoxon, and Cramer-von Mises tests, it weights the
    integral by the variance of the difference in cumulative distribution
    functions (CDFs), emphasizing the tails, which increases the power of
    the test in many applications.

    Parameters
    ----------
    x, y : array-like
        1-d arrays of samples.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis (default ``'two-sided'``).
        With *F(u)* and *G(u)* the CDFs underlying `x` and `y`:

        * 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)*
          for at least one *u*.
        * 'less': the distribution underlying `x` is stochastically less
          than that underlying `y`, i.e. *F(u) >= G(u)* for all *u*.
        * 'greater': the distribution underlying `x` is stochastically
          greater than that underlying `y`, i.e. *F(u) <= G(u)* for all *u*.

        Under a more restrictive set of assumptions the alternatives can be
        expressed in terms of the locations of the distributions; see [2]_
        section 5.1.
    method : PermutationMethod, optional
        Configures the method used to compute the p-value.  The default is
        the default `PermutationMethod` object.

    Returns
    -------
    res : PermutationTestResult
        An object with attributes:

        statistic : float
            The observed test statistic of the data.
        pvalue : float
            The p-value for the given alternative.
        null_distribution : ndarray
            The values of the test statistic generated under the null
            hypothesis.

    See Also
    --------
    scipy.stats.wilcoxon, scipy.stats.mannwhitneyu, scipy.stats.ttest_ind

    Notes
    -----
    When ``alternative=='two-sided'`` the statistic follows the equations
    given in [1]_ Section 2.  That statistic is not appropriate for
    one-sided alternatives; in that case the *negative* of the Section 2
    statistic is used, so the statistic tends to be positive when the
    distribution of the first sample is stochastically greater than that of
    the second.

    References
    ----------
    .. [1] Neuhäuser, M. (2005). Exact Tests Based on the
           Baumgartner-Weiss-Schindler Statistic: A Survey. Statistical
           Papers, 46(1), 1-29.
    .. [2] Fay, M. P., & Proschan, M. A. (2010). Wilcoxon-Mann-Whitney or
           t-test? On assumptions for hypothesis tests and multiple
           interpretations of decision rules. Statistics surveys, 4, 1.

    Examples
    --------
    Following table 3 of [1]_, fourteen children were randomly divided into
    two groups; their ranks at performing a specific test were:

    >>> from scipy.stats import bws_test
    >>> x = [1, 2, 3, 4, 6, 7, 8]
    >>> y = [5, 9, 10, 11, 12, 13, 14]
    >>> res = bws_test(x, y)
    >>> print(res.statistic)
    5.132167152575315

    This agrees with :math:`B = 5.132` reported in [1]_, and the p-value
    agrees with the reported :math:`p = 0.0029`:

    >>> print(res.pvalue)
    0.002913752913752914
    '''
    # Normalize/validate the inputs, then delegate to the generic
    # permutation-test machinery with the BWS statistic bound to the
    # requested alternative.
    x, y, alternative, method = _bws_input_validation(x, y, alternative,
                                                      method)
    statistic = partial(_bws_statistic, alternative=alternative)

    # Because of how the statistic is signed, 'greater' is the correct
    # permutation alternative for both 'two-sided' and 'greater'.
    perm_alternative = 'less' if alternative == 'less' else 'greater'
    return stats.permutation_test((x, y), statistic,
                                  alternative=perm_alternative,
                                  **method._asdict())
def _validate_1d(a, name, allow_inf=False):
    # Reject anything that is not a finite (or optionally infinite) 1-d
    # sequence of non-nan numbers.
    if np.ndim(a) != 1:
        raise ValueError(f'`{name}` must be a one-dimensional sequence.')
    if np.isnan(a).any():
        raise ValueError(f'`{name}` must not contain nan.')
    if not allow_inf and np.isinf(a).any():
        raise ValueError(f'`{name}` must contain only finite values.')


def _validate_interval(interval):
    # Validate the (m, 2) array of intervals and classify each row as
    # uncensored, left-censored, right-censored, or truly interval-censored.
    interval = np.asarray(interval)
    if interval.shape == (0,):
        # An empty sequence was given; normalize it to shape (0, 2).
        interval = interval.reshape((0, 2))
    if interval.ndim != 2 or interval.shape[-1] != 2:
        raise ValueError('`interval` must be a two-dimensional array with '
                         'shape (m, 2), where m is the number of '
                         'interval-censored values, but got shape '
                         f'{interval.shape}')

    if np.isnan(interval).any():
        raise ValueError('`interval` must not contain nan.')
    if np.isinf(interval).all(axis=1).any():
        raise ValueError('In each row in `interval`, both values must not'
                         ' be infinite.')
    if (interval[:, 0] > interval[:, 1]).any():
        raise ValueError('In each row of `interval`, the left value must not'
                         ' exceed the right value.')

    # Classification of rows: zero-length intervals are uncensored; a -inf
    # lower bound means left-censored; an inf upper bound means
    # right-censored; both-finite nonzero-length rows are interval-censored.
    is_uncensored = interval[:, 0] == interval[:, 1]
    is_left = np.isinf(interval[:, 0])
    is_right = np.isinf(interval[:, 1])
    is_interval = np.isfinite(interval).all(axis=1) & ~is_uncensored

    return (interval[is_uncensored, 0], interval[is_left, 1],
            interval[is_right, 0], interval[is_interval])


def _validate_x_censored(x, censored):
    # Validate the (values, censor-flags) pair used by the left_censored
    # and right_censored class methods.
    x = np.asarray(x)
    if x.ndim != 1:
        raise ValueError('`x` must be one-dimensional.')
    censored = np.asarray(censored)
    if censored.ndim != 1:
        raise ValueError('`censored` must be one-dimensional.')
    if (~np.isfinite(x)).any():
        raise ValueError('`x` must not contain nan or inf.')
    if censored.size != x.size:
        raise ValueError('`x` and `censored` must have the same length.')
    return x, censored.astype(bool)


class CensoredData:
    """
    Instances of this class represent censored data.

    Instances may be passed to the ``fit`` method of continuous univariate
    SciPy distributions for maximum likelihood estimation; ``fit`` is the
    *only* distribution method that understands `CensoredData` (it cannot be
    passed to e.g. ``pdf`` or ``cdf``).

    An observation is *censored* when its precise value is unknown but it
    has a known bound:

    * left-censored: the observation is below a certain value, but it is
      unknown by how much.
    * right-censored: the observation is above a certain value, but it is
      unknown by how much.
    * interval-censored: the observation lies somewhere between two values.

    The convenience class methods ``left_censored`` and ``right_censored``
    build an instance from one array of measurements plus a boolean mask of
    which measurements are censored; ``interval_censored`` builds one from
    arrays of interval lower and upper bounds.

    Parameters
    ----------
    uncensored : array_like, 1D
        Uncensored observations.
    left : array_like, 1D
        Left-censored observations.
    right : array_like, 1D
        Right-censored observations.
    interval : array_like, 2D, with shape (m, 2)
        Interval-censored observations; row ``interval[k, :]`` is the
        interval for the kth interval-censored observation.

    Notes
    -----
    In `interval`, the lower bound may be ``-inf`` and the upper bound may
    be ``inf``, but at least one must be finite.  A ``-inf`` lower bound is
    treated as left-censored, an ``inf`` upper bound as right-censored, and
    a zero-length interval (``interval[k, 0] == interval[k, 1]``) as
    uncensored — so `interval` alone can express every kind of observation,
    though `uncensored`, `left` and `right` are usually more convenient.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import CensoredData
    >>> data = CensoredData(uncensored=[1, 1.5], left=[0], right=[10],
    ...                     interval=[[2, 3]])
    >>> print(data)
    CensoredData(5 values: 2 not censored, 1 left-censored,
    1 right-censored, 1 interval-censored)

    A mix of uncensored and right-censored values (e.g. times-to-failure
    where two devices had not yet failed) can be built directly

    >>> data = CensoredData(uncensored=[13, 22, 17, 15], right=[20, 18])
    >>> print(data)
    CensoredData(6 values: 4 not censored, 2 right-censored)

    or from a flat array plus a censoring mask:

    >>> ttf = [13, 22, 17, 15, 20, 18]
    >>> censored = [False, False, False, False, True, True]
    >>> data = CensoredData.right_censored(ttf, censored)
    >>> print(data)
    CensoredData(6 values: 4 not censored, 2 right-censored)

    Interval-censored data stored as two arrays of bounds:

    >>> a = [10, 0.5, 2, 12.5]   # low ends
    >>> b = [11, 1.0, 3, 13.5]   # high ends
    >>> data = CensoredData.interval_censored(low=a, high=b)
    >>> print(data)
    CensoredData(4 values: 0 not censored, 4 interval-censored)
    """

    def __init__(self, uncensored=None, *, left=None, right=None,
                 interval=None):
        # Missing groups default to empty collections of the right shape.
        uncensored = [] if uncensored is None else uncensored
        left = [] if left is None else left
        right = [] if right is None else right
        interval = np.empty((0, 2)) if interval is None else interval

        _validate_1d(uncensored, 'uncensored')
        _validate_1d(left, 'left')
        _validate_1d(right, 'right')
        # Rows of `interval` may encode any censoring kind; fold them into
        # the appropriate groups.
        extra_unc, extra_left, extra_right, true_intervals = \
            _validate_interval(interval)

        self._uncensored = np.concatenate((uncensored, extra_unc))
        self._left = np.concatenate((left, extra_left))
        self._right = np.concatenate((right, extra_right))
        # By construction _interval is a 2D array holding only finite
        # intervals with nonzero length.
        self._interval = true_intervals

    def __repr__(self):
        # Collapse numpy's multi-line reprs onto single lines.
        parts = {name: " ".join(np.array_repr(arr).split())
                 for name, arr in (('uncensored', self._uncensored),
                                   ('left', self._left),
                                   ('right', self._right),
                                   ('interval', self._interval))}
        return (f"CensoredData(uncensored={parts['uncensored']}, "
                f"left={parts['left']}, "
                f"right={parts['right']}, interval={parts['interval']})")

    def __str__(self):
        counts = (len(self._uncensored), len(self._left),
                  len(self._right), len(self._interval))
        num_nc, num_lc, num_rc, num_ic = counts
        total = sum(counts)
        pieces = [f'{num_nc} not censored']
        if num_lc > 0:
            pieces.append(f'{num_lc} left-censored')
        if num_rc > 0:
            pieces.append(f'{num_rc} right-censored')
        if num_ic > 0:
            pieces.append(f'{num_ic} interval-censored')
        return f'CensoredData({total} values: ' + ', '.join(pieces) + ')'

    # Only the arithmetic needed by distribution fitting is implemented:
    # subtracting a scalar and dividing by a scalar.

    def __sub__(self, other):
        return CensoredData(uncensored=self._uncensored - other,
                            left=self._left - other,
                            right=self._right - other,
                            interval=self._interval - other)

    def __truediv__(self, other):
        return CensoredData(uncensored=self._uncensored / other,
                            left=self._left / other,
                            right=self._right / other,
                            interval=self._interval / other)

    def __len__(self):
        """
        The number of values (censored and not censored).
        """
        return (len(self._uncensored) + len(self._left) + len(self._right)
                + len(self._interval))

    def num_censored(self):
        """
        Number of censored values.
        """
        return len(self._left) + len(self._right) + len(self._interval)

    @classmethod
    def right_censored(cls, x, censored):
        """
        Create a `CensoredData` instance of right-censored data.

        Parameters
        ----------
        x : array_like
            One-dimensional sequence of finite observed values.
        censored : array_like of bool
            One-dimensional boolean sequence.  Where ``censored[k]`` is
            True, ``x[k]`` is right-censored, i.e. it is the lower bound
            of the true (but unknown) value.

        Returns
        -------
        data : `CensoredData`
            The collection of uncensored and right-censored values.

        Examples
        --------
        >>> from scipy.stats import CensoredData
        >>> data = CensoredData.right_censored([4, 10, 24, 25],
        ...                                    [False, False, True, True])
        >>> print(data)
        CensoredData(4 values: 2 not censored, 2 right-censored)
        """
        x, censored = _validate_x_censored(x, censored)
        return cls(uncensored=x[~censored], right=x[censored])

    @classmethod
    def left_censored(cls, x, censored):
        """
        Create a `CensoredData` instance of left-censored data.

        Parameters
        ----------
        x : array_like
            One-dimensional sequence of finite observed values.
        censored : array_like of bool
            One-dimensional boolean sequence.  Where ``censored[k]`` is
            True, ``x[k]`` is left-censored, i.e. it is the upper bound
            of the true (but unknown) value.

        Returns
        -------
        data : `CensoredData`
            The collection of uncensored and left-censored values.

        Examples
        --------
        >>> from scipy.stats import CensoredData
        >>> data = CensoredData.left_censored([0.12, 0.033, 1e-3, 1e-3],
        ...                                   [False, False, True, True])
        >>> print(data)
        CensoredData(4 values: 2 not censored, 2 left-censored)
        """
        x, censored = _validate_x_censored(x, censored)
        return cls(uncensored=x[~censored], left=x[censored])

    @classmethod
    def interval_censored(cls, low, high):
        """
        Create a `CensoredData` instance of interval-censored data.

        Useful when all the data is interval-censored and the low and high
        ends of the intervals are already in separate 1-d arrays.

        Parameters
        ----------
        low : array_like
            One-dimensional array of interval low ends.
        high : array_like
            One-dimensional array of interval high ends.

        Returns
        -------
        data : `CensoredData`
            The collection of censored values.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.stats import CensoredData
        >>> a = [0.5, 2.0, 3.0, 5.5]
        >>> b = [1.0, 2.5, 3.5, 7.0]
        >>> data = CensoredData.interval_censored(low=a, high=b)
        >>> print(data)
        CensoredData(4 values: 0 not censored, 4 interval-censored)
        """
        _validate_1d(low, 'low', allow_inf=True)
        _validate_1d(high, 'high', allow_inf=True)
        if len(low) != len(high):
            raise ValueError('`low` and `high` must have the same length.')
        bounds = np.column_stack((low, high))
        # Rows may still encode uncensored/left/right observations;
        # _validate_interval sorts them out.
        uncensored, left, right, interval = _validate_interval(bounds)
        return cls(uncensored=uncensored, left=left, right=right,
                   interval=interval)

    def _uncensor(self):
        """
        Produce a plain array approximation of the data, used to seed
        method-of-moments style initial parameter guesses: censored
        endpoints stand in for left-/right-censored values and interval
        midpoints stand in for interval-censored values.
        """
        return np.concatenate((self._uncensored, self._left, self._right,
                               self._interval.mean(axis=1)))

    def _supported(self, a, b):
        """
        Return the subset of self whose values lie in (or whose intervals
        overlap with) the open interval (a, b).
        """
        unc = self._uncensored
        unc = unc[(a < unc) & (unc < b)]
        lo = self._left
        lo = lo[a < lo]
        hi = self._right
        hi = hi[hi < b]
        ivl = self._interval
        ivl = ivl[(a < ivl[:, 1]) & (ivl[:, 0] < b)]
        return CensoredData(unc, left=lo, right=hi, interval=ivl)
+ """ + uncensored = self._uncensored + uncensored = uncensored[(a < uncensored) & (uncensored < b)] + left = self._left + left = left[a < left] + right = self._right + right = right[right < b] + interval = self._interval + interval = interval[(a < interval[:, 1]) & (interval[:, 0] < b)] + return CensoredData(uncensored, left=left, right=right, + interval=interval) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_crosstab.py b/parrot/lib/python3.10/site-packages/scipy/stats/_crosstab.py new file mode 100644 index 0000000000000000000000000000000000000000..6c267ff85eafca6b805909d8fbc241f03cd4acbc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_crosstab.py @@ -0,0 +1,204 @@ +import numpy as np +from scipy.sparse import coo_matrix +from scipy._lib._bunch import _make_tuple_bunch + + +CrosstabResult = _make_tuple_bunch( + "CrosstabResult", ["elements", "count"] +) + + +def crosstab(*args, levels=None, sparse=False): + """ + Return table of counts for each possible unique combination in ``*args``. + + When ``len(args) > 1``, the array computed by this function is + often referred to as a *contingency table* [1]_. + + The arguments must be sequences with the same length. The second return + value, `count`, is an integer array with ``len(args)`` dimensions. If + `levels` is None, the shape of `count` is ``(n0, n1, ...)``, where ``nk`` + is the number of unique elements in ``args[k]``. + + Parameters + ---------- + *args : sequences + A sequence of sequences whose unique aligned elements are to be + counted. The sequences in args must all be the same length. + levels : sequence, optional + If `levels` is given, it must be a sequence that is the same length as + `args`. Each element in `levels` is either a sequence or None. If it + is a sequence, it gives the values in the corresponding sequence in + `args` that are to be counted. 
If any value in the sequences in `args` + does not occur in the corresponding sequence in `levels`, that value + is ignored and not counted in the returned array `count`. The default + value of `levels` for ``args[i]`` is ``np.unique(args[i])`` + sparse : bool, optional + If True, return a sparse matrix. The matrix will be an instance of + the `scipy.sparse.coo_matrix` class. Because SciPy's sparse matrices + must be 2-d, only two input sequences are allowed when `sparse` is + True. Default is False. + + Returns + ------- + res : CrosstabResult + An object containing the following attributes: + + elements : tuple of numpy.ndarrays. + Tuple of length ``len(args)`` containing the arrays of elements + that are counted in `count`. These can be interpreted as the + labels of the corresponding dimensions of `count`. If `levels` was + given, then if ``levels[i]`` is not None, ``elements[i]`` will + hold the values given in ``levels[i]``. + count : numpy.ndarray or scipy.sparse.coo_matrix + Counts of the unique elements in ``zip(*args)``, stored in an + array. Also known as a *contingency table* when ``len(args) > 1``. + + See Also + -------- + numpy.unique + + Notes + ----- + .. versionadded:: 1.7.0 + + References + ---------- + .. [1] "Contingency table", http://en.wikipedia.org/wiki/Contingency_table + + Examples + -------- + >>> from scipy.stats.contingency import crosstab + + Given the lists `a` and `x`, create a contingency table that counts the + frequencies of the corresponding pairs. + + >>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B'] + >>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z'] + >>> res = crosstab(a, x) + >>> avals, xvals = res.elements + >>> avals + array(['A', 'B'], dtype='>> xvals + array(['X', 'Y', 'Z'], dtype='>> res.count + array([[2, 3, 0], + [1, 0, 4]]) + + So `('A', 'X')` occurs twice, `('A', 'Y')` occurs three times, etc. + + Higher dimensional contingency tables can be created. 
+ + >>> p = [0, 0, 0, 0, 1, 1, 1, 0, 0, 1] + >>> res = crosstab(a, x, p) + >>> res.count + array([[[2, 0], + [2, 1], + [0, 0]], + [[1, 0], + [0, 0], + [1, 3]]]) + >>> res.count.shape + (2, 3, 2) + + The values to be counted can be set by using the `levels` argument. + It allows the elements of interest in each input sequence to be + given explicitly instead finding the unique elements of the sequence. + + For example, suppose one of the arguments is an array containing the + answers to a survey question, with integer values 1 to 4. Even if the + value 1 does not occur in the data, we want an entry for it in the table. + + >>> q1 = [2, 3, 3, 2, 4, 4, 2, 3, 4, 4, 4, 3, 3, 3, 4] # 1 does not occur. + >>> q2 = [4, 4, 2, 2, 2, 4, 1, 1, 2, 2, 4, 2, 2, 2, 4] # 3 does not occur. + >>> options = [1, 2, 3, 4] + >>> res = crosstab(q1, q2, levels=(options, options)) + >>> res.count + array([[0, 0, 0, 0], + [1, 1, 0, 1], + [1, 4, 0, 1], + [0, 3, 0, 3]]) + + If `levels` is given, but an element of `levels` is None, the unique values + of the corresponding argument are used. 
For example, + + >>> res = crosstab(q1, q2, levels=(None, options)) + >>> res.elements + [array([2, 3, 4]), [1, 2, 3, 4]] + >>> res.count + array([[1, 1, 0, 1], + [1, 4, 0, 1], + [0, 3, 0, 3]]) + + If we want to ignore the pairs where 4 occurs in ``q2``, we can + give just the values [1, 2] to `levels`, and the 4 will be ignored: + + >>> res = crosstab(q1, q2, levels=(None, [1, 2])) + >>> res.elements + [array([2, 3, 4]), [1, 2]] + >>> res.count + array([[1, 1], + [1, 4], + [0, 3]]) + + Finally, let's repeat the first example, but return a sparse matrix: + + >>> res = crosstab(a, x, sparse=True) + >>> res.count + + >>> res.count.toarray() + array([[2, 3, 0], + [1, 0, 4]]) + + """ + nargs = len(args) + if nargs == 0: + raise TypeError("At least one input sequence is required.") + + len0 = len(args[0]) + if not all(len(a) == len0 for a in args[1:]): + raise ValueError("All input sequences must have the same length.") + + if sparse and nargs != 2: + raise ValueError("When `sparse` is True, only two input sequences " + "are allowed.") + + if levels is None: + # Call np.unique with return_inverse=True on each argument. + actual_levels, indices = zip(*[np.unique(a, return_inverse=True) + for a in args]) + else: + # `levels` is not None... 
+ if len(levels) != nargs: + raise ValueError('len(levels) must equal the number of input ' + 'sequences') + + args = [np.asarray(arg) for arg in args] + mask = np.zeros((nargs, len0), dtype=np.bool_) + inv = np.zeros((nargs, len0), dtype=np.intp) + actual_levels = [] + for k, (levels_list, arg) in enumerate(zip(levels, args)): + if levels_list is None: + levels_list, inv[k, :] = np.unique(arg, return_inverse=True) + mask[k, :] = True + else: + q = arg == np.asarray(levels_list).reshape(-1, 1) + mask[k, :] = np.any(q, axis=0) + qnz = q.T.nonzero() + inv[k, qnz[0]] = qnz[1] + actual_levels.append(levels_list) + + mask_all = mask.all(axis=0) + indices = tuple(inv[:, mask_all]) + + if sparse: + count = coo_matrix((np.ones(len(indices[0]), dtype=int), + (indices[0], indices[1]))) + count.sum_duplicates() + else: + shape = [len(u) for u in actual_levels] + count = np.zeros(shape, dtype=int) + np.add.at(count, indices, 1) + + return CrosstabResult(actual_levels, count) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_discrete_distns.py b/parrot/lib/python3.10/site-packages/scipy/stats/_discrete_distns.py new file mode 100644 index 0000000000000000000000000000000000000000..f7721f5c3a03bd05b9027240056c177eb53a36b4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_discrete_distns.py @@ -0,0 +1,1922 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +from functools import partial + +from scipy import special +from scipy.special import entr, logsumexp, betaln, gammaln as gamln, zeta +from scipy._lib._util import _lazywhere, rng_integers +from scipy.interpolate import interp1d + +from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh + +import numpy as np + +from ._distn_infrastructure import (rv_discrete, get_distribution_names, + _vectorize_rvs_over_shapes, + _ShapeInfo, _isintegral) +from ._biasedurn import (_PyFishersNCHypergeometric, + _PyWalleniusNCHypergeometric, + 
_PyStochasticLib3) +import scipy.special._ufuncs as scu + + + +class binom_gen(rv_discrete): + r"""A binomial discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `binom` is: + + .. math:: + + f(k) = \binom{n}{k} p^k (1-p)^{n-k} + + for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1` + + `binom` takes :math:`n` and :math:`p` as shape parameters, + where :math:`p` is the probability of a single success + and :math:`1-p` is the probability of a single failure. + + %(after_notes)s + + %(example)s + + See Also + -------- + hypergeom, nbinom, nhypergeom + + """ + def _shape_info(self): + return [_ShapeInfo("n", True, (0, np.inf), (True, False)), + _ShapeInfo("p", False, (0, 1), (True, True))] + + def _rvs(self, n, p, size=None, random_state=None): + return random_state.binomial(n, p, size) + + def _argcheck(self, n, p): + return (n >= 0) & _isintegral(n) & (p >= 0) & (p <= 1) + + def _get_support(self, n, p): + return self.a, n + + def _logpmf(self, x, n, p): + k = floor(x) + combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1))) + return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p) + + def _pmf(self, x, n, p): + # binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k) + return scu._binom_pmf(x, n, p) + + def _cdf(self, x, n, p): + k = floor(x) + return scu._binom_cdf(k, n, p) + + def _sf(self, x, n, p): + k = floor(x) + return scu._binom_sf(k, n, p) + + def _isf(self, x, n, p): + return scu._binom_isf(x, n, p) + + def _ppf(self, q, n, p): + return scu._binom_ppf(q, n, p) + + def _stats(self, n, p, moments='mv'): + mu = n * p + var = mu - n * np.square(p) + g1, g2 = None, None + if 's' in moments: + pq = p - np.square(p) + npq_sqrt = np.sqrt(n * pq) + t1 = np.reciprocal(npq_sqrt) + t2 = (2.0 * p) / npq_sqrt + g1 = t1 - t2 + if 'k' in moments: + pq = p - np.square(p) + npq = n * pq + t1 = np.reciprocal(npq) + t2 = 6.0/n + g2 = t1 - t2 + return mu, var, g1, g2 + + def _entropy(self, n, p): + k = np.r_[0:n + 1] 
+ vals = self._pmf(k, n, p) + return np.sum(entr(vals), axis=0) + + +binom = binom_gen(name='binom') + + +class bernoulli_gen(binom_gen): + r"""A Bernoulli discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `bernoulli` is: + + .. math:: + + f(k) = \begin{cases}1-p &\text{if } k = 0\\ + p &\text{if } k = 1\end{cases} + + for :math:`k` in :math:`\{0, 1\}`, :math:`0 \leq p \leq 1` + + `bernoulli` takes :math:`p` as shape parameter, + where :math:`p` is the probability of a single success + and :math:`1-p` is the probability of a single failure. + + %(after_notes)s + + %(example)s + + """ + def _shape_info(self): + return [_ShapeInfo("p", False, (0, 1), (True, True))] + + def _rvs(self, p, size=None, random_state=None): + return binom_gen._rvs(self, 1, p, size=size, random_state=random_state) + + def _argcheck(self, p): + return (p >= 0) & (p <= 1) + + def _get_support(self, p): + # Overrides binom_gen._get_support!x + return self.a, self.b + + def _logpmf(self, x, p): + return binom._logpmf(x, 1, p) + + def _pmf(self, x, p): + # bernoulli.pmf(k) = 1-p if k = 0 + # = p if k = 1 + return binom._pmf(x, 1, p) + + def _cdf(self, x, p): + return binom._cdf(x, 1, p) + + def _sf(self, x, p): + return binom._sf(x, 1, p) + + def _isf(self, x, p): + return binom._isf(x, 1, p) + + def _ppf(self, q, p): + return binom._ppf(q, 1, p) + + def _stats(self, p): + return binom._stats(1, p) + + def _entropy(self, p): + return entr(p) + entr(1-p) + + +bernoulli = bernoulli_gen(b=1, name='bernoulli') + + +class betabinom_gen(rv_discrete): + r"""A beta-binomial discrete random variable. + + %(before_notes)s + + Notes + ----- + The beta-binomial distribution is a binomial distribution with a + probability of success `p` that follows a beta distribution. + + The probability mass function for `betabinom` is: + + .. 
math:: + + f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)} + + for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`, + :math:`b > 0`, where :math:`B(a, b)` is the beta function. + + `betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution + + %(after_notes)s + + .. versionadded:: 1.4.0 + + See Also + -------- + beta, binom + + %(example)s + + """ + def _shape_info(self): + return [_ShapeInfo("n", True, (0, np.inf), (True, False)), + _ShapeInfo("a", False, (0, np.inf), (False, False)), + _ShapeInfo("b", False, (0, np.inf), (False, False))] + + def _rvs(self, n, a, b, size=None, random_state=None): + p = random_state.beta(a, b, size) + return random_state.binomial(n, p, size) + + def _get_support(self, n, a, b): + return 0, n + + def _argcheck(self, n, a, b): + return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0) + + def _logpmf(self, x, n, a, b): + k = floor(x) + combiln = -log(n + 1) - betaln(n - k + 1, k + 1) + return combiln + betaln(k + a, n - k + b) - betaln(a, b) + + def _pmf(self, x, n, a, b): + return exp(self._logpmf(x, n, a, b)) + + def _stats(self, n, a, b, moments='mv'): + e_p = a / (a + b) + e_q = 1 - e_p + mu = n * e_p + var = n * (a + b + n) * e_p * e_q / (a + b + 1) + g1, g2 = None, None + if 's' in moments: + g1 = 1.0 / sqrt(var) + g1 *= (a + b + 2 * n) * (b - a) + g1 /= (a + b + 2) * (a + b) + if 'k' in moments: + g2 = (a + b).astype(e_p.dtype) + g2 *= (a + b - 1 + 6 * n) + g2 += 3 * a * b * (n - 2) + g2 += 6 * n ** 2 + g2 -= 3 * e_p * b * n * (6 - n) + g2 -= 18 * e_p * e_q * n ** 2 + g2 *= (a + b) ** 2 * (1 + a + b) + g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n)) + g2 -= 3 + return mu, var, g1, g2 + + +betabinom = betabinom_gen(name='betabinom') + + +class nbinom_gen(rv_discrete): + r"""A negative binomial discrete random variable. 
+ + %(before_notes)s + + Notes + ----- + Negative binomial distribution describes a sequence of i.i.d. Bernoulli + trials, repeated until a predefined, non-random number of successes occurs. + + The probability mass function of the number of failures for `nbinom` is: + + .. math:: + + f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k + + for :math:`k \ge 0`, :math:`0 < p \leq 1` + + `nbinom` takes :math:`n` and :math:`p` as shape parameters where :math:`n` + is the number of successes, :math:`p` is the probability of a single + success, and :math:`1-p` is the probability of a single failure. + + Another common parameterization of the negative binomial distribution is + in terms of the mean number of failures :math:`\mu` to achieve :math:`n` + successes. The mean :math:`\mu` is related to the probability of success + as + + .. math:: + + p = \frac{n}{n + \mu} + + The number of successes :math:`n` may also be specified in terms of a + "dispersion", "heterogeneity", or "aggregation" parameter :math:`\alpha`, + which relates the mean :math:`\mu` to the variance :math:`\sigma^2`, + e.g. :math:`\sigma^2 = \mu + \alpha \mu^2`. Regardless of the convention + used for :math:`\alpha`, + + .. 
math:: + + p &= \frac{\mu}{\sigma^2} \\ + n &= \frac{\mu^2}{\sigma^2 - \mu} + + %(after_notes)s + + %(example)s + + See Also + -------- + hypergeom, binom, nhypergeom + + """ + def _shape_info(self): + return [_ShapeInfo("n", True, (0, np.inf), (True, False)), + _ShapeInfo("p", False, (0, 1), (True, True))] + + def _rvs(self, n, p, size=None, random_state=None): + return random_state.negative_binomial(n, p, size) + + def _argcheck(self, n, p): + return (n > 0) & (p > 0) & (p <= 1) + + def _pmf(self, x, n, p): + # nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k + return scu._nbinom_pmf(x, n, p) + + def _logpmf(self, x, n, p): + coeff = gamln(n+x) - gamln(x+1) - gamln(n) + return coeff + n*log(p) + special.xlog1py(x, -p) + + def _cdf(self, x, n, p): + k = floor(x) + return scu._nbinom_cdf(k, n, p) + + def _logcdf(self, x, n, p): + k = floor(x) + k, n, p = np.broadcast_arrays(k, n, p) + cdf = self._cdf(k, n, p) + cond = cdf > 0.5 + def f1(k, n, p): + return np.log1p(-special.betainc(k + 1, n, 1 - p)) + + # do calc in place + logcdf = cdf + with np.errstate(divide='ignore'): + logcdf[cond] = f1(k[cond], n[cond], p[cond]) + logcdf[~cond] = np.log(cdf[~cond]) + return logcdf + + def _sf(self, x, n, p): + k = floor(x) + return scu._nbinom_sf(k, n, p) + + def _isf(self, x, n, p): + with np.errstate(over='ignore'): # see gh-17432 + return scu._nbinom_isf(x, n, p) + + def _ppf(self, q, n, p): + with np.errstate(over='ignore'): # see gh-17432 + return scu._nbinom_ppf(q, n, p) + + def _stats(self, n, p): + return ( + scu._nbinom_mean(n, p), + scu._nbinom_variance(n, p), + scu._nbinom_skewness(n, p), + scu._nbinom_kurtosis_excess(n, p), + ) + + +nbinom = nbinom_gen(name='nbinom') + + +class betanbinom_gen(rv_discrete): + r"""A beta-negative-binomial discrete random variable. + + %(before_notes)s + + Notes + ----- + The beta-negative-binomial distribution is a negative binomial + distribution with a probability of success `p` that follows a + beta distribution. 
+ + The probability mass function for `betanbinom` is: + + .. math:: + + f(k) = \binom{n + k - 1}{k} \frac{B(a + n, b + k)}{B(a, b)} + + for :math:`k \ge 0`, :math:`n \geq 0`, :math:`a > 0`, + :math:`b > 0`, where :math:`B(a, b)` is the beta function. + + `betanbinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Beta_negative_binomial_distribution + + %(after_notes)s + + .. versionadded:: 1.12.0 + + See Also + -------- + betabinom : Beta binomial distribution + + %(example)s + + """ + def _shape_info(self): + return [_ShapeInfo("n", True, (0, np.inf), (True, False)), + _ShapeInfo("a", False, (0, np.inf), (False, False)), + _ShapeInfo("b", False, (0, np.inf), (False, False))] + + def _rvs(self, n, a, b, size=None, random_state=None): + p = random_state.beta(a, b, size) + return random_state.negative_binomial(n, p, size) + + def _argcheck(self, n, a, b): + return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0) + + def _logpmf(self, x, n, a, b): + k = floor(x) + combiln = -np.log(n + k) - betaln(n, k + 1) + return combiln + betaln(a + n, b + k) - betaln(a, b) + + def _pmf(self, x, n, a, b): + return exp(self._logpmf(x, n, a, b)) + + def _stats(self, n, a, b, moments='mv'): + # reference: Wolfram Alpha input + # BetaNegativeBinomialDistribution[a, b, n] + def mean(n, a, b): + return n * b / (a - 1.) + mu = _lazywhere(a > 1, (n, a, b), f=mean, fillvalue=np.inf) + def var(n, a, b): + return (n * b * (n + a - 1.) * (a + b - 1.) + / ((a - 2.) * (a - 1.)**2.)) + var = _lazywhere(a > 2, (n, a, b), f=var, fillvalue=np.inf) + g1, g2 = None, None + def skew(n, a, b): + return ((2 * n + a - 1.) * (2 * b + a - 1.) + / (a - 3.) / sqrt(n * b * (n + a - 1.) * (b + a - 1.) + / (a - 2.))) + if 's' in moments: + g1 = _lazywhere(a > 3, (n, a, b), f=skew, fillvalue=np.inf) + def kurtosis(n, a, b): + term = (a - 2.) + term_2 = ((a - 1.)**2. * (a**2. + a * (6 * b - 1.) + + 6. * (b - 1.) * b) + + 3. * n**2. 
* ((a + 5.) * b**2. + (a + 5.) + * (a - 1.) * b + 2. * (a - 1.)**2) + + 3 * (a - 1.) * n + * ((a + 5.) * b**2. + (a + 5.) * (a - 1.) * b + + 2. * (a - 1.)**2.)) + denominator = ((a - 4.) * (a - 3.) * b * n + * (a + b - 1.) * (a + n - 1.)) + # Wolfram Alpha uses Pearson kurtosis, so we substract 3 to get + # scipy's Fisher kurtosis + return term * term_2 / denominator - 3. + if 'k' in moments: + g2 = _lazywhere(a > 4, (n, a, b), f=kurtosis, fillvalue=np.inf) + return mu, var, g1, g2 + + +betanbinom = betanbinom_gen(name='betanbinom') + + +class geom_gen(rv_discrete): + r"""A geometric discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `geom` is: + + .. math:: + + f(k) = (1-p)^{k-1} p + + for :math:`k \ge 1`, :math:`0 < p \leq 1` + + `geom` takes :math:`p` as shape parameter, + where :math:`p` is the probability of a single success + and :math:`1-p` is the probability of a single failure. + + %(after_notes)s + + See Also + -------- + planck + + %(example)s + + """ + + def _shape_info(self): + return [_ShapeInfo("p", False, (0, 1), (True, True))] + + def _rvs(self, p, size=None, random_state=None): + return random_state.geometric(p, size=size) + + def _argcheck(self, p): + return (p <= 1) & (p > 0) + + def _pmf(self, k, p): + return np.power(1-p, k-1) * p + + def _logpmf(self, k, p): + return special.xlog1py(k - 1, -p) + log(p) + + def _cdf(self, x, p): + k = floor(x) + return -expm1(log1p(-p)*k) + + def _sf(self, x, p): + return np.exp(self._logsf(x, p)) + + def _logsf(self, x, p): + k = floor(x) + return k*log1p(-p) + + def _ppf(self, q, p): + vals = ceil(log1p(-q) / log1p(-p)) + temp = self._cdf(vals-1, p) + return np.where((temp >= q) & (vals > 0), vals-1, vals) + + def _stats(self, p): + mu = 1.0/p + qr = 1.0-p + var = qr / p / p + g1 = (2.0-p) / sqrt(qr) + g2 = np.polyval([1, -6, 6], p)/(1.0-p) + return mu, var, g1, g2 + + def _entropy(self, p): + return -np.log(p) - np.log1p(-p) * (1.0-p) / p + + +geom = 
geom_gen(a=1, name='geom', longname="A geometric") + + +class hypergeom_gen(rv_discrete): + r"""A hypergeometric discrete random variable. + + The hypergeometric distribution models drawing objects from a bin. + `M` is the total number of objects, `n` is total number of Type I objects. + The random variate represents the number of Type I objects in `N` drawn + without replacement from the total population. + + %(before_notes)s + + Notes + ----- + The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not + universally accepted. See the Examples for a clarification of the + definitions used here. + + The probability mass function is defined as, + + .. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}} + {\binom{M}{N}} + + for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial + coefficients are defined as, + + .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. + + %(after_notes)s + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import hypergeom + >>> import matplotlib.pyplot as plt + + Suppose we have a collection of 20 animals, of which 7 are dogs. Then if + we want to know the probability of finding a given number of dogs if we + choose at random 12 of the 20 animals, we can initialize a frozen + distribution and plot the probability mass function: + + >>> [M, n, N] = [20, 7, 12] + >>> rv = hypergeom(M, n, N) + >>> x = np.arange(0, n+1) + >>> pmf_dogs = rv.pmf(x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, pmf_dogs, 'bo') + >>> ax.vlines(x, 0, pmf_dogs, lw=2) + >>> ax.set_xlabel('# of dogs in our group of chosen animals') + >>> ax.set_ylabel('hypergeom PMF') + >>> plt.show() + + Instead of using a frozen distribution we can also use `hypergeom` + methods directly. 
To for example obtain the cumulative distribution + function, use: + + >>> prb = hypergeom.cdf(x, M, n, N) + + And to generate random numbers: + + >>> R = hypergeom.rvs(M, n, N, size=10) + + See Also + -------- + nhypergeom, binom, nbinom + + """ + def _shape_info(self): + return [_ShapeInfo("M", True, (0, np.inf), (True, False)), + _ShapeInfo("n", True, (0, np.inf), (True, False)), + _ShapeInfo("N", True, (0, np.inf), (True, False))] + + def _rvs(self, M, n, N, size=None, random_state=None): + return random_state.hypergeometric(n, M-n, N, size=size) + + def _get_support(self, M, n, N): + return np.maximum(N-(M-n), 0), np.minimum(n, N) + + def _argcheck(self, M, n, N): + cond = (M > 0) & (n >= 0) & (N >= 0) + cond &= (n <= M) & (N <= M) + cond &= _isintegral(M) & _isintegral(n) & _isintegral(N) + return cond + + def _logpmf(self, k, M, n, N): + tot, good = M, n + bad = tot - good + result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) - + betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) - + betaln(tot+1, 1)) + return result + + def _pmf(self, k, M, n, N): + return scu._hypergeom_pmf(k, n, N, M) + + def _cdf(self, k, M, n, N): + return scu._hypergeom_cdf(k, n, N, M) + + def _stats(self, M, n, N): + M, n, N = 1. * M, 1. * n, 1. * N + m = M - n + + # Boost kurtosis_excess doesn't return the same as the value + # computed here. + g2 = M * (M + 1) - 6. * N * (M - N) - 6. * n * m + g2 *= (M - 1) * M * M + g2 += 6. * n * N * (M - N) * m * (5. * M - 6) + g2 /= n * N * (M - N) * m * (M - 2.) * (M - 3.) 
+ return ( + scu._hypergeom_mean(n, N, M), + scu._hypergeom_variance(n, N, M), + scu._hypergeom_skewness(n, N, M), + g2, + ) + + def _entropy(self, M, n, N): + k = np.r_[N - (M - n):min(n, N) + 1] + vals = self.pmf(k, M, n, N) + return np.sum(entr(vals), axis=0) + + def _sf(self, k, M, n, N): + return scu._hypergeom_sf(k, n, N, M) + + def _logsf(self, k, M, n, N): + res = [] + for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)): + if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5): + # Less terms to sum if we calculate log(1-cdf) + res.append(log1p(-exp(self.logcdf(quant, tot, good, draw)))) + else: + # Integration over probability mass function using logsumexp + k2 = np.arange(quant + 1, draw + 1) + res.append(logsumexp(self._logpmf(k2, tot, good, draw))) + return np.asarray(res) + + def _logcdf(self, k, M, n, N): + res = [] + for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)): + if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5): + # Less terms to sum if we calculate log(1-sf) + res.append(log1p(-exp(self.logsf(quant, tot, good, draw)))) + else: + # Integration over probability mass function using logsumexp + k2 = np.arange(0, quant + 1) + res.append(logsumexp(self._logpmf(k2, tot, good, draw))) + return np.asarray(res) + + +hypergeom = hypergeom_gen(name='hypergeom') + + +class nhypergeom_gen(rv_discrete): + r"""A negative hypergeometric discrete random variable. + + Consider a box containing :math:`M` balls:, :math:`n` red and + :math:`M-n` blue. We randomly sample balls from the box, one + at a time and *without* replacement, until we have picked :math:`r` + blue balls. `nhypergeom` is the distribution of the number of + red balls :math:`k` we have picked. + + %(before_notes)s + + Notes + ----- + The symbols used to denote the shape parameters (`M`, `n`, and `r`) are not + universally accepted. See the Examples for a clarification of the + definitions used here. 
+ + The probability mass function is defined as, + + .. math:: f(k; M, n, r) = \frac{{{k+r-1}\choose{k}}{{M-r-k}\choose{n-k}}} + {{M \choose n}} + + for :math:`k \in [0, n]`, :math:`n \in [0, M]`, :math:`r \in [0, M-n]`, + and the binomial coefficient is: + + .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. + + It is equivalent to observing :math:`k` successes in :math:`k+r-1` + samples with :math:`k+r`'th sample being a failure. The former + can be modelled as a hypergeometric distribution. The probability + of the latter is simply the number of failures remaining + :math:`M-n-(r-1)` divided by the size of the remaining population + :math:`M-(k+r-1)`. This relationship can be shown as: + + .. math:: NHG(k;M,n,r) = HG(k;M,n,k+r-1)\frac{(M-n-(r-1))}{(M-(k+r-1))} + + where :math:`NHG` is probability mass function (PMF) of the + negative hypergeometric distribution and :math:`HG` is the + PMF of the hypergeometric distribution. + + %(after_notes)s + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import nhypergeom + >>> import matplotlib.pyplot as plt + + Suppose we have a collection of 20 animals, of which 7 are dogs. + Then if we want to know the probability of finding a given number + of dogs (successes) in a sample with exactly 12 animals that + aren't dogs (failures), we can initialize a frozen distribution + and plot the probability mass function: + + >>> M, n, r = [20, 7, 12] + >>> rv = nhypergeom(M, n, r) + >>> x = np.arange(0, n+2) + >>> pmf_dogs = rv.pmf(x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, pmf_dogs, 'bo') + >>> ax.vlines(x, 0, pmf_dogs, lw=2) + >>> ax.set_xlabel('# of dogs in our group with given 12 failures') + >>> ax.set_ylabel('nhypergeom PMF') + >>> plt.show() + + Instead of using a frozen distribution we can also use `nhypergeom` + methods directly. 
To for example obtain the probability mass + function, use: + + >>> prb = nhypergeom.pmf(x, M, n, r) + + And to generate random numbers: + + >>> R = nhypergeom.rvs(M, n, r, size=10) + + To verify the relationship between `hypergeom` and `nhypergeom`, use: + + >>> from scipy.stats import hypergeom, nhypergeom + >>> M, n, r = 45, 13, 8 + >>> k = 6 + >>> nhypergeom.pmf(k, M, n, r) + 0.06180776620271643 + >>> hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1)) + 0.06180776620271644 + + See Also + -------- + hypergeom, binom, nbinom + + References + ---------- + .. [1] Negative Hypergeometric Distribution on Wikipedia + https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution + + .. [2] Negative Hypergeometric Distribution from + http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Negativehypergeometric.pdf + + """ + + def _shape_info(self): + return [_ShapeInfo("M", True, (0, np.inf), (True, False)), + _ShapeInfo("n", True, (0, np.inf), (True, False)), + _ShapeInfo("r", True, (0, np.inf), (True, False))] + + def _get_support(self, M, n, r): + return 0, n + + def _argcheck(self, M, n, r): + cond = (n >= 0) & (n <= M) & (r >= 0) & (r <= M-n) + cond &= _isintegral(M) & _isintegral(n) & _isintegral(r) + return cond + + def _rvs(self, M, n, r, size=None, random_state=None): + + @_vectorize_rvs_over_shapes + def _rvs1(M, n, r, size, random_state): + # invert cdf by calculating all values in support, scalar M, n, r + a, b = self.support(M, n, r) + ks = np.arange(a, b+1) + cdf = self.cdf(ks, M, n, r) + ppf = interp1d(cdf, ks, kind='next', fill_value='extrapolate') + rvs = ppf(random_state.uniform(size=size)).astype(int) + if size is None: + return rvs.item() + return rvs + + return _rvs1(M, n, r, size=size, random_state=random_state) + + def _logpmf(self, k, M, n, r): + cond = ((r == 0) & (k == 0)) + result = _lazywhere(~cond, (k, M, n, r), + lambda k, M, n, r: + (-betaln(k+1, r) + betaln(k+r, 1) - + betaln(n-k+1, M-r-n+1) + betaln(M-r-k+1, 1) + + betaln(n+1, 
M-n+1) - betaln(M+1, 1)), + fillvalue=0.0) + return result + + def _pmf(self, k, M, n, r): + # same as the following but numerically more precise + # return comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n) + return exp(self._logpmf(k, M, n, r)) + + def _stats(self, M, n, r): + # Promote the datatype to at least float + # mu = rn / (M-n+1) + M, n, r = 1.*M, 1.*n, 1.*r + mu = r*n / (M-n+1) + + var = r*(M+1)*n / ((M-n+1)*(M-n+2)) * (1 - r / (M-n+1)) + + # The skew and kurtosis are mathematically + # intractable so return `None`. See [2]_. + g1, g2 = None, None + return mu, var, g1, g2 + + +nhypergeom = nhypergeom_gen(name='nhypergeom') + + +# FIXME: Fails _cdfvec +class logser_gen(rv_discrete): + r"""A Logarithmic (Log-Series, Series) discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `logser` is: + + .. math:: + + f(k) = - \frac{p^k}{k \log(1-p)} + + for :math:`k \ge 1`, :math:`0 < p < 1` + + `logser` takes :math:`p` as shape parameter, + where :math:`p` is the probability of a single success + and :math:`1-p` is the probability of a single failure. 
+ + %(after_notes)s + + %(example)s + + """ + + def _shape_info(self): + return [_ShapeInfo("p", False, (0, 1), (True, True))] + + def _rvs(self, p, size=None, random_state=None): + # looks wrong for p>0.5, too few k=1 + # trying to use generic is worse, no k=1 at all + return random_state.logseries(p, size=size) + + def _argcheck(self, p): + return (p > 0) & (p < 1) + + def _pmf(self, k, p): + # logser.pmf(k) = - p**k / (k*log(1-p)) + return -np.power(p, k) * 1.0 / k / special.log1p(-p) + + def _stats(self, p): + r = special.log1p(-p) + mu = p / (p - 1.0) / r + mu2p = -p / r / (p - 1.0)**2 + var = mu2p - mu*mu + mu3p = -p / r * (1.0+p) / (1.0 - p)**3 + mu3 = mu3p - 3*mu*mu2p + 2*mu**3 + g1 = mu3 / np.power(var, 1.5) + + mu4p = -p / r * ( + 1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4) + mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 + g2 = mu4 / var**2 - 3.0 + return mu, var, g1, g2 + + +logser = logser_gen(a=1, name='logser', longname='A logarithmic') + + +class poisson_gen(rv_discrete): + r"""A Poisson discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `poisson` is: + + .. math:: + + f(k) = \exp(-\mu) \frac{\mu^k}{k!} + + for :math:`k \ge 0`. + + `poisson` takes :math:`\mu \geq 0` as shape parameter. + When :math:`\mu = 0`, the ``pmf`` method + returns ``1.0`` at quantile :math:`k = 0`. + + %(after_notes)s + + %(example)s + + """ + + def _shape_info(self): + return [_ShapeInfo("mu", False, (0, np.inf), (True, False))] + + # Override rv_discrete._argcheck to allow mu=0. + def _argcheck(self, mu): + return mu >= 0 + + def _rvs(self, mu, size=None, random_state=None): + return random_state.poisson(mu, size) + + def _logpmf(self, k, mu): + Pk = special.xlogy(k, mu) - gamln(k + 1) - mu + return Pk + + def _pmf(self, k, mu): + # poisson.pmf(k) = exp(-mu) * mu**k / k! 
+ return exp(self._logpmf(k, mu)) + + def _cdf(self, x, mu): + k = floor(x) + return special.pdtr(k, mu) + + def _sf(self, x, mu): + k = floor(x) + return special.pdtrc(k, mu) + + def _ppf(self, q, mu): + vals = ceil(special.pdtrik(q, mu)) + vals1 = np.maximum(vals - 1, 0) + temp = special.pdtr(vals1, mu) + return np.where(temp >= q, vals1, vals) + + def _stats(self, mu): + var = mu + tmp = np.asarray(mu) + mu_nonzero = tmp > 0 + g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf) + g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf) + return mu, var, g1, g2 + + +poisson = poisson_gen(name="poisson", longname='A Poisson') + + +class planck_gen(rv_discrete): + r"""A Planck discrete exponential random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `planck` is: + + .. math:: + + f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) + + for :math:`k \ge 0` and :math:`\lambda > 0`. + + `planck` takes :math:`\lambda` as shape parameter. The Planck distribution + can be written as a geometric distribution (`geom`) with + :math:`p = 1 - \exp(-\lambda)` shifted by ``loc = -1``. 
+ + %(after_notes)s + + See Also + -------- + geom + + %(example)s + + """ + def _shape_info(self): + return [_ShapeInfo("lambda", False, (0, np.inf), (False, False))] + + def _argcheck(self, lambda_): + return lambda_ > 0 + + def _pmf(self, k, lambda_): + return -expm1(-lambda_)*exp(-lambda_*k) + + def _cdf(self, x, lambda_): + k = floor(x) + return -expm1(-lambda_*(k+1)) + + def _sf(self, x, lambda_): + return exp(self._logsf(x, lambda_)) + + def _logsf(self, x, lambda_): + k = floor(x) + return -lambda_*(k+1) + + def _ppf(self, q, lambda_): + vals = ceil(-1.0/lambda_ * log1p(-q)-1) + vals1 = (vals-1).clip(*(self._get_support(lambda_))) + temp = self._cdf(vals1, lambda_) + return np.where(temp >= q, vals1, vals) + + def _rvs(self, lambda_, size=None, random_state=None): + # use relation to geometric distribution for sampling + p = -expm1(-lambda_) + return random_state.geometric(p, size=size) - 1.0 + + def _stats(self, lambda_): + mu = 1/expm1(lambda_) + var = exp(-lambda_)/(expm1(-lambda_))**2 + g1 = 2*cosh(lambda_/2.0) + g2 = 4+2*cosh(lambda_) + return mu, var, g1, g2 + + def _entropy(self, lambda_): + C = -expm1(-lambda_) + return lambda_*exp(-lambda_)/C - log(C) + + +planck = planck_gen(a=0, name='planck', longname='A discrete exponential ') + + +class boltzmann_gen(rv_discrete): + r"""A Boltzmann (Truncated Discrete Exponential) random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `boltzmann` is: + + .. math:: + + f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N)) + + for :math:`k = 0,..., N-1`. + + `boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters. 
+ + %(after_notes)s + + %(example)s + + """ + def _shape_info(self): + return [_ShapeInfo("lambda_", False, (0, np.inf), (False, False)), + _ShapeInfo("N", True, (0, np.inf), (False, False))] + + def _argcheck(self, lambda_, N): + return (lambda_ > 0) & (N > 0) & _isintegral(N) + + def _get_support(self, lambda_, N): + return self.a, N - 1 + + def _pmf(self, k, lambda_, N): + # boltzmann.pmf(k) = + # (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N)) + fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) + return fact*exp(-lambda_*k) + + def _cdf(self, x, lambda_, N): + k = floor(x) + return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) + + def _ppf(self, q, lambda_, N): + qnew = q*(1-exp(-lambda_*N)) + vals = ceil(-1.0/lambda_ * log(1-qnew)-1) + vals1 = (vals-1).clip(0.0, np.inf) + temp = self._cdf(vals1, lambda_, N) + return np.where(temp >= q, vals1, vals) + + def _stats(self, lambda_, N): + z = exp(-lambda_) + zN = exp(-lambda_*N) + mu = z/(1.0-z)-N*zN/(1-zN) + var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2 + trm = (1-zN)/(1-z) + trm2 = (z*trm**2 - N*N*zN) + g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) + g1 = g1 / trm2**(1.5) + g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) + g2 = g2 / trm2 / trm2 + return mu, var, g1, g2 + + +boltzmann = boltzmann_gen(name='boltzmann', a=0, + longname='A truncated discrete exponential ') + + +class randint_gen(rv_discrete): + r"""A uniform discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `randint` is: + + .. math:: + + f(k) = \frac{1}{\texttt{high} - \texttt{low}} + + for :math:`k \in \{\texttt{low}, \dots, \texttt{high} - 1\}`. + + `randint` takes :math:`\texttt{low}` and :math:`\texttt{high}` as shape + parameters. 
+ + %(after_notes)s + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import randint + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + + Calculate the first four moments: + + >>> low, high = 7, 31 + >>> mean, var, skew, kurt = randint.stats(low, high, moments='mvsk') + + Display the probability mass function (``pmf``): + + >>> x = np.arange(low - 5, high + 5) + >>> ax.plot(x, randint.pmf(x, low, high), 'bo', ms=8, label='randint pmf') + >>> ax.vlines(x, 0, randint.pmf(x, low, high), colors='b', lw=5, alpha=0.5) + + Alternatively, the distribution object can be called (as a function) to + fix the shape and location. This returns a "frozen" RV object holding the + given parameters fixed. + + Freeze the distribution and display the frozen ``pmf``: + + >>> rv = randint(low, high) + >>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', + ... lw=1, label='frozen pmf') + >>> ax.legend(loc='lower center') + >>> plt.show() + + Check the relationship between the cumulative distribution function + (``cdf``) and its inverse, the percent point function (``ppf``): + + >>> q = np.arange(low, high) + >>> p = randint.cdf(q, low, high) + >>> np.allclose(q, randint.ppf(p, low, high)) + True + + Generate random numbers: + + >>> r = randint.rvs(low, high, size=1000) + + """ + + def _shape_info(self): + return [_ShapeInfo("low", True, (-np.inf, np.inf), (False, False)), + _ShapeInfo("high", True, (-np.inf, np.inf), (False, False))] + + def _argcheck(self, low, high): + return (high > low) & _isintegral(low) & _isintegral(high) + + def _get_support(self, low, high): + return low, high-1 + + def _pmf(self, k, low, high): + # randint.pmf(k) = 1./(high - low) + p = np.ones_like(k) / (high - low) + return np.where((k >= low) & (k < high), p, 0.) + + def _cdf(self, x, low, high): + k = floor(x) + return (k - low + 1.) 
/ (high - low) + + def _ppf(self, q, low, high): + vals = ceil(q * (high - low) + low) - 1 + vals1 = (vals - 1).clip(low, high) + temp = self._cdf(vals1, low, high) + return np.where(temp >= q, vals1, vals) + + def _stats(self, low, high): + m2, m1 = np.asarray(high), np.asarray(low) + mu = (m2 + m1 - 1.0) / 2 + d = m2 - m1 + var = (d*d - 1) / 12.0 + g1 = 0.0 + g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0) + return mu, var, g1, g2 + + def _rvs(self, low, high, size=None, random_state=None): + """An array of *size* random integers >= ``low`` and < ``high``.""" + if np.asarray(low).size == 1 and np.asarray(high).size == 1: + # no need to vectorize in that case + return rng_integers(random_state, low, high, size=size) + + if size is not None: + # NumPy's RandomState.randint() doesn't broadcast its arguments. + # Use `broadcast_to()` to extend the shapes of low and high + # up to size. Then we can use the numpy.vectorize'd + # randint without needing to pass it a `size` argument. + low = np.broadcast_to(low, size) + high = np.broadcast_to(high, size) + randint = np.vectorize(partial(rng_integers, random_state), + otypes=[np.dtype(int)]) + return randint(low, high) + + def _entropy(self, low, high): + return log(high - low) + + +randint = randint_gen(name='randint', longname='A discrete uniform ' + '(random integer)') + + +# FIXME: problems sampling. +class zipf_gen(rv_discrete): + r"""A Zipf (Zeta) discrete random variable. + + %(before_notes)s + + See Also + -------- + zipfian + + Notes + ----- + The probability mass function for `zipf` is: + + .. math:: + + f(k, a) = \frac{1}{\zeta(a) k^a} + + for :math:`k \ge 1`, :math:`a > 1`. + + `zipf` takes :math:`a > 1` as shape parameter. :math:`\zeta` is the + Riemann zeta function (`scipy.special.zeta`) + + The Zipf distribution is also known as the zeta distribution, which is + a special case of the Zipfian distribution (`zipfian`). + + %(after_notes)s + + References + ---------- + .. 
[1] "Zeta Distribution", Wikipedia, + https://en.wikipedia.org/wiki/Zeta_distribution + + %(example)s + + Confirm that `zipf` is the large `n` limit of `zipfian`. + + >>> import numpy as np + >>> from scipy.stats import zipf, zipfian + >>> k = np.arange(11) + >>> np.allclose(zipf.pmf(k, a), zipfian.pmf(k, a, n=10000000)) + True + + """ + + def _shape_info(self): + return [_ShapeInfo("a", False, (1, np.inf), (False, False))] + + def _rvs(self, a, size=None, random_state=None): + return random_state.zipf(a, size=size) + + def _argcheck(self, a): + return a > 1 + + def _pmf(self, k, a): + k = k.astype(np.float64) + # zipf.pmf(k, a) = 1/(zeta(a) * k**a) + Pk = 1.0 / special.zeta(a, 1) * k**-a + return Pk + + def _munp(self, n, a): + return _lazywhere( + a > n + 1, (a, n), + lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1), + np.inf) + + +zipf = zipf_gen(a=1, name='zipf', longname='A Zipf') + + +def _gen_harmonic_gt1(n, a): + """Generalized harmonic number, a > 1""" + # See https://en.wikipedia.org/wiki/Harmonic_number; search for "hurwitz" + return zeta(a, 1) - zeta(a, n+1) + + +def _gen_harmonic_leq1(n, a): + """Generalized harmonic number, a <= 1""" + if not np.size(n): + return n + n_max = np.max(n) # loop starts at maximum of all n + out = np.zeros_like(a, dtype=float) + # add terms of harmonic series; starting from smallest to avoid roundoff + for i in np.arange(n_max, 0, -1, dtype=float): + mask = i <= n # don't add terms after nth + out[mask] += 1/i**a[mask] + return out + + +def _gen_harmonic(n, a): + """Generalized harmonic number""" + n, a = np.broadcast_arrays(n, a) + return _lazywhere(a > 1, (n, a), + f=_gen_harmonic_gt1, f2=_gen_harmonic_leq1) + + +class zipfian_gen(rv_discrete): + r"""A Zipfian discrete random variable. + + %(before_notes)s + + See Also + -------- + zipf + + Notes + ----- + The probability mass function for `zipfian` is: + + .. 
math:: + + f(k, a, n) = \frac{1}{H_{n,a} k^a} + + for :math:`k \in \{1, 2, \dots, n-1, n\}`, :math:`a \ge 0`, + :math:`n \in \{1, 2, 3, \dots\}`. + + `zipfian` takes :math:`a` and :math:`n` as shape parameters. + :math:`H_{n,a}` is the :math:`n`:sup:`th` generalized harmonic + number of order :math:`a`. + + The Zipfian distribution reduces to the Zipf (zeta) distribution as + :math:`n \rightarrow \infty`. + + %(after_notes)s + + References + ---------- + .. [1] "Zipf's Law", Wikipedia, https://en.wikipedia.org/wiki/Zipf's_law + .. [2] Larry Leemis, "Zipf Distribution", Univariate Distribution + Relationships. http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf + + %(example)s + + Confirm that `zipfian` reduces to `zipf` for large `n`, `a > 1`. + + >>> import numpy as np + >>> from scipy.stats import zipf, zipfian + >>> k = np.arange(11) + >>> np.allclose(zipfian.pmf(k, a=3.5, n=10000000), zipf.pmf(k, a=3.5)) + True + + """ + + def _shape_info(self): + return [_ShapeInfo("a", False, (0, np.inf), (True, False)), + _ShapeInfo("n", True, (0, np.inf), (False, False))] + + def _argcheck(self, a, n): + # we need np.asarray here because moment (maybe others) don't convert + return (a >= 0) & (n > 0) & (n == np.asarray(n, dtype=int)) + + def _get_support(self, a, n): + return 1, n + + def _pmf(self, k, a, n): + k = k.astype(np.float64) + return 1.0 / _gen_harmonic(n, a) * k**-a + + def _cdf(self, k, a, n): + return _gen_harmonic(k, a) / _gen_harmonic(n, a) + + def _sf(self, k, a, n): + k = k + 1 # # to match SciPy convention + # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf + return ((k**a*(_gen_harmonic(n, a) - _gen_harmonic(k, a)) + 1) + / (k**a*_gen_harmonic(n, a))) + + def _stats(self, a, n): + # see # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf + Hna = _gen_harmonic(n, a) + Hna1 = _gen_harmonic(n, a-1) + Hna2 = _gen_harmonic(n, a-2) + Hna3 = _gen_harmonic(n, a-3) + Hna4 = _gen_harmonic(n, a-4) + mu1 = Hna1/Hna + mu2n = (Hna2*Hna - 
Hna1**2) + mu2d = Hna**2 + mu2 = mu2n / mu2d + g1 = (Hna3/Hna - 3*Hna1*Hna2/Hna**2 + 2*Hna1**3/Hna**3)/mu2**(3/2) + g2 = (Hna**3*Hna4 - 4*Hna**2*Hna1*Hna3 + 6*Hna*Hna1**2*Hna2 + - 3*Hna1**4) / mu2n**2 + g2 -= 3 + return mu1, mu2, g1, g2 + + +zipfian = zipfian_gen(a=1, name='zipfian', longname='A Zipfian') + + +class dlaplace_gen(rv_discrete): + r"""A Laplacian discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `dlaplace` is: + + .. math:: + + f(k) = \tanh(a/2) \exp(-a |k|) + + for integers :math:`k` and :math:`a > 0`. + + `dlaplace` takes :math:`a` as shape parameter. + + %(after_notes)s + + %(example)s + + """ + + def _shape_info(self): + return [_ShapeInfo("a", False, (0, np.inf), (False, False))] + + def _pmf(self, k, a): + # dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k)) + return tanh(a/2.0) * exp(-a * abs(k)) + + def _cdf(self, x, a): + k = floor(x) + + def f(k, a): + return 1.0 - exp(-a * k) / (exp(a) + 1) + + def f2(k, a): + return exp(a * (k + 1)) / (exp(a) + 1) + + return _lazywhere(k >= 0, (k, a), f=f, f2=f2) + + def _ppf(self, q, a): + const = 1 + exp(a) + vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), + log(q*const) / a - 1, + -log((1-q) * const) / a)) + vals1 = vals - 1 + return np.where(self._cdf(vals1, a) >= q, vals1, vals) + + def _stats(self, a): + ea = exp(a) + mu2 = 2.*ea/(ea-1.)**2 + mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4 + return 0., mu2, 0., mu4/mu2**2 - 3. + + def _entropy(self, a): + return a / sinh(a) - log(tanh(a/2.0)) + + def _rvs(self, a, size=None, random_state=None): + # The discrete Laplace is equivalent to the two-sided geometric + # distribution with PMF: + # f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k) + # Reference: + # https://www.sciencedirect.com/science/ + # article/abs/pii/S0378375804003519 + # Furthermore, the two-sided geometric distribution is + # equivalent to the difference between two iid geometric + # distributions. 
+ # Reference (page 179): + # https://pdfs.semanticscholar.org/61b3/ + # b99f466815808fd0d03f5d2791eea8b541a1.pdf + # Thus, we can leverage the following: + # 1) alpha = e^-a + # 2) probability_of_success = 1 - alpha (Bernoulli trial) + probOfSuccess = -np.expm1(-np.asarray(a)) + x = random_state.geometric(probOfSuccess, size=size) + y = random_state.geometric(probOfSuccess, size=size) + return x - y + + +dlaplace = dlaplace_gen(a=-np.inf, + name='dlaplace', longname='A discrete Laplacian') + + +class skellam_gen(rv_discrete): + r"""A Skellam discrete random variable. + + %(before_notes)s + + Notes + ----- + Probability distribution of the difference of two correlated or + uncorrelated Poisson random variables. + + Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with + expected values :math:`\lambda_1` and :math:`\lambda_2`. Then, + :math:`k_1 - k_2` follows a Skellam distribution with parameters + :math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and + :math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where + :math:`\rho` is the correlation coefficient between :math:`k_1` and + :math:`k_2`. If the two Poisson-distributed r.v. are independent then + :math:`\rho = 0`. + + Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive. + + For details see: https://en.wikipedia.org/wiki/Skellam_distribution + + `skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters. 
+ + %(after_notes)s + + %(example)s + + """ + def _shape_info(self): + return [_ShapeInfo("mu1", False, (0, np.inf), (False, False)), + _ShapeInfo("mu2", False, (0, np.inf), (False, False))] + + def _rvs(self, mu1, mu2, size=None, random_state=None): + n = size + return (random_state.poisson(mu1, n) - + random_state.poisson(mu2, n)) + + def _pmf(self, x, mu1, mu2): + with np.errstate(over='ignore'): # see gh-17432 + px = np.where(x < 0, + scu._ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2, + scu._ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2) + # ncx2.pdf() returns nan's for extremely low probabilities + return px + + def _cdf(self, x, mu1, mu2): + x = floor(x) + with np.errstate(over='ignore'): # see gh-17432 + px = np.where(x < 0, + scu._ncx2_cdf(2*mu2, -2*x, 2*mu1), + 1 - scu._ncx2_cdf(2*mu1, 2*(x+1), 2*mu2)) + return px + + def _stats(self, mu1, mu2): + mean = mu1 - mu2 + var = mu1 + mu2 + g1 = mean / sqrt((var)**3) + g2 = 1 / var + return mean, var, g1, g2 + + +skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam') + + +class yulesimon_gen(rv_discrete): + r"""A Yule-Simon discrete random variable. + + %(before_notes)s + + Notes + ----- + + The probability mass function for the `yulesimon` is: + + .. math:: + + f(k) = \alpha B(k, \alpha+1) + + for :math:`k=1,2,3,...`, where :math:`\alpha>0`. + Here :math:`B` refers to the `scipy.special.beta` function. + + The sampling of random variates is based on pg 553, Section 6.3 of [1]_. + Our notation maps to the referenced logic via :math:`\alpha=a-1`. + + For details see the wikipedia entry [2]_. + + References + ---------- + .. [1] Devroye, Luc. "Non-uniform Random Variate Generation", + (1986) Springer, New York. + + .. 
[2] https://en.wikipedia.org/wiki/Yule-Simon_distribution + + %(after_notes)s + + %(example)s + + """ + def _shape_info(self): + return [_ShapeInfo("alpha", False, (0, np.inf), (False, False))] + + def _rvs(self, alpha, size=None, random_state=None): + E1 = random_state.standard_exponential(size) + E2 = random_state.standard_exponential(size) + ans = ceil(-E1 / log1p(-exp(-E2 / alpha))) + return ans + + def _pmf(self, x, alpha): + return alpha * special.beta(x, alpha + 1) + + def _argcheck(self, alpha): + return (alpha > 0) + + def _logpmf(self, x, alpha): + return log(alpha) + special.betaln(x, alpha + 1) + + def _cdf(self, x, alpha): + return 1 - x * special.beta(x, alpha + 1) + + def _sf(self, x, alpha): + return x * special.beta(x, alpha + 1) + + def _logsf(self, x, alpha): + return log(x) + special.betaln(x, alpha + 1) + + def _stats(self, alpha): + mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1)) + mu2 = np.where(alpha > 2, + alpha**2 / ((alpha - 2.0) * (alpha - 1)**2), + np.inf) + mu2 = np.where(alpha <= 1, np.nan, mu2) + g1 = np.where(alpha > 3, + sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)), + np.inf) + g1 = np.where(alpha <= 2, np.nan, g1) + g2 = np.where(alpha > 4, + alpha + 3 + ((11 * alpha**3 - 49 * alpha - 22) / + (alpha * (alpha - 4) * (alpha - 3))), + np.inf) + g2 = np.where(alpha <= 2, np.nan, g2) + return mu, mu2, g1, g2 + + +yulesimon = yulesimon_gen(name='yulesimon', a=1) + + +class _nchypergeom_gen(rv_discrete): + r"""A noncentral hypergeometric discrete random variable. + + For subclassing by nchypergeom_fisher_gen and nchypergeom_wallenius_gen. 
+ + """ + + rvs_name = None + dist = None + + def _shape_info(self): + return [_ShapeInfo("M", True, (0, np.inf), (True, False)), + _ShapeInfo("n", True, (0, np.inf), (True, False)), + _ShapeInfo("N", True, (0, np.inf), (True, False)), + _ShapeInfo("odds", False, (0, np.inf), (False, False))] + + def _get_support(self, M, n, N, odds): + N, m1, n = M, n, N # follow Wikipedia notation + m2 = N - m1 + x_min = np.maximum(0, n - m2) + x_max = np.minimum(n, m1) + return x_min, x_max + + def _argcheck(self, M, n, N, odds): + M, n = np.asarray(M), np.asarray(n), + N, odds = np.asarray(N), np.asarray(odds) + cond1 = (M.astype(int) == M) & (M >= 0) + cond2 = (n.astype(int) == n) & (n >= 0) + cond3 = (N.astype(int) == N) & (N >= 0) + cond4 = odds > 0 + cond5 = N <= M + cond6 = n <= M + return cond1 & cond2 & cond3 & cond4 & cond5 & cond6 + + def _rvs(self, M, n, N, odds, size=None, random_state=None): + + @_vectorize_rvs_over_shapes + def _rvs1(M, n, N, odds, size, random_state): + length = np.prod(size) + urn = _PyStochasticLib3() + rv_gen = getattr(urn, self.rvs_name) + rvs = rv_gen(N, n, M, odds, length, random_state) + rvs = rvs.reshape(size) + return rvs + + return _rvs1(M, n, N, odds, size=size, random_state=random_state) + + def _pmf(self, x, M, n, N, odds): + + x, M, n, N, odds = np.broadcast_arrays(x, M, n, N, odds) + if x.size == 0: # np.vectorize doesn't work with zero size input + return np.empty_like(x) + + @np.vectorize + def _pmf1(x, M, n, N, odds): + urn = self.dist(N, n, M, odds, 1e-12) + return urn.probability(x) + + return _pmf1(x, M, n, N, odds) + + def _stats(self, M, n, N, odds, moments): + + @np.vectorize + def _moments1(M, n, N, odds): + urn = self.dist(N, n, M, odds, 1e-12) + return urn.moments() + + m, v = (_moments1(M, n, N, odds) if ("m" in moments or "v" in moments) + else (None, None)) + s, k = None, None + return m, v, s, k + + +class nchypergeom_fisher_gen(_nchypergeom_gen): + r"""A Fisher's noncentral hypergeometric discrete random variable. 
+ + Fisher's noncentral hypergeometric distribution models drawing objects of + two types from a bin. `M` is the total number of objects, `n` is the + number of Type I objects, and `odds` is the odds ratio: the odds of + selecting a Type I object rather than a Type II object when there is only + one object of each type. + The random variate represents the number of Type I objects drawn if we + take a handful of objects from the bin at once and find out afterwards + that we took `N` objects. + + %(before_notes)s + + See Also + -------- + nchypergeom_wallenius, hypergeom, nhypergeom + + Notes + ----- + Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond + with parameters `N`, `n`, and `M` (respectively) as defined above. + + The probability mass function is defined as + + .. math:: + + p(x; M, n, N, \omega) = + \frac{\binom{n}{x}\binom{M - n}{N-x}\omega^x}{P_0}, + + for + :math:`x \in [x_l, x_u]`, + :math:`M \in {\mathbb N}`, + :math:`n \in [0, M]`, + :math:`N \in [0, M]`, + :math:`\omega > 0`, + where + :math:`x_l = \max(0, N - (M - n))`, + :math:`x_u = \min(N, n)`, + + .. math:: + + P_0 = \sum_{y=x_l}^{x_u} \binom{n}{y}\binom{M - n}{N-y}\omega^y, + + and the binomial coefficients are defined as + + .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. + + `nchypergeom_fisher` uses the BiasedUrn package by Agner Fog with + permission for it to be distributed under SciPy's license. + + The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not + universally accepted; they are chosen for consistency with `hypergeom`. + + Note that Fisher's noncentral hypergeometric distribution is distinct + from Wallenius' noncentral hypergeometric distribution, which models + drawing a pre-determined `N` objects from a bin one by one. + When the odds ratio is unity, however, both distributions reduce to the + ordinary hypergeometric distribution. + + %(after_notes)s + + References + ---------- + .. [1] Agner Fog, "Biased Urn Theory". 
+ https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf + + .. [2] "Fisher's noncentral hypergeometric distribution", Wikipedia, + https://en.wikipedia.org/wiki/Fisher's_noncentral_hypergeometric_distribution + + %(example)s + + """ + + rvs_name = "rvs_fisher" + dist = _PyFishersNCHypergeometric + + +nchypergeom_fisher = nchypergeom_fisher_gen( + name='nchypergeom_fisher', + longname="A Fisher's noncentral hypergeometric") + + +class nchypergeom_wallenius_gen(_nchypergeom_gen): + r"""A Wallenius' noncentral hypergeometric discrete random variable. + + Wallenius' noncentral hypergeometric distribution models drawing objects of + two types from a bin. `M` is the total number of objects, `n` is the + number of Type I objects, and `odds` is the odds ratio: the odds of + selecting a Type I object rather than a Type II object when there is only + one object of each type. + The random variate represents the number of Type I objects drawn if we + draw a pre-determined `N` objects from a bin one by one. + + %(before_notes)s + + See Also + -------- + nchypergeom_fisher, hypergeom, nhypergeom + + Notes + ----- + Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond + with parameters `N`, `n`, and `M` (respectively) as defined above. + + The probability mass function is defined as + + .. math:: + + p(x; N, n, M) = \binom{n}{x} \binom{M - n}{N-x} + \int_0^1 \left(1-t^{\omega/D}\right)^x\left(1-t^{1/D}\right)^{N-x} dt + + for + :math:`x \in [x_l, x_u]`, + :math:`M \in {\mathbb N}`, + :math:`n \in [0, M]`, + :math:`N \in [0, M]`, + :math:`\omega > 0`, + where + :math:`x_l = \max(0, N - (M - n))`, + :math:`x_u = \min(N, n)`, + + .. math:: + + D = \omega(n - x) + ((M - n)-(N-x)), + + and the binomial coefficients are defined as + + .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. + + `nchypergeom_wallenius` uses the BiasedUrn package by Agner Fog with + permission for it to be distributed under SciPy's license. 
+ + The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not + universally accepted; they are chosen for consistency with `hypergeom`. + + Note that Wallenius' noncentral hypergeometric distribution is distinct + from Fisher's noncentral hypergeometric distribution, which models + take a handful of objects from the bin at once, finding out afterwards + that `N` objects were taken. + When the odds ratio is unity, however, both distributions reduce to the + ordinary hypergeometric distribution. + + %(after_notes)s + + References + ---------- + .. [1] Agner Fog, "Biased Urn Theory". + https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf + + .. [2] "Wallenius' noncentral hypergeometric distribution", Wikipedia, + https://en.wikipedia.org/wiki/Wallenius'_noncentral_hypergeometric_distribution + + %(example)s + + """ + + rvs_name = "rvs_wallenius" + dist = _PyWalleniusNCHypergeometric + + +nchypergeom_wallenius = nchypergeom_wallenius_gen( + name='nchypergeom_wallenius', + longname="A Wallenius' noncentral hypergeometric") + + +# Collect names of classes and objects in this module. +pairs = list(globals().copy().items()) +_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete) + +__all__ = _distn_names + _distn_gen_names diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_distr_params.py b/parrot/lib/python3.10/site-packages/scipy/stats/_distr_params.py new file mode 100644 index 0000000000000000000000000000000000000000..65b82929fe623946b05fb02ef74214bcc0ab735e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_distr_params.py @@ -0,0 +1,292 @@ +""" +Sane parameters for stats.distributions. 
+""" +import numpy as np + +distcont = [ + ['alpha', (3.5704770516650459,)], + ['anglit', ()], + ['arcsine', ()], + ['argus', (1.0,)], + ['beta', (2.3098496451481823, 0.62687954300963677)], + ['betaprime', (5, 6)], + ['bradford', (0.29891359763170633,)], + ['burr', (10.5, 4.3)], + ['burr12', (10, 4)], + ['cauchy', ()], + ['chi', (78,)], + ['chi2', (55,)], + ['cosine', ()], + ['crystalball', (2.0, 3.0)], + ['dgamma', (1.1023326088288166,)], + ['dweibull', (2.0685080649914673,)], + ['erlang', (10,)], + ['expon', ()], + ['exponnorm', (1.5,)], + ['exponpow', (2.697119160358469,)], + ['exponweib', (2.8923945291034436, 1.9505288745913174)], + ['f', (29, 18)], + ['fatiguelife', (29,)], # correction numargs = 1 + ['fisk', (3.0857548622253179,)], + ['foldcauchy', (4.7164673455831894,)], + ['foldnorm', (1.9521253373555869,)], + ['gamma', (1.9932305483800778,)], + ['gausshyper', (13.763771604130699, 3.1189636648681431, + 2.5145980350183019, 5.1811649903971615)], # veryslow + ['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)], + ['genextreme', (-0.1,)], + ['gengamma', (4.4162385429431925, 3.1193091679242761)], + ['gengamma', (4.4162385429431925, -3.1193091679242761)], + ['genhalflogistic', (0.77274727809929322,)], + ['genhyperbolic', (0.5, 1.5, -0.5,)], + ['geninvgauss', (2.3, 1.5)], + ['genlogistic', (0.41192440799679475,)], + ['gennorm', (1.2988442399460265,)], + ['halfgennorm', (0.6748054997000371,)], + ['genpareto', (0.1,)], # use case with finite moments + ['gibrat', ()], + ['gompertz', (0.94743713075105251,)], + ['gumbel_l', ()], + ['gumbel_r', ()], + ['halfcauchy', ()], + ['halflogistic', ()], + ['halfnorm', ()], + ['hypsecant', ()], + ['invgamma', (4.0668996136993067,)], + ['invgauss', (0.14546264555347513,)], + ['invweibull', (10.58,)], + ['irwinhall', (10,)], + ['jf_skew_t', (8, 4)], + ['johnsonsb', (4.3172675099141058, 3.1837781130785063)], + ['johnsonsu', (2.554395574161155, 2.2482281679651965)], + ['kappa4', (0.0, 0.0)], + ['kappa4', (-0.1, 
0.1)], + ['kappa4', (0.0, 0.1)], + ['kappa4', (0.1, 0.0)], + ['kappa3', (1.0,)], + ['ksone', (1000,)], # replace 22 by 100 to avoid failing range, ticket 956 + ['kstwo', (10,)], + ['kstwobign', ()], + ['laplace', ()], + ['laplace_asymmetric', (2,)], + ['levy', ()], + ['levy_l', ()], + ['levy_stable', (1.8, -0.5)], + ['loggamma', (0.41411931826052117,)], + ['logistic', ()], + ['loglaplace', (3.2505926592051435,)], + ['lognorm', (0.95368226960575331,)], + ['loguniform', (0.01, 1.25)], + ['lomax', (1.8771398388773268,)], + ['maxwell', ()], + ['mielke', (10.4, 4.6)], + ['moyal', ()], + ['nakagami', (4.9673794866666237,)], + ['ncf', (27, 27, 0.41578441799226107)], + ['nct', (14, 0.24045031331198066)], + ['ncx2', (21, 1.0560465975116415)], + ['norm', ()], + ['norminvgauss', (1.25, 0.5)], + ['pareto', (2.621716532144454,)], + ['pearson3', (0.1,)], + ['pearson3', (-2,)], + ['powerlaw', (1.6591133289905851,)], + ['powerlaw', (0.6591133289905851,)], + ['powerlognorm', (2.1413923530064087, 0.44639540782048337)], + ['powernorm', (4.4453652254590779,)], + ['rayleigh', ()], + ['rdist', (1.6,)], + ['recipinvgauss', (0.63004267809369119,)], + ['reciprocal', (0.01, 1.25)], + ['rel_breitwigner', (36.545206797050334, )], + ['rice', (0.7749725210111873,)], + ['semicircular', ()], + ['skewcauchy', (0.5,)], + ['skewnorm', (4.0,)], + ['studentized_range', (3.0, 10.0)], + ['t', (2.7433514990818093,)], + ['trapezoid', (0.2, 0.8)], + ['triang', (0.15785029824528218,)], + ['truncexpon', (4.6907725456810478,)], + ['truncnorm', (-1.0978730080013919, 2.7306754109031979)], + ['truncnorm', (0.1, 2.)], + ['truncpareto', (1.8, 5.3)], + ['truncpareto', (2, 5)], + ['truncweibull_min', (2.5, 0.25, 1.75)], + ['tukeylambda', (3.1321477856738267,)], + ['uniform', ()], + ['vonmises', (3.9939042581071398,)], + ['vonmises_line', (3.9939042581071398,)], + ['wald', ()], + ['weibull_max', (2.8687961709100187,)], + ['weibull_min', (1.7866166930421596,)], + ['wrapcauchy', (0.031071279018614728,)]] + + 
+distdiscrete = [ + ['bernoulli',(0.3,)], + ['betabinom', (5, 2.3, 0.63)], + ['betanbinom', (5, 9.3, 1)], + ['binom', (5, 0.4)], + ['boltzmann',(1.4, 19)], + ['dlaplace', (0.8,)], # 0.5 + ['geom', (0.5,)], + ['hypergeom',(30, 12, 6)], + ['hypergeom',(21,3,12)], # numpy.random (3,18,12) numpy ticket:921 + ['hypergeom',(21,18,11)], # numpy.random (18,3,11) numpy ticket:921 + ['nchypergeom_fisher', (140, 80, 60, 0.5)], + ['nchypergeom_wallenius', (140, 80, 60, 0.5)], + ['logser', (0.6,)], # re-enabled, numpy ticket:921 + ['nbinom', (0.4, 0.4)], # from tickets: 583 + ['nbinom', (5, 0.5)], + ['planck', (0.51,)], # 4.1 + ['poisson', (0.6,)], + ['randint', (7, 31)], + ['skellam', (15, 8)], + ['zipf', (6.6,)], + ['zipfian', (0.75, 15)], + ['zipfian', (1.25, 10)], + ['yulesimon', (11.0,)], + ['nhypergeom', (20, 7, 1)] +] + + +invdistdiscrete = [ + # In each of the following, at least one shape parameter is invalid + ['hypergeom', (3, 3, 4)], + ['nhypergeom', (5, 2, 8)], + ['nchypergeom_fisher', (3, 3, 4, 1)], + ['nchypergeom_wallenius', (3, 3, 4, 1)], + ['bernoulli', (1.5, )], + ['binom', (10, 1.5)], + ['betabinom', (10, -0.4, -0.5)], + ['betanbinom', (10, -0.4, -0.5)], + ['boltzmann', (-1, 4)], + ['dlaplace', (-0.5, )], + ['geom', (1.5, )], + ['logser', (1.5, )], + ['nbinom', (10, 1.5)], + ['planck', (-0.5, )], + ['poisson', (-0.5, )], + ['randint', (5, 2)], + ['skellam', (-5, -2)], + ['zipf', (-2, )], + ['yulesimon', (-2, )], + ['zipfian', (-0.75, 15)] +] + + +invdistcont = [ + # In each of the following, at least one shape parameter is invalid + ['alpha', (-1, )], + ['anglit', ()], + ['arcsine', ()], + ['argus', (-1, )], + ['beta', (-2, 2)], + ['betaprime', (-2, 2)], + ['bradford', (-1, )], + ['burr', (-1, 1)], + ['burr12', (-1, 1)], + ['cauchy', ()], + ['chi', (-1, )], + ['chi2', (-1, )], + ['cosine', ()], + ['crystalball', (-1, 2)], + ['dgamma', (-1, )], + ['dweibull', (-1, )], + ['erlang', (-1, )], + ['expon', ()], + ['exponnorm', (-1, )], + ['exponweib', (1, -1)], + 
['exponpow', (-1, )], + ['f', (10, -10)], + ['fatiguelife', (-1, )], + ['fisk', (-1, )], + ['foldcauchy', (-1, )], + ['foldnorm', (-1, )], + ['genlogistic', (-1, )], + ['gennorm', (-1, )], + ['genpareto', (np.inf, )], + ['genexpon', (1, 2, -3)], + ['genextreme', (np.inf, )], + ['genhyperbolic', (0.5, -0.5, -1.5,)], + ['gausshyper', (1, 2, 3, -4)], + ['gamma', (-1, )], + ['gengamma', (-1, 0)], + ['genhalflogistic', (-1, )], + ['geninvgauss', (1, 0)], + ['gibrat', ()], + ['gompertz', (-1, )], + ['gumbel_r', ()], + ['gumbel_l', ()], + ['halfcauchy', ()], + ['halflogistic', ()], + ['halfnorm', ()], + ['halfgennorm', (-1, )], + ['hypsecant', ()], + ['invgamma', (-1, )], + ['invgauss', (-1, )], + ['invweibull', (-1, )], + ['irwinhall', (-1,)], + ['irwinhall', (0,)], + ['irwinhall', (2.5,)], + ['jf_skew_t', (-1, 0)], + ['johnsonsb', (1, -2)], + ['johnsonsu', (1, -2)], + ['kappa4', (np.nan, 0)], + ['kappa3', (-1, )], + ['ksone', (-1, )], + ['kstwo', (-1, )], + ['kstwobign', ()], + ['laplace', ()], + ['laplace_asymmetric', (-1, )], + ['levy', ()], + ['levy_l', ()], + ['levy_stable', (-1, 1)], + ['logistic', ()], + ['loggamma', (-1, )], + ['loglaplace', (-1, )], + ['lognorm', (-1, )], + ['loguniform', (10, 5)], + ['lomax', (-1, )], + ['maxwell', ()], + ['mielke', (1, -2)], + ['moyal', ()], + ['nakagami', (-1, )], + ['ncx2', (-1, 2)], + ['ncf', (10, 20, -1)], + ['nct', (-1, 2)], + ['norm', ()], + ['norminvgauss', (5, -10)], + ['pareto', (-1, )], + ['pearson3', (np.nan, )], + ['powerlaw', (-1, )], + ['powerlognorm', (1, -2)], + ['powernorm', (-1, )], + ['rdist', (-1, )], + ['rayleigh', ()], + ['rice', (-1, )], + ['recipinvgauss', (-1, )], + ['semicircular', ()], + ['skewnorm', (np.inf, )], + ['studentized_range', (-1, 1)], + ['rel_breitwigner', (-2, )], + ['t', (-1, )], + ['trapezoid', (0, 2)], + ['triang', (2, )], + ['truncexpon', (-1, )], + ['truncnorm', (10, 5)], + ['truncpareto', (-1, 5)], + ['truncpareto', (1.8, .5)], + ['truncweibull_min', (-2.5, 0.25, 1.75)], + 
['tukeylambda', (np.nan, )], + ['uniform', ()], + ['vonmises', (-1, )], + ['vonmises_line', (-1, )], + ['wald', ()], + ['weibull_min', (-1, )], + ['weibull_max', (-1, )], + ['wrapcauchy', (2, )], + ['reciprocal', (15, 10)], + ['skewcauchy', (2, )] +] diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_entropy.py b/parrot/lib/python3.10/site-packages/scipy/stats/_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..1e6c8d9e1963fac5cba6c459767d6da89120f201 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_entropy.py @@ -0,0 +1,426 @@ +""" +Created on Fri Apr 2 09:06:05 2021 + +@author: matth +""" + +from __future__ import annotations +import math +import numpy as np +from scipy import special +from ._axis_nan_policy import _axis_nan_policy_factory, _broadcast_arrays +from scipy._lib._array_api import array_namespace + +__all__ = ['entropy', 'differential_entropy'] + + +@_axis_nan_policy_factory( + lambda x: x, + n_samples=lambda kwgs: ( + 2 if ("qk" in kwgs and kwgs["qk"] is not None) + else 1 + ), + n_outputs=1, result_to_tuple=lambda x: (x,), paired=True, + too_small=-1 # entropy doesn't have too small inputs +) +def entropy(pk: np.typing.ArrayLike, + qk: np.typing.ArrayLike | None = None, + base: float | None = None, + axis: int = 0 + ) -> np.number | np.ndarray: + """ + Calculate the Shannon entropy/relative entropy of given distribution(s). + + If only probabilities `pk` are given, the Shannon entropy is calculated as + ``H = -sum(pk * log(pk))``. + + If `qk` is not None, then compute the relative entropy + ``D = sum(pk * log(pk / qk))``. This quantity is also known + as the Kullback-Leibler divergence. + + This routine will normalize `pk` and `qk` if they don't sum to 1. + + Parameters + ---------- + pk : array_like + Defines the (discrete) distribution. Along each axis-slice of ``pk``, + element ``i`` is the (possibly unnormalized) probability of event + ``i``. 
+ qk : array_like, optional + Sequence against which the relative entropy is computed. Should be in + the same format as `pk`. + base : float, optional + The logarithmic base to use, defaults to ``e`` (natural logarithm). + axis : int, optional + The axis along which the entropy is calculated. Default is 0. + + Returns + ------- + S : {float, array_like} + The calculated entropy. + + Notes + ----- + Informally, the Shannon entropy quantifies the expected uncertainty + inherent in the possible outcomes of a discrete random variable. + For example, + if messages consisting of sequences of symbols from a set are to be + encoded and transmitted over a noiseless channel, then the Shannon entropy + ``H(pk)`` gives a tight lower bound for the average number of units of + information needed per symbol if the symbols occur with frequencies + governed by the discrete distribution `pk` [1]_. The choice of base + determines the choice of units; e.g., ``e`` for nats, ``2`` for bits, etc. + + The relative entropy, ``D(pk|qk)``, quantifies the increase in the average + number of units of information needed per symbol if the encoding is + optimized for the probability distribution `qk` instead of the true + distribution `pk`. Informally, the relative entropy quantifies the expected + excess in surprise experienced if one believes the true distribution is + `qk` when it is actually `pk`. + + A related quantity, the cross entropy ``CE(pk, qk)``, satisfies the + equation ``CE(pk, qk) = H(pk) + D(pk|qk)`` and can also be calculated with + the formula ``CE = -sum(pk * log(qk))``. It gives the average + number of units of information needed per symbol if an encoding is + optimized for the probability distribution `qk` when the true distribution + is `pk`. It is not computed directly by `entropy`, but it can be computed + using two calls to the function (see Examples). + + See [2]_ for more information. + + References + ---------- + .. [1] Shannon, C.E. 
(1948), A Mathematical Theory of Communication. + Bell System Technical Journal, 27: 379-423. + https://doi.org/10.1002/j.1538-7305.1948.tb01338.x + .. [2] Thomas M. Cover and Joy A. Thomas. 2006. Elements of Information + Theory (Wiley Series in Telecommunications and Signal Processing). + Wiley-Interscience, USA. + + + Examples + -------- + The outcome of a fair coin is the most uncertain: + + >>> import numpy as np + >>> from scipy.stats import entropy + >>> base = 2 # work in units of bits + >>> pk = np.array([1/2, 1/2]) # fair coin + >>> H = entropy(pk, base=base) + >>> H + 1.0 + >>> H == -np.sum(pk * np.log(pk)) / np.log(base) + True + + The outcome of a biased coin is less uncertain: + + >>> qk = np.array([9/10, 1/10]) # biased coin + >>> entropy(qk, base=base) + 0.46899559358928117 + + The relative entropy between the fair coin and biased coin is calculated + as: + + >>> D = entropy(pk, qk, base=base) + >>> D + 0.7369655941662062 + >>> D == np.sum(pk * np.log(pk/qk)) / np.log(base) + True + + The cross entropy can be calculated as the sum of the entropy and + relative entropy`: + + >>> CE = entropy(pk, base=base) + entropy(pk, qk, base=base) + >>> CE + 1.736965594166206 + >>> CE == -np.sum(pk * np.log(qk)) / np.log(base) + True + + """ + if base is not None and base <= 0: + raise ValueError("`base` must be a positive number or `None`.") + + xp = array_namespace(pk) if qk is None else array_namespace(pk, qk) + + pk = xp.asarray(pk) + with np.errstate(invalid='ignore'): + pk = 1.0*pk / xp.sum(pk, axis=axis, keepdims=True) # type: ignore[operator] + if qk is None: + vec = special.entr(pk) + else: + qk = xp.asarray(qk) + pk, qk = _broadcast_arrays((pk, qk), axis=None, xp=xp) # don't ignore any axes + sum_kwargs = dict(axis=axis, keepdims=True) + qk = 1.0*qk / xp.sum(qk, **sum_kwargs) # type: ignore[operator, call-overload] + vec = special.rel_entr(pk, qk) + S = xp.sum(vec, axis=axis) + if base is not None: + S /= math.log(base) + return S + + +def 
_differential_entropy_is_too_small(samples, kwargs, axis=-1): + values = samples[0] + n = values.shape[axis] + window_length = kwargs.get("window_length", + math.floor(math.sqrt(n) + 0.5)) + if not 2 <= 2 * window_length < n: + return True + return False + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,), + too_small=_differential_entropy_is_too_small +) +def differential_entropy( + values: np.typing.ArrayLike, + *, + window_length: int | None = None, + base: float | None = None, + axis: int = 0, + method: str = "auto", +) -> np.number | np.ndarray: + r"""Given a sample of a distribution, estimate the differential entropy. + + Several estimation methods are available using the `method` parameter. By + default, a method is selected based the size of the sample. + + Parameters + ---------- + values : sequence + Sample from a continuous distribution. + window_length : int, optional + Window length for computing Vasicek estimate. Must be an integer + between 1 and half of the sample size. If ``None`` (the default), it + uses the heuristic value + + .. math:: + \left \lfloor \sqrt{n} + 0.5 \right \rfloor + + where :math:`n` is the sample size. This heuristic was originally + proposed in [2]_ and has become common in the literature. + base : float, optional + The logarithmic base to use, defaults to ``e`` (natural logarithm). + axis : int, optional + The axis along which the differential entropy is calculated. + Default is 0. + method : {'vasicek', 'van es', 'ebrahimi', 'correa', 'auto'}, optional + The method used to estimate the differential entropy from the sample. + Default is ``'auto'``. See Notes for more information. + + Returns + ------- + entropy : float + The calculated differential entropy. + + Notes + ----- + This function will converge to the true differential entropy in the limit + + .. 
math:: + n \to \infty, \quad m \to \infty, \quad \frac{m}{n} \to 0 + + The optimal choice of ``window_length`` for a given sample size depends on + the (unknown) distribution. Typically, the smoother the density of the + distribution, the larger the optimal value of ``window_length`` [1]_. + + The following options are available for the `method` parameter. + + * ``'vasicek'`` uses the estimator presented in [1]_. This is + one of the first and most influential estimators of differential entropy. + * ``'van es'`` uses the bias-corrected estimator presented in [3]_, which + is not only consistent but, under some conditions, asymptotically normal. + * ``'ebrahimi'`` uses an estimator presented in [4]_, which was shown + in simulation to have smaller bias and mean squared error than + the Vasicek estimator. + * ``'correa'`` uses the estimator presented in [5]_ based on local linear + regression. In a simulation study, it had consistently smaller mean + square error than the Vasiceck estimator, but it is more expensive to + compute. + * ``'auto'`` selects the method automatically (default). Currently, + this selects ``'van es'`` for very small samples (<10), ``'ebrahimi'`` + for moderate sample sizes (11-1000), and ``'vasicek'`` for larger + samples, but this behavior is subject to change in future versions. + + All estimators are implemented as described in [6]_. + + References + ---------- + .. [1] Vasicek, O. (1976). A test for normality based on sample entropy. + Journal of the Royal Statistical Society: + Series B (Methodological), 38(1), 54-59. + .. [2] Crzcgorzewski, P., & Wirczorkowski, R. (1999). Entropy-based + goodness-of-fit test for exponentiality. Communications in + Statistics-Theory and Methods, 28(5), 1183-1202. + .. [3] Van Es, B. (1992). Estimating functionals related to a density by a + class of statistics based on spacings. Scandinavian Journal of + Statistics, 61-72. + .. [4] Ebrahimi, N., Pflughoeft, K., & Soofi, E. S. (1994). 
Two measures + of sample entropy. Statistics & Probability Letters, 20(3), 225-234. + .. [5] Correa, J. C. (1995). A new estimator of entropy. Communications + in Statistics-Theory and Methods, 24(10), 2439-2449. + .. [6] Noughabi, H. A. (2015). Entropy Estimation Using Numerical Methods. + Annals of Data Science, 2(2), 231-241. + https://link.springer.com/article/10.1007/s40745-015-0045-9 + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import differential_entropy, norm + + Entropy of a standard normal distribution: + + >>> rng = np.random.default_rng() + >>> values = rng.standard_normal(100) + >>> differential_entropy(values) + 1.3407817436640392 + + Compare with the true entropy: + + >>> float(norm.entropy()) + 1.4189385332046727 + + For several sample sizes between 5 and 1000, compare the accuracy of + the ``'vasicek'``, ``'van es'``, and ``'ebrahimi'`` methods. Specifically, + compare the root mean squared error (over 1000 trials) between the estimate + and the true differential entropy of the distribution. + + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> + >>> + >>> def rmse(res, expected): + ... '''Root mean squared error''' + ... return np.sqrt(np.mean((res - expected)**2)) + >>> + >>> + >>> a, b = np.log10(5), np.log10(1000) + >>> ns = np.round(np.logspace(a, b, 10)).astype(int) + >>> reps = 1000 # number of repetitions for each sample size + >>> expected = stats.expon.entropy() + >>> + >>> method_errors = {'vasicek': [], 'van es': [], 'ebrahimi': []} + >>> for method in method_errors: + ... for n in ns: + ... rvs = stats.expon.rvs(size=(reps, n), random_state=rng) + ... res = stats.differential_entropy(rvs, method=method, axis=-1) + ... error = rmse(res, expected) + ... method_errors[method].append(error) + >>> + >>> for method, errors in method_errors.items(): + ... 
plt.loglog(ns, errors, label=method)
    >>>
    >>> plt.legend()
    >>> plt.xlabel('sample size')
    >>> plt.ylabel('RMSE (1000 trials)')
    >>> plt.title('Entropy Estimator Error (Exponential Distribution)')

    """
    values = np.asarray(values)
    # Move the sample axis last so every estimator can assume axis=-1.
    values = np.moveaxis(values, axis, -1)
    n = values.shape[-1]  # number of observations

    if window_length is None:
        # Default window: round(sqrt(n)), computed without numpy rounding.
        window_length = math.floor(math.sqrt(n) + 0.5)

    if not 2 <= 2 * window_length < n:
        raise ValueError(
            f"Window length ({window_length}) must be positive and less "
            f"than half the sample size ({n}).",
        )

    if base is not None and base <= 0:
        raise ValueError("`base` must be a positive number or `None`.")

    # All estimators operate on order statistics (sorted samples).
    sorted_data = np.sort(values, axis=-1)

    methods = {"vasicek": _vasicek_entropy,
               "van es": _van_es_entropy,
               "correa": _correa_entropy,
               "ebrahimi": _ebrahimi_entropy,
               "auto": _vasicek_entropy}
    method = method.lower()
    if method not in methods:
        message = f"`method` must be one of {set(methods)}"
        raise ValueError(message)

    if method == "auto":
        # Sample-size-based selection documented in the docstring above.
        if n <= 10:
            method = 'van es'
        elif n <= 1000:
            method = 'ebrahimi'
        else:
            method = 'vasicek'

    res = methods[method](sorted_data, window_length)

    if base is not None:
        # Estimators work in nats; convert to the requested log base.
        res /= np.log(base)

    return res


def _pad_along_last_axis(X, m):
    """Pad the data for computing the rolling window difference."""
    # scales a bit better than method in _vasicek_like_entropy
    shape = np.array(X.shape)
    shape[-1] = m
    # Repeat the first/last order statistic m times on each side.
    Xl = np.broadcast_to(X[..., [0]], shape)  # [0] vs 0 to maintain shape
    Xr = np.broadcast_to(X[..., [-1]], shape)
    return np.concatenate((Xl, X, Xr), axis=-1)


def _vasicek_entropy(X, m):
    """Compute the Vasicek estimator as described in [6] Eq. 1.3."""
    n = X.shape[-1]
    X = _pad_along_last_axis(X, m)
    # Spacings between order statistics 2*m apart.
    differences = X[..., 2 * m:] - X[..., : -2 * m:]
    logs = np.log(n/(2*m) * differences)
    return np.mean(logs, axis=-1)


def _van_es_entropy(X, m):
    """Compute the van Es estimator as described in [6]."""
    # No equation number, but referred to as HVE_mn.
    # Typo: there should be a log within the summation.
    n = X.shape[-1]
    difference = X[..., m:] - X[..., :-m]
    term1 = 1/(n-m) * np.sum(np.log((n+1)/m * difference), axis=-1)
    k = np.arange(m, n+1)
    # Bias-correction terms (harmonic sum plus log adjustments).
    return term1 + np.sum(1/k) + np.log(m) - np.log(n+1)


def _ebrahimi_entropy(X, m):
    """Compute the Ebrahimi estimator as described in [6]."""
    # No equation number, but referred to as HE_mn
    n = X.shape[-1]
    X = _pad_along_last_axis(X, m)

    differences = X[..., 2 * m:] - X[..., : -2 * m:]

    # ci corrects the window weight near the edges of the sample.
    i = np.arange(1, n+1).astype(float)
    ci = np.ones_like(i)*2
    ci[i <= m] = 1 + (i[i <= m] - 1)/m
    ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m

    logs = np.log(n * differences / (ci * m))
    return np.mean(logs, axis=-1)


def _correa_entropy(X, m):
    """Compute the Correa estimator as described in [6]."""
    # No equation number, but referred to as HC_mn
    n = X.shape[-1]
    X = _pad_along_last_axis(X, m)

    i = np.arange(1, n+1)
    dj = np.arange(-m, m+1)[:, None]
    j = i + dj
    j0 = j + m - 1  # 0-indexed version of j

    # Local linear regression of the order statistics within each window.
    Xibar = np.mean(X[..., j0], axis=-2, keepdims=True)
    difference = X[..., j0] - Xibar
    num = np.sum(difference*dj, axis=-2)  # dj is d-i
    den = n*np.sum(difference**2, axis=-2)
    return -np.mean(np.log(num/den), axis=-1)
diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_fit.py b/parrot/lib/python3.10/site-packages/scipy/stats/_fit.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b3b17b757bc4abfea187a62b9fe8130672b3ca2
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/stats/_fit.py
@@ -0,0 +1,1354 @@
import warnings
from collections import namedtuple
import numpy as np
from 
scipy import optimize, stats
from scipy._lib._util import check_random_state


def _combine_bounds(name, user_bounds, shape_domain, integral):
    """Intersection of user-defined bounds and distribution PDF/PMF domain"""

    user_bounds = np.atleast_1d(user_bounds)

    if user_bounds[0] > user_bounds[1]:
        message = (f"There are no values for `{name}` on the interval "
                   f"{list(user_bounds)}.")
        raise ValueError(message)

    # Clip the user-provided interval to the domain of the parameter.
    bounds = (max(user_bounds[0], shape_domain[0]),
              min(user_bounds[1], shape_domain[1]))

    if integral and (np.ceil(bounds[0]) > np.floor(bounds[1])):
        message = (f"There are no integer values for `{name}` on the interval "
                   f"defined by the user-provided bounds and the domain "
                   "of the distribution.")
        raise ValueError(message)
    elif not integral and (bounds[0] > bounds[1]):
        message = (f"There are no values for `{name}` on the interval "
                   f"defined by the user-provided bounds and the domain "
                   "of the distribution.")
        raise ValueError(message)

    if not np.all(np.isfinite(bounds)):
        message = (f"The intersection of user-provided bounds for `{name}` "
                   f"and the domain of the distribution is not finite. Please "
                   f"provide finite bounds for shape `{name}` in `bounds`.")
        raise ValueError(message)

    return bounds


class FitResult:
    r"""Result of fitting a discrete or continuous distribution to data

    Attributes
    ----------
    params : namedtuple
        A namedtuple containing the maximum likelihood estimates of the
        shape parameters, location, and (if applicable) scale of the
        distribution.
    success : bool or None
        Whether the optimizer considered the optimization to terminate
        successfully or not.
    message : str or None
        Any status message provided by the optimizer.

    """

    def __init__(self, dist, data, discrete, res):
        self._dist = dist
        self._data = data
        self.discrete = discrete
        # PMF for discrete distributions, PDF for continuous ones.
        self.pxf = getattr(dist, "pmf", None) or getattr(dist, "pdf", None)

        shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
        if not discrete:
            FitParams = namedtuple('FitParams', shape_names + ['loc', 'scale'])
        else:
            FitParams = namedtuple('FitParams', shape_names + ['loc'])

        self.params = FitParams(*res.x)

        # Optimizer can report success even when nllf is infinite
        if res.success and not np.isfinite(self.nllf()):
            res.success = False
            res.message = ("Optimization converged to parameter values that "
                           "are inconsistent with the data.")
        self.success = getattr(res, "success", None)
        self.message = getattr(res, "message", None)

    def __repr__(self):
        # Right-align attribute names; omit attributes that are None.
        keys = ["params", "success", "message"]
        m = max(map(len, keys)) + 1
        return '\n'.join([key.rjust(m) + ': ' + repr(getattr(self, key))
                          for key in keys if getattr(self, key) is not None])

    def nllf(self, params=None, data=None):
        """Negative log-likelihood function

        Evaluates the negative of the log-likelihood function of the provided
        data at the provided parameters.

        Parameters
        ----------
        params : tuple, optional
            The shape parameters, location, and (if applicable) scale of the
            distribution as a single tuple. Default is the maximum likelihood
            estimates (``self.params``).
        data : array_like, optional
            The data for which the log-likelihood function is to be evaluated.
            Default is the data to which the distribution was fit.

        Returns
        -------
        nllf : float
            The negative of the log-likelihood function.

        """
        params = params if params is not None else self.params
        data = data if data is not None else self._data
        return self._dist.nnlf(theta=params, x=data)

    def plot(self, ax=None, *, plot_type="hist"):
        """Visually compare the data against the fitted distribution.

        Available only if `matplotlib` is installed.

        Parameters
        ----------
        ax : `matplotlib.axes.Axes`
            Axes object to draw the plot onto, otherwise uses the current
            Axes.
        plot_type : {"hist", "qq", "pp", "cdf"}
            Type of plot to draw. Options include:

            - "hist": Superposes the PDF/PMF of the fitted distribution
              over a normalized histogram of the data.
            - "qq": Scatter plot of theoretical quantiles against the
              empirical quantiles. Specifically, the x-coordinates are the
              values of the fitted distribution PPF evaluated at the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is the
              number of data points, and the y-coordinates are the sorted
              data points.
            - "pp": Scatter plot of theoretical percentiles against the
              observed percentiles. Specifically, the x-coordinates are the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
              the number of data points, and the y-coordinates are the values
              of the fitted distribution CDF evaluated at the sorted
              data points.
            - "cdf": Superposes the CDF of the fitted distribution over the
              empirical CDF. Specifically, the x-coordinates of the empirical
              CDF are the sorted data points, and the y-coordinates are the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
              the number of data points.

        Returns
        -------
        ax : `matplotlib.axes.Axes`
            The matplotlib Axes object on which the plot was drawn.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy import stats
        >>> import matplotlib.pyplot as plt  # matplotlib must be installed
        >>> rng = np.random.default_rng()
        >>> data = stats.nbinom(5, 0.5).rvs(size=1000, random_state=rng)
        >>> bounds = [(0, 30), (0, 1)]
        >>> res = stats.fit(stats.nbinom, data, bounds)
        >>> ax = res.plot()  # save matplotlib Axes object

        The `matplotlib.axes.Axes` object can be used to customize the plot.
        See `matplotlib.axes.Axes` documentation for details.

        >>> ax.set_xlabel('number of trials')  # customize axis label
        >>> ax.get_children()[0].set_linewidth(5)  # customize line widths
        >>> ax.legend()
        >>> plt.show()
        """
        try:
            import matplotlib  # noqa: F401
        except ModuleNotFoundError as exc:
            message = "matplotlib must be installed to use method `plot`."
            raise ModuleNotFoundError(message) from exc

        # 'histogram' is accepted as an alias of 'hist'.
        plots = {'histogram': self._hist_plot, 'qq': self._qq_plot,
                 'pp': self._pp_plot, 'cdf': self._cdf_plot,
                 'hist': self._hist_plot}
        if plot_type.lower() not in plots:
            message = f"`plot_type` must be one of {set(plots.keys())}"
            raise ValueError(message)
        plot = plots[plot_type.lower()]

        if ax is None:
            import matplotlib.pyplot as plt
            ax = plt.gca()

        fit_params = np.atleast_1d(self.params)

        return plot(ax=ax, fit_params=fit_params)

    def _hist_plot(self, ax, fit_params):
        from matplotlib.ticker import MaxNLocator

        # Use the fitted support where finite; fall back to the data range.
        support = self._dist.support(*fit_params)
        lb = support[0] if np.isfinite(support[0]) else min(self._data)
        ub = support[1] if np.isfinite(support[1]) else max(self._data)
        pxf = "PMF" if self.discrete else "PDF"

        if self.discrete:
            x = np.arange(lb, ub + 2)
            y = self.pxf(x, *fit_params)
            ax.vlines(x[:-1], 0, y[:-1], label='Fitted Distribution PMF',
                      color='C0')
            options = dict(density=True, bins=x, align='left', color='C1')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.set_xlabel('k')
            ax.set_ylabel('PMF')
        else:
            x = np.linspace(lb, ub, 200)
            y = self.pxf(x, *fit_params)
            ax.plot(x, y, '--', label='Fitted Distribution PDF', color='C0')
            options = dict(density=True, bins=50, align='mid', color='C1')
            ax.set_xlabel('x')
            ax.set_ylabel('PDF')

        # Small continuous samples are drawn as points instead of a histogram.
        if len(self._data) > 50 or self.discrete:
            ax.hist(self._data, label="Histogram of Data", **options)
        else:
            ax.plot(self._data, np.zeros_like(self._data), "*",
                    label='Data', color='C1')

        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {pxf} and Histogram")
        ax.legend(*ax.get_legend_handles_labels())
        return ax

    def _qp_plot(self, ax, fit_params, qq):
        # Shared implementation of the Q-Q (qq=True) and P-P (qq=False) plots.
        data = np.sort(self._data)
        ps = self._plotting_positions(len(self._data))

        if qq:
            qp = "Quantiles"
            plot_type = 'Q-Q'
            x = self._dist.ppf(ps, *fit_params)
            y = data
        else:
            qp = "Percentiles"
            plot_type = 'P-P'
            x = ps
            y = self._dist.cdf(data, *fit_params)

        ax.plot(x, y, '.', label=f'Fitted Distribution {plot_type}',
                color='C0', zorder=1)
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        lim = [min(xlim[0], ylim[0]), max(xlim[1], ylim[1])]
        if not qq:
            # Percentiles are bounded on [0, 1].
            lim = max(lim[0], 0), min(lim[1], 1)

        if self.discrete and qq:
            q_min, q_max = int(lim[0]), int(lim[1]+1)
            q_ideal = np.arange(q_min, q_max)
            # q_ideal = np.unique(self._dist.ppf(ps, *fit_params))
            ax.plot(q_ideal, q_ideal, 'o', label='Reference', color='k',
                    alpha=0.25, markerfacecolor='none', clip_on=True)
        elif self.discrete and not qq:
            # The intent of this is to match the plot that would be produced
            # if x were continuous on [0, 1] and y were cdf(ppf(x)).
            # It can be approximated by letting x = np.linspace(0, 1, 1000),
            # but this might not look great when zooming in. The vertical
            # portions are included to indicate where the transition occurs
            # where the data completely obscures the horizontal portions.
            p_min, p_max = lim
            a, b = self._dist.support(*fit_params)
            p_min = max(p_min, 0 if np.isfinite(a) else 1e-3)
            p_max = min(p_max, 1 if np.isfinite(b) else 1-1e-3)
            q_min, q_max = self._dist.ppf([p_min, p_max], *fit_params)
            qs = np.arange(q_min-1, q_max+1)
            ps = self._dist.cdf(qs, *fit_params)
            ax.step(ps, ps, '-', label='Reference', color='k', alpha=0.25,
                    clip_on=True)
        else:
            ax.plot(lim, lim, '-', label='Reference', color='k', alpha=0.25,
                    clip_on=True)

        ax.set_xlim(lim)
        ax.set_ylim(lim)
        ax.set_xlabel(rf"Fitted $\tt {self._dist.name}$ Theoretical {qp}")
        ax.set_ylabel(f"Data {qp}")
        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {plot_type} Plot")
        ax.legend(*ax.get_legend_handles_labels())
        ax.set_aspect('equal')
        return ax

    def _qq_plot(self, **kwargs):
        return self._qp_plot(qq=True, **kwargs)

    def _pp_plot(self, **kwargs):
        return self._qp_plot(qq=False, **kwargs)

    def _plotting_positions(self, n, a=.5):
        # See https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_positions
        k = np.arange(1, n+1)
        return (k-a) / (n + 1 - 2*a)

    def _cdf_plot(self, ax, fit_params):
        data = np.sort(self._data)
        ecdf = self._plotting_positions(len(self._data))
        # Dashed steps read better when there are few distinct values.
        ls = '--' if len(np.unique(data)) < 30 else '.'
        xlabel = 'k' if self.discrete else 'x'
        ax.step(data, ecdf, ls, label='Empirical CDF', color='C1', zorder=0)

        xlim = ax.get_xlim()
        q = np.linspace(*xlim, 300)
        tcdf = self._dist.cdf(q, *fit_params)

        ax.plot(q, tcdf, label='Fitted Distribution CDF', color='C0', zorder=1)
        ax.set_xlim(xlim)
        ax.set_ylim(0, 1)
        ax.set_xlabel(xlabel)
        ax.set_ylabel("CDF")
        ax.set_title(rf"Fitted $\tt {self._dist.name}$ and Empirical CDF")
        # Reverse legend order so the fitted CDF is listed first.
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1])
        return ax


def fit(dist, data, bounds=None, *, guess=None, method='mle',
        optimizer=optimize.differential_evolution):
    r"""Fit a discrete or continuous distribution to data

    Given a distribution, data, and bounds on the parameters of the
    distribution, return maximum likelihood estimates of the parameters.

    Parameters
    ----------
    dist : `scipy.stats.rv_continuous` or `scipy.stats.rv_discrete`
        The object representing the distribution to be fit to the data.
    data : 1D array_like
        The data to which the distribution is to be fit. If the data contain
        any of ``np.nan``, ``np.inf``, or -``np.inf``, the fit method will
        raise a ``ValueError``.
    bounds : dict or sequence of tuples, optional
        If a dictionary, each key is the name of a parameter of the
        distribution, and the corresponding value is a tuple containing the
        lower and upper bound on that parameter. If the distribution is
        defined only for a finite range of values of that parameter, no entry
        for that parameter is required; e.g., some distributions have
        parameters which must be on the interval [0, 1]. Bounds for parameters
        location (``loc``) and scale (``scale``) are optional; by default,
        they are fixed to 0 and 1, respectively.

        If a sequence, element *i* is a tuple containing the lower and upper
        bound on the *i*\ th parameter of the distribution. In this case,
        bounds for *all* distribution shape parameters must be provided.
        Optionally, bounds for location and scale may follow the
        distribution shape parameters.

        If a shape is to be held fixed (e.g. if it is known), the
        lower and upper bounds may be equal. If a user-provided lower or upper
        bound is beyond a bound of the domain for which the distribution is
        defined, the bound of the distribution's domain will replace the
        user-provided value. Similarly, parameters which must be integral
        will be constrained to integral values within the user-provided bounds.
    guess : dict or array_like, optional
        If a dictionary, each key is the name of a parameter of the
        distribution, and the corresponding value is a guess for the value
        of the parameter.

        If a sequence, element *i* is a guess for the *i*\ th parameter of the
        distribution. In this case, guesses for *all* distribution shape
        parameters must be provided.

        If `guess` is not provided, guesses for the decision variables will
        not be passed to the optimizer. If `guess` is provided, guesses for
        any missing parameters will be set at the mean of the lower and
        upper bounds. Guesses for parameters which must be integral will be
        rounded to integral values, and guesses that lie outside the
        intersection of the user-provided bounds and the domain of the
        distribution will be clipped.
    method : {'mle', 'mse'}
        With ``method="mle"`` (default), the fit is computed by minimizing
        the negative log-likelihood function. A large, finite penalty
        (rather than infinite negative log-likelihood) is applied for
        observations beyond the support of the distribution.
        With ``method="mse"``, the fit is computed by minimizing
        the negative log-product spacing function. The same penalty is applied
        for observations beyond the support. We follow the approach of [1]_,
        which is generalized for samples with repeated observations.
    optimizer : callable, optional
        `optimizer` is a callable that accepts the following positional
        argument.

        fun : callable
            The objective function to be optimized. `fun` accepts one argument
            ``x``, candidate shape parameters of the distribution, and returns
            the objective function value given ``x``, `dist`, and the provided
            `data`.
            The job of `optimizer` is to find values of the decision variables
            that minimizes `fun`.

        `optimizer` must also accept the following keyword argument.

        bounds : sequence of tuples
            The bounds on values of the decision variables; each element will
            be a tuple containing the lower and upper bound on a decision
            variable.

        If `guess` is provided, `optimizer` must also accept the following
        keyword argument.

        x0 : array_like
            The guesses for each decision variable.

        If the distribution has any shape parameters that must be integral or
        if the distribution is discrete and the location parameter is not
        fixed, `optimizer` must also accept the following keyword argument.

        integrality : array_like of bools
            For each decision variable, True if the decision variable
            must be constrained to integer values and False if the decision
            variable is continuous.

        `optimizer` must return an object, such as an instance of
        `scipy.optimize.OptimizeResult`, which holds the optimal values of
        the decision variables in an attribute ``x``. If attributes
        ``fun``, ``status``, or ``message`` are provided, they will be
        included in the result object returned by `fit`.

    Returns
    -------
    result : `~scipy.stats._result_classes.FitResult`
        An object with the following fields.

        params : namedtuple
            A namedtuple containing the maximum likelihood estimates of the
            shape parameters, location, and (if applicable) scale of the
            distribution.
        success : bool or None
            Whether the optimizer considered the optimization to terminate
            successfully or not.
        message : str or None
            Any status message provided by the optimizer.

        The object has the following method:

        nllf(params=None, data=None)
            By default, the negative log-likehood function at the fitted
            `params` for the given `data`. Accepts a tuple containing
            alternative shapes, location, and scale of the distribution and
            an array of alternative data.

        plot(ax=None)
            Superposes the PDF/PMF of the fitted distribution over a
            normalized histogram of the data.

    See Also
    --------
    rv_continuous, rv_discrete

    Notes
    -----
    Optimization is more likely to converge to the maximum likelihood estimate
    when the user provides tight bounds containing the maximum likelihood
    estimate. For example, when fitting a binomial distribution to data, the
    number of experiments underlying each sample may be known, in which case
    the corresponding shape parameter ``n`` can be fixed.

    References
    ----------
    .. [1] Shao, Yongzhao, and Marjorie G. Hahn. "Maximum product of spacings
           method: a unified formulation with illustration of strong
           consistency." Illinois Journal of Mathematics 43.3 (1999): 489-499.

    Examples
    --------
    Suppose we wish to fit a distribution to the following data.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> dist = stats.nbinom
    >>> shapes = (5, 0.5)
    >>> data = dist.rvs(*shapes, size=1000, random_state=rng)

    Suppose we do not know how the data were generated, but we suspect that
    it follows a negative binomial distribution with parameters *n* and *p*\.
    (See `scipy.stats.nbinom`.) We believe that the parameter *n* was fewer
    than 30, and we know that the parameter *p* must lie on the interval
    [0, 1]. We record this information in a variable `bounds` and pass
    this information to `fit`.

    >>> bounds = [(0, 30), (0, 1)]
    >>> res = stats.fit(dist, data, bounds)

    `fit` searches within the user-specified `bounds` for the
    values that best match the data (in the sense of maximum likelihood
    estimation). In this case, it found shape values similar to those
    from which the data were actually generated.

    >>> res.params
    FitParams(n=5.0, p=0.5028157644634368, loc=0.0)  # may vary

    We can visualize the results by superposing the probability mass function
    of the distribution (with the shapes fit to the data) over a normalized
    histogram of the data.

    >>> import matplotlib.pyplot as plt  # matplotlib must be installed to plot
    >>> res.plot()
    >>> plt.show()

    Note that the estimate for *n* was exactly integral; this is because
    the domain of the `nbinom` PMF includes only integral *n*, and the `nbinom`
    object "knows" that. `nbinom` also knows that the shape *p* must be a
    value between 0 and 1. In such a case - when the domain of the distribution
    with respect to a parameter is finite - we are not required to specify
    bounds for the parameter.

    >>> bounds = {'n': (0, 30)}  # omit parameter p using a `dict`
    >>> res2 = stats.fit(dist, data, bounds)
    >>> res2.params
    FitParams(n=5.0, p=0.5016492009232932, loc=0.0)  # may vary

    If we wish to force the distribution to be fit with *n* fixed at 6, we can
    set both the lower and upper bounds on *n* to 6. Note, however, that the
    value of the objective function being optimized is typically worse (higher)
    in this case.

    >>> bounds = {'n': (6, 6)}  # fix parameter `n`
    >>> res3 = stats.fit(dist, data, bounds)
    >>> res3.params
    FitParams(n=6.0, p=0.5486556076755706, loc=0.0)  # may vary
    >>> res3.nllf() > res.nllf()
    True  # may vary

    Note that the numerical results of the previous examples are typical, but
    they may vary because the default optimizer used by `fit`,
    `scipy.optimize.differential_evolution`, is stochastic. However, we can
    customize the settings used by the optimizer to ensure reproducibility -
    or even use a different optimizer entirely - using the `optimizer`
    parameter.

    >>> from scipy.optimize import differential_evolution
    >>> rng = np.random.default_rng(767585560716548)
    >>> def optimizer(fun, bounds, *, integrality):
    ...     return differential_evolution(fun, bounds, strategy='best2bin',
    ...                                   seed=rng, integrality=integrality)
    >>> bounds = [(0, 30), (0, 1)]
    >>> res4 = stats.fit(dist, data, bounds, optimizer=optimizer)
    >>> res4.params
    FitParams(n=5.0, p=0.5015183149259951, loc=0.0)

    """
    # --- Input Validation / Standardization --- #
    user_bounds = bounds
    user_guess = guess

    # distribution input validation and information collection
    if hasattr(dist, "pdf"):  # can't use isinstance for types
        default_bounds = {'loc': (0, 0), 'scale': (1, 1)}
        discrete = False
    elif hasattr(dist, "pmf"):
        default_bounds = {'loc': (0, 0)}
        discrete = True
    else:
        message = ("`dist` must be an instance of `rv_continuous` "
                   "or `rv_discrete.`")
        raise ValueError(message)

    try:
        param_info = dist._param_info()
    except AttributeError as e:
        message = (f"Distribution `{dist.name}` is not yet supported by "
                   "`scipy.stats.fit` because shape information has "
                   "not been defined.")
        raise ValueError(message) from e

    # data input validation
    data = np.asarray(data)
    if data.ndim != 1:
        message = "`data` must be exactly one-dimensional."
        raise ValueError(message)
    if not (np.issubdtype(data.dtype, np.number)
            and np.all(np.isfinite(data))):
        message = "All elements of `data` must be finite numbers."
        raise ValueError(message)

    # bounds input validation and information collection
    n_params = len(param_info)
    # Continuous distributions carry loc and scale; discrete carry loc only
    # (see `default_bounds` above), so shapes = params minus those.
    n_shapes = n_params - (1 if discrete else 2)
    param_list = [param.name for param in param_info]
    param_names = ", ".join(param_list)
    shape_names = ", ".join(param_list[:n_shapes])

    if user_bounds is None:
        user_bounds = {}

    if isinstance(user_bounds, dict):
        default_bounds.update(user_bounds)
        user_bounds = default_bounds
        user_bounds_array = np.empty((n_params, 2))
        for i in range(n_params):
            param_name = param_info[i].name
            user_bound = user_bounds.pop(param_name, None)
            if user_bound is None:
                user_bound = param_info[i].domain
            user_bounds_array[i] = user_bound
        if user_bounds:
            message = ("Bounds provided for the following unrecognized "
                       f"parameters will be ignored: {set(user_bounds)}")
            warnings.warn(message, RuntimeWarning, stacklevel=2)

    else:
        try:
            user_bounds = np.asarray(user_bounds, dtype=float)
            if user_bounds.size == 0:
                user_bounds = np.empty((0, 2))
        except ValueError as e:
            message = ("Each element of a `bounds` sequence must be a tuple "
                       "containing two elements: the lower and upper bound of "
                       "a distribution parameter.")
            raise ValueError(message) from e
        if (user_bounds.ndim != 2 or user_bounds.shape[1] != 2):
            message = ("Each element of `bounds` must be a tuple specifying "
                       "the lower and upper bounds of a shape parameter")
            raise ValueError(message)
        if user_bounds.shape[0] < n_shapes:
            message = (f"A `bounds` sequence must contain at least {n_shapes} "
                       "elements: tuples specifying the lower and upper "
                       f"bounds of all shape parameters {shape_names}.")
            raise ValueError(message)
        if user_bounds.shape[0] > n_params:
            message = ("A `bounds` sequence may not contain more than "
                       f"{n_params} elements: tuples specifying the lower and "
                       "upper bounds of distribution parameters "
                       f"{param_names}.")
            raise ValueError(message)

        user_bounds_array = np.empty((n_params, 2))
        user_bounds_array[n_shapes:] = list(default_bounds.values())
        user_bounds_array[:len(user_bounds)] = user_bounds

    user_bounds = user_bounds_array
    validated_bounds = []
    for i in range(n_params):
        name = param_info[i].name
        user_bound = user_bounds_array[i]
        param_domain = param_info[i].domain
        integral = param_info[i].integrality
        combined = _combine_bounds(name, user_bound, param_domain, integral)
        validated_bounds.append(combined)

    bounds = np.asarray(validated_bounds)
    integrality = [param.integrality for param in param_info]

    # guess input validation

    if user_guess is None:
        guess_array = None
    elif isinstance(user_guess, dict):
        # Missing parameters default to the midpoint of their bounds.
        default_guess = {param.name: np.mean(bound)
                         for param, bound in zip(param_info, bounds)}
        unrecognized = set(user_guess) - set(default_guess)
        if unrecognized:
            message = ("Guesses provided for the following unrecognized "
                       f"parameters will be ignored: {unrecognized}")
            warnings.warn(message, RuntimeWarning, stacklevel=2)
        default_guess.update(user_guess)

        message = ("Each element of `guess` must be a scalar "
                   "guess for a distribution parameter.")
        try:
            guess_array = np.asarray([default_guess[param.name]
                                      for param in param_info], dtype=float)
        except ValueError as e:
            raise ValueError(message) from e

    else:
        message = ("Each element of `guess` must be a scalar "
                   "guess for a distribution parameter.")
        try:
            user_guess = np.asarray(user_guess, dtype=float)
        except ValueError as e:
            raise ValueError(message) from e
        if user_guess.ndim != 1:
            raise ValueError(message)
        if user_guess.shape[0] < n_shapes:
            message = (f"A `guess` sequence must contain at least {n_shapes} "
                       "elements: scalar guesses for the distribution shape "
                       f"parameters {shape_names}.")
            raise ValueError(message)
        if user_guess.shape[0] > n_params:
            message = ("A `guess` sequence may not contain more than "
                       f"{n_params} elements: scalar guesses for the "
                       f"distribution parameters {param_names}.")
            raise ValueError(message)

        guess_array = np.mean(bounds, axis=1)
        guess_array[:len(user_guess)] = user_guess

    if guess_array is not None:
        guess_rounded = guess_array.copy()

        # Round integral parameters and warn the user about each adjustment.
        guess_rounded[integrality] = np.round(guess_rounded[integrality])
        rounded = np.where(guess_rounded != guess_array)[0]
        for i in rounded:
            message = (f"Guess for parameter `{param_info[i].name}` "
                       f"rounded from {guess_array[i]} to {guess_rounded[i]}.")
            warnings.warn(message, RuntimeWarning, stacklevel=2)

        # Clip guesses into the validated bounds, again warning per change.
        guess_clipped = np.clip(guess_rounded, bounds[:, 0], bounds[:, 1])
        clipped = np.where(guess_clipped != guess_rounded)[0]
        for i in clipped:
            message = (f"Guess for parameter `{param_info[i].name}` "
                       f"clipped from {guess_rounded[i]} to "
                       f"{guess_clipped[i]}.")
            warnings.warn(message, RuntimeWarning, stacklevel=2)

        guess = guess_clipped
    else:
        guess = None

    # --- Fitting --- #
    def nllf(free_params, data=data):  # bind data NOW
        with np.errstate(invalid='ignore', divide='ignore'):
            return dist._penalized_nnlf(free_params, data)

    def nlpsf(free_params, data=data):  # bind data NOW
        with np.errstate(invalid='ignore', divide='ignore'):
            return dist._penalized_nlpsf(free_params, data)

    # 'mle' minimizes the penalized NLLF; 'mse' the penalized
    # negative log-product-spacing (see docstring).
    methods = {'mle': nllf, 'mse': nlpsf}
    objective = methods[method.lower()]

    with np.errstate(invalid='ignore', divide='ignore'):
        kwds = {}
        if bounds is not None:
            kwds['bounds'] = bounds
        if np.any(integrality):
            kwds['integrality'] = integrality
        if guess is not None:
            kwds['x0'] = guess
        res = optimizer(objective, **kwds)

    return FitResult(dist, data, discrete, res)


GoodnessOfFitResult = namedtuple('GoodnessOfFitResult',
                                 ('fit_result', 'statistic', 'pvalue',
                                  'null_distribution'))


def goodness_of_fit(dist, data, *, known_params=None, fit_params=None,
                    guessed_params=None, statistic='ad', n_mc_samples=9999,
                    random_state=None):
    r"""
    Perform a goodness of fit test comparing data to a distribution family.
+ + Given a distribution family and data, perform a test of the null hypothesis + that the data were drawn from a distribution in that family. Any known + parameters of the distribution may be specified. Remaining parameters of + the distribution will be fit to the data, and the p-value of the test + is computed accordingly. Several statistics for comparing the distribution + to data are available. + + Parameters + ---------- + dist : `scipy.stats.rv_continuous` + The object representing the distribution family under the null + hypothesis. + data : 1D array_like + Finite, uncensored data to be tested. + known_params : dict, optional + A dictionary containing name-value pairs of known distribution + parameters. Monte Carlo samples are randomly drawn from the + null-hypothesized distribution with these values of the parameters. + Before the statistic is evaluated for each Monte Carlo sample, only + remaining unknown parameters of the null-hypothesized distribution + family are fit to the samples; the known parameters are held fixed. + If all parameters of the distribution family are known, then the step + of fitting the distribution family to each sample is omitted. + fit_params : dict, optional + A dictionary containing name-value pairs of distribution parameters + that have already been fit to the data, e.g. using `scipy.stats.fit` + or the ``fit`` method of `dist`. Monte Carlo samples are drawn from the + null-hypothesized distribution with these specified values of the + parameter. On those Monte Carlo samples, however, these and all other + unknown parameters of the null-hypothesized distribution family are + fit before the statistic is evaluated. + guessed_params : dict, optional + A dictionary containing name-value pairs of distribution parameters + which have been guessed. These parameters are always considered as + free parameters and are fit both to the provided `data` as well as + to the Monte Carlo samples drawn from the null-hypothesized + distribution. 
The purpose of these `guessed_params` is to be used as + initial values for the numerical fitting procedure. + statistic : {"ad", "ks", "cvm", "filliben"} or callable, optional + The statistic used to compare data to a distribution after fitting + unknown parameters of the distribution family to the data. The + Anderson-Darling ("ad") [1]_, Kolmogorov-Smirnov ("ks") [1]_, + Cramer-von Mises ("cvm") [1]_, and Filliben ("filliben") [7]_ + statistics are available. Alternatively, a callable with signature + ``(dist, data, axis)`` may be supplied to compute the statistic. Here + ``dist`` is a frozen distribution object (potentially with array + parameters), ``data`` is an array of Monte Carlo samples (of + compatible shape), and ``axis`` is the axis of ``data`` along which + the statistic must be computed. + n_mc_samples : int, default: 9999 + The number of Monte Carlo samples drawn from the null hypothesized + distribution to form the null distribution of the statistic. The + sample size of each is the same as the given `data`. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate the Monte Carlo + samples. + + If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, then the provided instance is used. + + Returns + ------- + res : GoodnessOfFitResult + An object with the following attributes. + + fit_result : `~scipy.stats._result_classes.FitResult` + An object representing the fit of the provided `dist` to `data`. + This object includes the values of distribution family parameters + that fully define the null-hypothesized distribution, that is, + the distribution from which Monte Carlo samples are drawn. 
+ statistic : float + The value of the statistic comparing provided `data` to the + null-hypothesized distribution. + pvalue : float + The proportion of elements in the null distribution with + statistic values at least as extreme as the statistic value of the + provided `data`. + null_distribution : ndarray + The value of the statistic for each Monte Carlo sample + drawn from the null-hypothesized distribution. + + Notes + ----- + This is a generalized Monte Carlo goodness-of-fit procedure, special cases + of which correspond with various Anderson-Darling tests, Lilliefors' test, + etc. The test is described in [2]_, [3]_, and [4]_ as a parametric + bootstrap test. This is a Monte Carlo test in which parameters that + specify the distribution from which samples are drawn have been estimated + from the data. We describe the test using "Monte Carlo" rather than + "parametric bootstrap" throughout to avoid confusion with the more familiar + nonparametric bootstrap, and describe how the test is performed below. + + *Traditional goodness of fit tests* + + Traditionally, critical values corresponding with a fixed set of + significance levels are pre-calculated using Monte Carlo methods. Users + perform the test by calculating the value of the test statistic only for + their observed `data` and comparing this value to tabulated critical + values. This practice is not very flexible, as tables are not available for + all distributions and combinations of known and unknown parameter values. + Also, results can be inaccurate when critical values are interpolated from + limited tabulated data to correspond with the user's sample size and + fitted parameter values. To overcome these shortcomings, this function + allows the user to perform the Monte Carlo trials adapted to their + particular data. + + *Algorithmic overview* + + In brief, this routine executes the following steps: + + 1. 
Fit unknown parameters to the given `data`, thereby forming the + "null-hypothesized" distribution, and compute the statistic of + this pair of data and distribution. + 2. Draw random samples from this null-hypothesized distribution. + 3. Fit the unknown parameters to each random sample. + 4. Calculate the statistic between each sample and the distribution that + has been fit to the sample. + 5. Compare the value of the statistic corresponding with `data` from (1) + against the values of the statistic corresponding with the random + samples from (4). The p-value is the proportion of samples with a + statistic value greater than or equal to the statistic of the observed + data. + + In more detail, the steps are as follows. + + First, any unknown parameters of the distribution family specified by + `dist` are fit to the provided `data` using maximum likelihood estimation. + (One exception is the normal distribution with unknown location and scale: + we use the bias-corrected standard deviation ``np.std(data, ddof=1)`` for + the scale as recommended in [1]_.) + These values of the parameters specify a particular member of the + distribution family referred to as the "null-hypothesized distribution", + that is, the distribution from which the data were sampled under the null + hypothesis. The `statistic`, which compares data to a distribution, is + computed between `data` and the null-hypothesized distribution. + + Next, many (specifically `n_mc_samples`) new samples, each containing the + same number of observations as `data`, are drawn from the + null-hypothesized distribution. All unknown parameters of the distribution + family `dist` are fit to *each resample*, and the `statistic` is computed + between each sample and its corresponding fitted distribution. These + values of the statistic form the Monte Carlo null distribution (not to be + confused with the "null-hypothesized distribution" above). 
+ + The p-value of the test is the proportion of statistic values in the Monte + Carlo null distribution that are at least as extreme as the statistic value + of the provided `data`. More precisely, the p-value is given by + + .. math:: + + p = \frac{b + 1} + {m + 1} + + where :math:`b` is the number of statistic values in the Monte Carlo null + distribution that are greater than or equal to the statistic value + calculated for `data`, and :math:`m` is the number of elements in the + Monte Carlo null distribution (`n_mc_samples`). The addition of :math:`1` + to the numerator and denominator can be thought of as including the + value of the statistic corresponding with `data` in the null distribution, + but a more formal explanation is given in [5]_. + + *Limitations* + + The test can be very slow for some distribution families because unknown + parameters of the distribution family must be fit to each of the Monte + Carlo samples, and for most distributions in SciPy, distribution fitting + performed via numerical optimization. + + *Anti-Pattern* + + For this reason, it may be tempting + to treat parameters of the distribution pre-fit to `data` (by the user) + as though they were `known_params`, as specification of all parameters of + the distribution precludes the need to fit the distribution to each Monte + Carlo sample. (This is essentially how the original Kilmogorov-Smirnov + test is performed.) Although such a test can provide evidence against the + null hypothesis, the test is conservative in the sense that small p-values + will tend to (greatly) *overestimate* the probability of making a type I + error (that is, rejecting the null hypothesis although it is true), and the + power of the test is low (that is, it is less likely to reject the null + hypothesis even when the null hypothesis is false). + This is because the Monte Carlo samples are less likely to agree with the + null-hypothesized distribution as well as `data`. 
This tends to increase + the values of the statistic recorded in the null distribution, so that a + larger number of them exceed the value of statistic for `data`, thereby + inflating the p-value. + + References + ---------- + .. [1] M. A. Stephens (1974). "EDF Statistics for Goodness of Fit and + Some Comparisons." Journal of the American Statistical Association, + Vol. 69, pp. 730-737. + .. [2] W. Stute, W. G. Manteiga, and M. P. Quindimil (1993). + "Bootstrap based goodness-of-fit-tests." Metrika 40.1: 243-256. + .. [3] C. Genest, & B Rémillard. (2008). "Validity of the parametric + bootstrap for goodness-of-fit testing in semiparametric models." + Annales de l'IHP Probabilités et statistiques. Vol. 44. No. 6. + .. [4] I. Kojadinovic and J. Yan (2012). "Goodness-of-fit testing based on + a weighted bootstrap: A fast large-sample alternative to the + parametric bootstrap." Canadian Journal of Statistics 40.3: 480-500. + .. [5] B. Phipson and G. K. Smyth (2010). "Permutation P-values Should + Never Be Zero: Calculating Exact P-values When Permutations Are + Randomly Drawn." Statistical Applications in Genetics and Molecular + Biology 9.1. + .. [6] H. W. Lilliefors (1967). "On the Kolmogorov-Smirnov test for + normality with mean and variance unknown." Journal of the American + statistical Association 62.318: 399-402. + .. [7] Filliben, James J. "The probability plot correlation coefficient + test for normality." Technometrics 17.1 (1975): 111-117. + + Examples + -------- + A well-known test of the null hypothesis that data were drawn from a + given distribution is the Kolmogorov-Smirnov (KS) test, available in SciPy + as `scipy.stats.ks_1samp`. Suppose we wish to test whether the following + data: + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> x = stats.uniform.rvs(size=75, random_state=rng) + + were sampled from a normal distribution. 
To perform a KS test, the + empirical distribution function of the observed data will be compared + against the (theoretical) cumulative distribution function of a normal + distribution. Of course, to do this, the normal distribution under the null + hypothesis must be fully specified. This is commonly done by first fitting + the ``loc`` and ``scale`` parameters of the distribution to the observed + data, then performing the test. + + >>> loc, scale = np.mean(x), np.std(x, ddof=1) + >>> cdf = stats.norm(loc, scale).cdf + >>> stats.ks_1samp(x, cdf) + KstestResult(statistic=0.1119257570456813, + pvalue=0.2827756409939257, + statistic_location=0.7751845155861765, + statistic_sign=-1) + + An advantage of the KS-test is that the p-value - the probability of + obtaining a value of the test statistic under the null hypothesis as + extreme as the value obtained from the observed data - can be calculated + exactly and efficiently. `goodness_of_fit` can only approximate these + results. + + >>> known_params = {'loc': loc, 'scale': scale} + >>> res = stats.goodness_of_fit(stats.norm, x, known_params=known_params, + ... statistic='ks', random_state=rng) + >>> res.statistic, res.pvalue + (0.1119257570456813, 0.2788) + + The statistic matches exactly, but the p-value is estimated by forming + a "Monte Carlo null distribution", that is, by explicitly drawing random + samples from `scipy.stats.norm` with the provided parameters and + calculating the stastic for each. The fraction of these statistic values + at least as extreme as ``res.statistic`` approximates the exact p-value + calculated by `scipy.stats.ks_1samp`. + + However, in many cases, we would prefer to test only that the data were + sampled from one of *any* member of the normal distribution family, not + specifically from the normal distribution with the location and scale + fitted to the observed sample. 
In this case, Lilliefors [6]_ argued that + the KS test is far too conservative (that is, the p-value overstates + the actual probability of rejecting a true null hypothesis) and thus lacks + power - the ability to reject the null hypothesis when the null hypothesis + is actually false. + Indeed, our p-value above is approximately 0.28, which is far too large + to reject the null hypothesis at any common significance level. + + Consider why this might be. Note that in the KS test above, the statistic + always compares data against the CDF of a normal distribution fitted to the + *observed data*. This tends to reduce the value of the statistic for the + observed data, but it is "unfair" when computing the statistic for other + samples, such as those we randomly draw to form the Monte Carlo null + distribution. It is easy to correct for this: whenever we compute the KS + statistic of a sample, we use the CDF of a normal distribution fitted + to *that sample*. The null distribution in this case has not been + calculated exactly and is tyically approximated using Monte Carlo methods + as described above. This is where `goodness_of_fit` excels. + + >>> res = stats.goodness_of_fit(stats.norm, x, statistic='ks', + ... random_state=rng) + >>> res.statistic, res.pvalue + (0.1119257570456813, 0.0196) + + Indeed, this p-value is much smaller, and small enough to (correctly) + reject the null hypothesis at common significance levels, including 5% and + 2.5%. + + However, the KS statistic is not very sensitive to all deviations from + normality. The original advantage of the KS statistic was the ability + to compute the null distribution theoretically, but a more sensitive + statistic - resulting in a higher test power - can be used now that we can + approximate the null distribution + computationally. 
The Anderson-Darling statistic [1]_ tends to be more + sensitive, and critical values of the this statistic have been tabulated + for various significance levels and sample sizes using Monte Carlo methods. + + >>> res = stats.anderson(x, 'norm') + >>> print(res.statistic) + 1.2139573337497467 + >>> print(res.critical_values) + [0.549 0.625 0.75 0.875 1.041] + >>> print(res.significance_level) + [15. 10. 5. 2.5 1. ] + + Here, the observed value of the statistic exceeds the critical value + corresponding with a 1% significance level. This tells us that the p-value + of the observed data is less than 1%, but what is it? We could interpolate + from these (already-interpolated) values, but `goodness_of_fit` can + estimate it directly. + + >>> res = stats.goodness_of_fit(stats.norm, x, statistic='ad', + ... random_state=rng) + >>> res.statistic, res.pvalue + (1.2139573337497467, 0.0034) + + A further advantage is that use of `goodness_of_fit` is not limited to + a particular set of distributions or conditions on which parameters + are known versus which must be estimated from data. Instead, + `goodness_of_fit` can estimate p-values relatively quickly for any + distribution with a sufficiently fast and reliable ``fit`` method. For + instance, here we perform a goodness of fit test using the Cramer-von Mises + statistic against the Rayleigh distribution with known location and unknown + scale. + + >>> rng = np.random.default_rng() + >>> x = stats.chi(df=2.2, loc=0, scale=2).rvs(size=1000, random_state=rng) + >>> res = stats.goodness_of_fit(stats.rayleigh, x, statistic='cvm', + ... known_params={'loc': 0}, random_state=rng) + + This executes fairly quickly, but to check the reliability of the ``fit`` + method, we should inspect the fit result. + + >>> res.fit_result # location is as specified, and scale is reasonable + params: FitParams(loc=0.0, scale=2.1026719844231243) + success: True + message: 'The fit was performed successfully.' 
+ >>> import matplotlib.pyplot as plt # matplotlib must be installed to plot + >>> res.fit_result.plot() + >>> plt.show() + + If the distribution is not fit to the observed data as well as possible, + the test may not control the type I error rate, that is, the chance of + rejecting the null hypothesis even when it is true. + + We should also look for extreme outliers in the null distribution that + may be caused by unreliable fitting. These do not necessarily invalidate + the result, but they tend to reduce the test's power. + + >>> _, ax = plt.subplots() + >>> ax.hist(np.log10(res.null_distribution)) + >>> ax.set_xlabel("log10 of CVM statistic under the null hypothesis") + >>> ax.set_ylabel("Frequency") + >>> ax.set_title("Histogram of the Monte Carlo null distribution") + >>> plt.show() + + This plot seems reassuring. + + If ``fit`` method is working reliably, and if the distribution of the test + statistic is not particularly sensitive to the values of the fitted + parameters, then the p-value provided by `goodness_of_fit` is expected to + be a good approximation. 
+ + >>> res.statistic, res.pvalue + (0.2231991510248692, 0.0525) + + """ + args = _gof_iv(dist, data, known_params, fit_params, guessed_params, + statistic, n_mc_samples, random_state) + (dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params, + guessed_rfd_params, statistic, n_mc_samples_int, random_state) = args + + # Fit null hypothesis distribution to data + nhd_fit_fun = _get_fit_fun(dist, data, guessed_nhd_params, + fixed_nhd_params) + nhd_vals = nhd_fit_fun(data) + nhd_dist = dist(*nhd_vals) + + def rvs(size): + return nhd_dist.rvs(size=size, random_state=random_state) + + # Define statistic + fit_fun = _get_fit_fun(dist, data, guessed_rfd_params, fixed_rfd_params) + if callable(statistic): + compare_fun = statistic + else: + compare_fun = _compare_dict[statistic] + alternative = getattr(compare_fun, 'alternative', 'greater') + + def statistic_fun(data, axis): + # Make things simple by always working along the last axis. + data = np.moveaxis(data, axis, -1) + rfd_vals = fit_fun(data) + rfd_dist = dist(*rfd_vals) + return compare_fun(rfd_dist, data, axis=-1) + + res = stats.monte_carlo_test(data, rvs, statistic_fun, vectorized=True, + n_resamples=n_mc_samples, axis=-1, + alternative=alternative) + opt_res = optimize.OptimizeResult() + opt_res.success = True + opt_res.message = "The fit was performed successfully." + opt_res.x = nhd_vals + # Only continuous distributions for now, hence discrete=False + # There's no fundamental limitation; it's just that we're not using + # stats.fit, discrete distributions don't have `fit` method, and + # we haven't written any vectorized fit functions for a discrete + # distribution yet. 
+ return GoodnessOfFitResult(FitResult(dist, data, False, opt_res), + res.statistic, res.pvalue, + res.null_distribution) + + +def _get_fit_fun(dist, data, guessed_params, fixed_params): + + shape_names = [] if dist.shapes is None else dist.shapes.split(", ") + param_names = shape_names + ['loc', 'scale'] + fparam_names = ['f'+name for name in param_names] + all_fixed = not set(fparam_names).difference(fixed_params) + guessed_shapes = [guessed_params.pop(x, None) + for x in shape_names if x in guessed_params] + + if all_fixed: + def fit_fun(data): + return [fixed_params[name] for name in fparam_names] + # Define statistic, including fitting distribution to data + elif dist in _fit_funs: + def fit_fun(data): + params = _fit_funs[dist](data, **fixed_params) + params = np.asarray(np.broadcast_arrays(*params)) + if params.ndim > 1: + params = params[..., np.newaxis] + return params + else: + def fit_fun_1d(data): + return dist.fit(data, *guessed_shapes, **guessed_params, + **fixed_params) + + def fit_fun(data): + params = np.apply_along_axis(fit_fun_1d, axis=-1, arr=data) + if params.ndim > 1: + params = params.T[..., np.newaxis] + return params + + return fit_fun + + +# Vectorized fitting functions. These are to accept ND `data` in which each +# row (slice along last axis) is a sample to fit and scalar fixed parameters. +# They return a tuple of shape parameter arrays, each of shape data.shape[:-1]. +def _fit_norm(data, floc=None, fscale=None): + loc = floc + scale = fscale + if loc is None and scale is None: + loc = np.mean(data, axis=-1) + scale = np.std(data, ddof=1, axis=-1) + elif loc is None: + loc = np.mean(data, axis=-1) + elif scale is None: + scale = np.sqrt(((data - loc)**2).mean(axis=-1)) + return loc, scale + + +_fit_funs = {stats.norm: _fit_norm} # type: ignore[attr-defined] + + +# Vectorized goodness of fit statistic functions. These accept a frozen +# distribution object and `data` in which each row (slice along last axis) is +# a sample. 
+ + +def _anderson_darling(dist, data, axis): + x = np.sort(data, axis=-1) + n = data.shape[-1] + i = np.arange(1, n+1) + Si = (2*i - 1)/n * (dist.logcdf(x) + dist.logsf(x[..., ::-1])) + S = np.sum(Si, axis=-1) + return -n - S + + +def _compute_dplus(cdfvals): # adapted from _stats_py before gh-17062 + n = cdfvals.shape[-1] + return (np.arange(1.0, n + 1) / n - cdfvals).max(axis=-1) + + +def _compute_dminus(cdfvals): + n = cdfvals.shape[-1] + return (cdfvals - np.arange(0.0, n)/n).max(axis=-1) + + +def _kolmogorov_smirnov(dist, data, axis): + x = np.sort(data, axis=-1) + cdfvals = dist.cdf(x) + Dplus = _compute_dplus(cdfvals) # always works along last axis + Dminus = _compute_dminus(cdfvals) + return np.maximum(Dplus, Dminus) + + +def _corr(X, M): + # Correlation coefficient r, simplified and vectorized as we need it. + # See [7] Equation (2). Lemma 1/2 are only for distributions symmetric + # about 0. + Xm = X.mean(axis=-1, keepdims=True) + Mm = M.mean(axis=-1, keepdims=True) + num = np.sum((X - Xm) * (M - Mm), axis=-1) + den = np.sqrt(np.sum((X - Xm)**2, axis=-1) * np.sum((M - Mm)**2, axis=-1)) + return num/den + + +def _filliben(dist, data, axis): + # [7] Section 8 # 1 + X = np.sort(data, axis=-1) + + # [7] Section 8 # 2 + n = data.shape[-1] + k = np.arange(1, n+1) + # Filliben used an approximation for the uniform distribution order + # statistic medians. + # m = (k - .3175)/(n + 0.365) + # m[-1] = 0.5**(1/n) + # m[0] = 1 - m[-1] + # We can just as easily use the (theoretically) exact values. See e.g. 
+ # https://en.wikipedia.org/wiki/Order_statistic + # "Order statistics sampled from a uniform distribution" + m = stats.beta(k, n + 1 - k).median() + + # [7] Section 8 # 3 + M = dist.ppf(m) + + # [7] Section 8 # 4 + return _corr(X, M) +_filliben.alternative = 'less' # type: ignore[attr-defined] + + +def _cramer_von_mises(dist, data, axis): + x = np.sort(data, axis=-1) + n = data.shape[-1] + cdfvals = dist.cdf(x) + u = (2*np.arange(1, n+1) - 1)/(2*n) + w = 1 / (12*n) + np.sum((u - cdfvals)**2, axis=-1) + return w + + +_compare_dict = {"ad": _anderson_darling, "ks": _kolmogorov_smirnov, + "cvm": _cramer_von_mises, "filliben": _filliben} + + +def _gof_iv(dist, data, known_params, fit_params, guessed_params, statistic, + n_mc_samples, random_state): + + if not isinstance(dist, stats.rv_continuous): + message = ("`dist` must be a (non-frozen) instance of " + "`stats.rv_continuous`.") + raise TypeError(message) + + data = np.asarray(data, dtype=float) + if not data.ndim == 1: + message = "`data` must be a one-dimensional array of numbers." 
+ raise ValueError(message) + + # Leave validation of these key/value pairs to the `fit` method, + # but collect these into dictionaries that will be used + known_params = known_params or dict() + fit_params = fit_params or dict() + guessed_params = guessed_params or dict() + + known_params_f = {("f"+key): val for key, val in known_params.items()} + fit_params_f = {("f"+key): val for key, val in fit_params.items()} + + # These are the values of parameters of the null distribution family + # with which resamples are drawn + fixed_nhd_params = known_params_f.copy() + fixed_nhd_params.update(fit_params_f) + + # These are fixed when fitting the distribution family to resamples + fixed_rfd_params = known_params_f.copy() + + # These are used as guesses when fitting the distribution family to + # the original data + guessed_nhd_params = guessed_params.copy() + + # These are used as guesses when fitting the distribution family to + # resamples + guessed_rfd_params = fit_params.copy() + guessed_rfd_params.update(guessed_params) + + if not callable(statistic): + statistic = statistic.lower() + statistics = {'ad', 'ks', 'cvm', 'filliben'} + if statistic not in statistics: + message = f"`statistic` must be one of {statistics}." + raise ValueError(message) + + n_mc_samples_int = int(n_mc_samples) + if n_mc_samples_int != n_mc_samples: + message = "`n_mc_samples` must be an integer." 
+ raise TypeError(message) + + random_state = check_random_state(random_state) + + return (dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params, + guessed_rfd_params, statistic, n_mc_samples_int, random_state) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_hypotests.py b/parrot/lib/python3.10/site-packages/scipy/stats/_hypotests.py new file mode 100644 index 0000000000000000000000000000000000000000..d445c5494fccbf99455218315a8caa1a675efeaf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_hypotests.py @@ -0,0 +1,2027 @@ +from collections import namedtuple +from dataclasses import dataclass +from math import comb +import numpy as np +import warnings +from itertools import combinations +import scipy.stats +from scipy.optimize import shgo +from . import distributions +from ._common import ConfidenceInterval +from ._continuous_distns import norm +from scipy.special import gamma, kv, gammaln +from scipy.fft import ifft +from ._stats_pythran import _a_ij_Aij_Dij2 +from ._stats_pythran import ( + _concordant_pairs as _P, _discordant_pairs as _Q +) +from ._axis_nan_policy import _axis_nan_policy_factory +from scipy.stats import _stats_py + +__all__ = ['epps_singleton_2samp', 'cramervonmises', 'somersd', + 'barnard_exact', 'boschloo_exact', 'cramervonmises_2samp', + 'tukey_hsd', 'poisson_means_test'] + +Epps_Singleton_2sampResult = namedtuple('Epps_Singleton_2sampResult', + ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(Epps_Singleton_2sampResult, n_samples=2, too_small=4) +def epps_singleton_2samp(x, y, t=(0.4, 0.8)): + """Compute the Epps-Singleton (ES) test statistic. + + Test the null hypothesis that two samples have the same underlying + probability distribution. + + Parameters + ---------- + x, y : array-like + The two samples of observations to be tested. Input must not have more + than one dimension. Samples can have different lengths, but both + must have at least five observations. 
+ t : array-like, optional + The points (t1, ..., tn) where the empirical characteristic function is + to be evaluated. It should be positive distinct numbers. The default + value (0.4, 0.8) is proposed in [1]_. Input must not have more than + one dimension. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The associated p-value based on the asymptotic chi2-distribution. + + See Also + -------- + ks_2samp, anderson_ksamp + + Notes + ----- + Testing whether two samples are generated by the same underlying + distribution is a classical question in statistics. A widely used test is + the Kolmogorov-Smirnov (KS) test which relies on the empirical + distribution function. Epps and Singleton introduce a test based on the + empirical characteristic function in [1]_. + + One advantage of the ES test compared to the KS test is that is does + not assume a continuous distribution. In [1]_, the authors conclude + that the test also has a higher power than the KS test in many + examples. They recommend the use of the ES test for discrete samples as + well as continuous samples with at least 25 observations each, whereas + `anderson_ksamp` is recommended for smaller sample sizes in the + continuous case. + + The p-value is computed from the asymptotic distribution of the test + statistic which follows a `chi2` distribution. If the sample size of both + `x` and `y` is below 25, the small sample correction proposed in [1]_ is + applied to the test statistic. + + The default values of `t` are determined in [1]_ by considering + various distributions and finding good values that lead to a high power + of the test in general. Table III in [1]_ gives the optimal values for + the distributions tested in that study. The values of `t` are scaled by + the semi-interquartile range in the implementation, see [1]_. + + References + ---------- + .. [1] T. W. Epps and K. J. 
Singleton, "An omnibus test for the two-sample + problem using the empirical characteristic function", Journal of + Statistical Computation and Simulation 26, p. 177--203, 1986. + + .. [2] S. J. Goerg and J. Kaiser, "Nonparametric testing of distributions + - the Epps-Singleton two-sample test using the empirical characteristic + function", The Stata Journal 9(3), p. 454--465, 2009. + + """ + # x and y are converted to arrays by the decorator + t = np.asarray(t) + # check if x and y are valid inputs + nx, ny = len(x), len(y) + if (nx < 5) or (ny < 5): + raise ValueError('x and y should have at least 5 elements, but len(x) ' + f'= {nx} and len(y) = {ny}.') + if not np.isfinite(x).all(): + raise ValueError('x must not contain nonfinite values.') + if not np.isfinite(y).all(): + raise ValueError('y must not contain nonfinite values.') + n = nx + ny + + # check if t is valid + if t.ndim > 1: + raise ValueError(f't must be 1d, but t.ndim equals {t.ndim}.') + if np.less_equal(t, 0).any(): + raise ValueError('t must contain positive elements only.') + + # rescale t with semi-iqr as proposed in [1]; import iqr here to avoid + # circular import + from scipy.stats import iqr + sigma = iqr(np.hstack((x, y))) / 2 + ts = np.reshape(t, (-1, 1)) / sigma + + # covariance estimation of ES test + gx = np.vstack((np.cos(ts*x), np.sin(ts*x))).T # shape = (nx, 2*len(t)) + gy = np.vstack((np.cos(ts*y), np.sin(ts*y))).T + cov_x = np.cov(gx.T, bias=True) # the test uses biased cov-estimate + cov_y = np.cov(gy.T, bias=True) + est_cov = (n/nx)*cov_x + (n/ny)*cov_y + est_cov_inv = np.linalg.pinv(est_cov) + r = np.linalg.matrix_rank(est_cov_inv) + if r < 2*len(t): + warnings.warn('Estimated covariance matrix does not have full rank. ' + 'This indicates a bad choice of the input t and the ' + 'test might not be consistent.', # see p. 183 in [1]_ + stacklevel=2) + + # compute test statistic w distributed asympt. 
as chisquare with df=r + g_diff = np.mean(gx, axis=0) - np.mean(gy, axis=0) + w = n*np.dot(g_diff.T, np.dot(est_cov_inv, g_diff)) + + # apply small-sample correction + if (max(nx, ny) < 25): + corr = 1.0/(1.0 + n**(-0.45) + 10.1*(nx**(-1.7) + ny**(-1.7))) + w = corr * w + + chi2 = _stats_py._SimpleChi2(r) + p = _stats_py._get_pvalue(w, chi2, alternative='greater', symmetric=False, xp=np) + + return Epps_Singleton_2sampResult(w, p) + + +def poisson_means_test(k1, n1, k2, n2, *, diff=0, alternative='two-sided'): + r""" + Performs the Poisson means test, AKA the "E-test". + + This is a test of the null hypothesis that the difference between means of + two Poisson distributions is `diff`. The samples are provided as the + number of events `k1` and `k2` observed within measurement intervals + (e.g. of time, space, number of observations) of sizes `n1` and `n2`. + + Parameters + ---------- + k1 : int + Number of events observed from distribution 1. + n1: float + Size of sample from distribution 1. + k2 : int + Number of events observed from distribution 2. + n2 : float + Size of sample from distribution 2. + diff : float, default=0 + The hypothesized difference in means between the distributions + underlying the samples. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the difference between distribution means is not + equal to `diff` + * 'less': the difference between distribution means is less than + `diff` + * 'greater': the difference between distribution means is greater + than `diff` + + Returns + ------- + statistic : float + The test statistic (see [1]_ equation 3.3). + pvalue : float + The probability of achieving such an extreme value of the test + statistic under the null hypothesis. + + Notes + ----- + + Let: + + .. math:: X_1 \sim \mbox{Poisson}(\mathtt{n1}\lambda_1) + + be a random variable independent of + + .. 
math:: X_2 \sim \mbox{Poisson}(\mathtt{n2}\lambda_2) + + and let ``k1`` and ``k2`` be the observed values of :math:`X_1` + and :math:`X_2`, respectively. Then `poisson_means_test` uses the number + of observed events ``k1`` and ``k2`` from samples of size ``n1`` and + ``n2``, respectively, to test the null hypothesis that + + .. math:: + H_0: \lambda_1 - \lambda_2 = \mathtt{diff} + + A benefit of the E-test is that it has good power for small sample sizes, + which can reduce sampling costs [1]_. It has been evaluated and determined + to be more powerful than the comparable C-test, sometimes referred to as + the Poisson exact test. + + References + ---------- + .. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for + comparing two Poisson means. Journal of Statistical Planning and + Inference, 119(1), 23-35. + + .. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in + testing samples from Poisson series: With an application to testing + clover seed for dodder. Biometrika, 31(3/4), 313-323. + + Examples + -------- + + Suppose that a gardener wishes to test the number of dodder (weed) seeds + in a sack of clover seeds that they buy from a seed company. It has + previously been established that the number of dodder seeds in clover + follows the Poisson distribution. + + A 100 gram sample is drawn from the sack before being shipped to the + gardener. The sample is analyzed, and it is found to contain no dodder + seeds; that is, `k1` is 0. However, upon arrival, the gardener draws + another 100 gram sample from the sack. This time, three dodder seeds are + found in the sample; that is, `k2` is 3. The gardener would like to + know if the difference is significant and not due to chance. The + null hypothesis is that the difference between the two samples is merely + due to chance, or that :math:`\lambda_1 - \lambda_2 = \mathtt{diff}` + where :math:`\mathtt{diff} = 0`. 
The alternative hypothesis is that the + difference is not due to chance, or :math:`\lambda_1 - \lambda_2 \ne 0`. + The gardener selects a significance level of 5% to reject the null + hypothesis in favor of the alternative [2]_. + + >>> import scipy.stats as stats + >>> res = stats.poisson_means_test(0, 100, 3, 100) + >>> res.statistic, res.pvalue + (-1.7320508075688772, 0.08837900929018157) + + The p-value is .088, indicating a near 9% chance of observing a value of + the test statistic under the null hypothesis. This exceeds 5%, so the + gardener does not reject the null hypothesis as the difference cannot be + regarded as significant at this level. + """ + + _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative) + + # "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4) + lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2)) + + # "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this + # case the null hypothesis cannot be rejected ... [and] it is not necessary + # to compute the p-value". [1] page 26 below eq. (3.6). + if lmbd_hat2 <= 0: + return _stats_py.SignificanceResult(0, 1) + + # The unbiased variance estimate [1] (3.2) + var = k1 / (n1 ** 2) + k2 / (n2 ** 2) + + # The _observed_ pivot statistic from the input. It follows the + # unnumbered equation following equation (3.3) This is used later in + # comparison with the computed pivot statistics in an indicator function. + t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var) + + # Equation (3.5) of [1] is lengthy, so it is broken into several parts, + # beginning here. Note that the probability mass function of poisson is + # exp^(-\mu)*\mu^k/k!, so and this is called with shape \mu, here noted + # here as nlmbd_hat*. The strategy for evaluating the double summation in + # (3.5) is to create two arrays of the values of the two products inside + # the summation and then broadcast them together into a matrix, and then + # sum across the entire matrix. 
+ + # Compute constants (as seen in the first and second separated products in + # (3.5).). (This is the shape (\mu) parameter of the poisson distribution.) + nlmbd_hat1 = n1 * (lmbd_hat2 + diff) + nlmbd_hat2 = n2 * lmbd_hat2 + + # Determine summation bounds for tail ends of distribution rather than + # summing to infinity. `x1*` is for the outer sum and `x2*` is the inner + # sum. + x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1) + x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2) + + # Construct arrays to function as the x_1 and x_2 counters on the summation + # in (3.5). `x1` is in columns and `x2` is in rows to allow for + # broadcasting. + x1 = np.arange(x1_lb, x1_ub + 1) + x2 = np.arange(x2_lb, x2_ub + 1)[:, None] + + # These are the two products in equation (3.5) with `prob_x1` being the + # first (left side) and `prob_x2` being the second (right side). (To + # make as clear as possible: the 1st contains a "+ d" term, the 2nd does + # not.) + prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1) + prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2) + + # compute constants for use in the "pivot statistic" per the + # unnumbered equation following (3.3). + lmbd_x1 = x1 / n1 + lmbd_x2 = x2 / n2 + lmbds_diff = lmbd_x1 - lmbd_x2 - diff + var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2 + + # This is the 'pivot statistic' for use in the indicator of the summation + # (left side of "I[.]"). + with np.errstate(invalid='ignore', divide='ignore'): + t_x1x2 = lmbds_diff / np.sqrt(var_x1x2) + + # `[indicator]` implements the "I[.] ... the indicator function" per + # the paragraph following equation (3.5). + if alternative == 'two-sided': + indicator = np.abs(t_x1x2) >= np.abs(t_k1k2) + elif alternative == 'less': + indicator = t_x1x2 <= t_k1k2 + else: + indicator = t_x1x2 >= t_k1k2 + + # Multiply all combinations of the products together, exclude terms + # based on the `indicator` and then sum. 
(3.5) + pvalue = np.sum((prob_x1 * prob_x2)[indicator]) + return _stats_py.SignificanceResult(t_k1k2, pvalue) + + +def _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative): + # """check for valid types and values of input to `poisson_mean_test`.""" + if k1 != int(k1) or k2 != int(k2): + raise TypeError('`k1` and `k2` must be integers.') + + count_err = '`k1` and `k2` must be greater than or equal to 0.' + if k1 < 0 or k2 < 0: + raise ValueError(count_err) + + if n1 <= 0 or n2 <= 0: + raise ValueError('`n1` and `n2` must be greater than 0.') + + if diff < 0: + raise ValueError('diff must be greater than or equal to 0.') + + alternatives = {'two-sided', 'less', 'greater'} + if alternative.lower() not in alternatives: + raise ValueError(f"Alternative must be one of '{alternatives}'.") + + +class CramerVonMisesResult: + def __init__(self, statistic, pvalue): + self.statistic = statistic + self.pvalue = pvalue + + def __repr__(self): + return (f"{self.__class__.__name__}(statistic={self.statistic}, " + f"pvalue={self.pvalue})") + + +def _psi1_mod(x): + """ + psi1 is defined in equation 1.10 in Csörgő, S. and Faraway, J. (1996). + This implements a modified version by excluding the term V(x) / 12 + (here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x) + twice in _cdf_cvm. + + Implementation based on MAPLE code of Julian Faraway and R code of the + function pCvM in the package goftest (v1.1.1), permission granted + by Adrian Baddeley. Main difference in the implementation: the code + here keeps adding terms of the series until the terms are small enough. 
+ """ + + def _ed2(y): + z = y**2 / 4 + b = kv(1/4, z) + kv(3/4, z) + return np.exp(-z) * (y/2)**(3/2) * b / np.sqrt(np.pi) + + def _ed3(y): + z = y**2 / 4 + c = np.exp(-z) / np.sqrt(np.pi) + return c * (y/2)**(5/2) * (2*kv(1/4, z) + 3*kv(3/4, z) - kv(5/4, z)) + + def _Ak(k, x): + m = 2*k + 1 + sx = 2 * np.sqrt(x) + y1 = x**(3/4) + y2 = x**(5/4) + + e1 = m * gamma(k + 1/2) * _ed2((4 * k + 3)/sx) / (9 * y1) + e2 = gamma(k + 1/2) * _ed3((4 * k + 1) / sx) / (72 * y2) + e3 = 2 * (m + 2) * gamma(k + 3/2) * _ed3((4 * k + 5) / sx) / (12 * y2) + e4 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 1) / sx) / (144 * y1) + e5 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 5) / sx) / (144 * y1) + + return e1 + e2 + e3 + e4 + e5 + + x = np.asarray(x) + tot = np.zeros_like(x, dtype='float') + cond = np.ones_like(x, dtype='bool') + k = 0 + while np.any(cond): + z = -_Ak(k, x[cond]) / (np.pi * gamma(k + 1)) + tot[cond] = tot[cond] + z + cond[cond] = np.abs(z) >= 1e-7 + k += 1 + + return tot + + +def _cdf_cvm_inf(x): + """ + Calculate the cdf of the Cramér-von Mises statistic (infinite sample size). + + See equation 1.2 in Csörgő, S. and Faraway, J. (1996). + + Implementation based on MAPLE code of Julian Faraway and R code of the + function pCvM in the package goftest (v1.1.1), permission granted + by Adrian Baddeley. Main difference in the implementation: the code + here keeps adding terms of the series until the terms are small enough. + + The function is not expected to be accurate for large values of x, say + x > 4, when the cdf is very close to 1. 
+ """ + x = np.asarray(x) + + def term(x, k): + # this expression can be found in [2], second line of (1.3) + u = np.exp(gammaln(k + 0.5) - gammaln(k+1)) / (np.pi**1.5 * np.sqrt(x)) + y = 4*k + 1 + q = y**2 / (16*x) + b = kv(0.25, q) + return u * np.sqrt(y) * np.exp(-q) * b + + tot = np.zeros_like(x, dtype='float') + cond = np.ones_like(x, dtype='bool') + k = 0 + while np.any(cond): + z = term(x[cond], k) + tot[cond] = tot[cond] + z + cond[cond] = np.abs(z) >= 1e-7 + k += 1 + + return tot + + +def _cdf_cvm(x, n=None): + """ + Calculate the cdf of the Cramér-von Mises statistic for a finite sample + size n. If N is None, use the asymptotic cdf (n=inf). + + See equation 1.8 in Csörgő, S. and Faraway, J. (1996) for finite samples, + 1.2 for the asymptotic cdf. + + The function is not expected to be accurate for large values of x, say + x > 2, when the cdf is very close to 1 and it might return values > 1 + in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846. Moreover, it + is not accurate for small values of n, especially close to the bounds of + the distribution's domain, [1/(12*n), n/3], where the value jumps to 0 + and 1, respectively. These are limitations of the approximation by Csörgő + and Faraway (1996) implemented in this function. + """ + x = np.asarray(x) + if n is None: + y = _cdf_cvm_inf(x) + else: + # support of the test statistic is [12/n, n/3], see 1.1 in [2] + y = np.zeros_like(x, dtype='float') + sup = (1./(12*n) < x) & (x < n/3.) 
+ # note: _psi1_mod does not include the term _cdf_cvm_inf(x) / 12 + # therefore, we need to add it here + y[sup] = _cdf_cvm_inf(x[sup]) * (1 + 1./(12*n)) + _psi1_mod(x[sup]) / n + y[x >= n/3] = 1 + + if y.ndim == 0: + return y[()] + return y + + +def _cvm_result_to_tuple(res): + return res.statistic, res.pvalue + + +@_axis_nan_policy_factory(CramerVonMisesResult, n_samples=1, too_small=1, + result_to_tuple=_cvm_result_to_tuple) +def cramervonmises(rvs, cdf, args=()): + """Perform the one-sample Cramér-von Mises test for goodness of fit. + + This performs a test of the goodness of fit of a cumulative distribution + function (cdf) :math:`F` compared to the empirical distribution function + :math:`F_n` of observed random variates :math:`X_1, ..., X_n` that are + assumed to be independent and identically distributed ([1]_). + The null hypothesis is that the :math:`X_i` have cumulative distribution + :math:`F`. + + Parameters + ---------- + rvs : array_like + A 1-D array of observed values of the random variables :math:`X_i`. + The sample must contain at least two observations. + cdf : str or callable + The cumulative distribution function :math:`F` to test the + observations against. If a string, it should be the name of a + distribution in `scipy.stats`. If a callable, that callable is used + to calculate the cdf: ``cdf(x, *args) -> float``. + args : tuple, optional + Distribution parameters. These are assumed to be known; see Notes. + + Returns + ------- + res : object with attributes + statistic : float + Cramér-von Mises statistic. + pvalue : float + The p-value. + + See Also + -------- + kstest, cramervonmises_2samp + + Notes + ----- + .. versionadded:: 1.6.0 + + The p-value relies on the approximation given by equation 1.8 in [2]_. + It is important to keep in mind that the p-value is only accurate if + one tests a simple hypothesis, i.e. the parameters of the reference + distribution are known. 
If the parameters are estimated from the data + (composite hypothesis), the computed p-value is not reliable. + + References + ---------- + .. [1] Cramér-von Mises criterion, Wikipedia, + https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion + .. [2] Csörgő, S. and Faraway, J. (1996). The Exact and Asymptotic + Distribution of Cramér-von Mises Statistics. Journal of the + Royal Statistical Society, pp. 221-234. + + Examples + -------- + + Suppose we wish to test whether data generated by ``scipy.stats.norm.rvs`` + were, in fact, drawn from the standard normal distribution. We choose a + significance level of ``alpha=0.05``. + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng(165417232101553420507139617764912913465) + >>> x = stats.norm.rvs(size=500, random_state=rng) + >>> res = stats.cramervonmises(x, 'norm') + >>> res.statistic, res.pvalue + (0.1072085112565724, 0.5508482238203407) + + The p-value exceeds our chosen significance level, so we do not + reject the null hypothesis that the observed sample is drawn from the + standard normal distribution. + + Now suppose we wish to check whether the same samples shifted by 2.1 is + consistent with being drawn from a normal distribution with a mean of 2. + + >>> y = x + 2.1 + >>> res = stats.cramervonmises(y, 'norm', args=(2,)) + >>> res.statistic, res.pvalue + (0.8364446265294695, 0.00596286797008283) + + Here we have used the `args` keyword to specify the mean (``loc``) + of the normal distribution to test the data against. This is equivalent + to the following, in which we create a frozen normal distribution with + mean 2.1, then pass its ``cdf`` method as an argument. 
+ + >>> frozen_dist = stats.norm(loc=2) + >>> res = stats.cramervonmises(y, frozen_dist.cdf) + >>> res.statistic, res.pvalue + (0.8364446265294695, 0.00596286797008283) + + In either case, we would reject the null hypothesis that the observed + sample is drawn from a normal distribution with a mean of 2 (and default + variance of 1) because the p-value is less than our chosen + significance level. + + """ + if isinstance(cdf, str): + cdf = getattr(distributions, cdf).cdf + + vals = np.sort(np.asarray(rvs)) + + if vals.size <= 1: + raise ValueError('The sample must contain at least two observations.') + + n = len(vals) + cdfvals = cdf(vals, *args) + + u = (2*np.arange(1, n+1) - 1)/(2*n) + w = 1/(12*n) + np.sum((u - cdfvals)**2) + + # avoid small negative values that can occur due to the approximation + p = max(0, 1. - _cdf_cvm(w, n)) + + return CramerVonMisesResult(statistic=w, pvalue=p) + + +def _get_wilcoxon_distr(n): + """ + Distribution of probability of the Wilcoxon ranksum statistic r_plus (sum + of ranks of positive differences). + Returns an array with the probabilities of all the possible ranks + r = 0, ..., n*(n+1)/2 + """ + c = np.ones(1, dtype=np.float64) + for k in range(1, n + 1): + prev_c = c + c = np.zeros(k * (k + 1) // 2 + 1, dtype=np.float64) + m = len(prev_c) + c[:m] = prev_c * 0.5 + c[-m:] += prev_c * 0.5 + return c + + +def _get_wilcoxon_distr2(n): + """ + Distribution of probability of the Wilcoxon ranksum statistic r_plus (sum + of ranks of positive differences). + Returns an array with the probabilities of all the possible ranks + r = 0, ..., n*(n+1)/2 + This is a slower reference function + References + ---------- + .. [1] 1. Harris T, Hardin JW. Exact Wilcoxon Signed-Rank and Wilcoxon + Mann-Whitney Ranksum Tests. The Stata Journal. 2013;13(2):337-343. 
+ """ + ai = np.arange(1, n+1)[:, None] + t = n*(n+1)/2 + q = 2*t + j = np.arange(q) + theta = 2*np.pi/q*j + phi_sp = np.prod(np.cos(theta*ai), axis=0) + phi_s = np.exp(1j*theta*t) * phi_sp + p = np.real(ifft(phi_s)) + res = np.zeros(int(t)+1) + res[:-1:] = p[::2] + res[0] /= 2 + res[-1] = res[0] + return res + + +def _tau_b(A): + """Calculate Kendall's tau-b and p-value from contingency table.""" + # See [2] 2.2 and 4.2 + + # contingency table must be truly 2D + if A.shape[0] == 1 or A.shape[1] == 1: + return np.nan, np.nan + + NA = A.sum() + PA = _P(A) + QA = _Q(A) + Sri2 = (A.sum(axis=1)**2).sum() + Scj2 = (A.sum(axis=0)**2).sum() + denominator = (NA**2 - Sri2)*(NA**2 - Scj2) + + tau = (PA-QA)/(denominator)**0.5 + + numerator = 4*(_a_ij_Aij_Dij2(A) - (PA - QA)**2 / NA) + s02_tau_b = numerator/denominator + if s02_tau_b == 0: # Avoid divide by zero + return tau, 0 + Z = tau/s02_tau_b**0.5 + p = 2*norm.sf(abs(Z)) # 2-sided p-value + + return tau, p + + +def _somers_d(A, alternative='two-sided'): + """Calculate Somers' D and p-value from contingency table.""" + # See [3] page 1740 + + # contingency table must be truly 2D + if A.shape[0] <= 1 or A.shape[1] <= 1: + return np.nan, np.nan + + NA = A.sum() + NA2 = NA**2 + PA = _P(A) + QA = _Q(A) + Sri2 = (A.sum(axis=1)**2).sum() + + d = (PA - QA)/(NA2 - Sri2) + + S = _a_ij_Aij_Dij2(A) - (PA-QA)**2/NA + + with np.errstate(divide='ignore'): + Z = (PA - QA)/(4*(S))**0.5 + + norm = _stats_py._SimpleNormal() + p = _stats_py._get_pvalue(Z, norm, alternative, xp=np) + + return d, p + + +@dataclass +class SomersDResult: + statistic: float + pvalue: float + table: np.ndarray + + +def somersd(x, y=None, alternative='two-sided'): + r"""Calculates Somers' D, an asymmetric measure of ordinal association. + + Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the + correspondence between two rankings. 
Both statistics consider the + difference between the number of concordant and discordant pairs in two + rankings :math:`X` and :math:`Y`, and both are normalized such that values + close to 1 indicate strong agreement and values close to -1 indicate + strong disagreement. They differ in how they are normalized. To show the + relationship, Somers' :math:`D` can be defined in terms of Kendall's + :math:`\tau_a`: + + .. math:: + D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)} + + Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the + second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of + :math:`n` rankings can also be viewed as an :math:`r \times s` contingency + table in which element :math:`i, j` is the number of rank pairs with rank + :math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`. + Accordingly, `somersd` also allows the input data to be supplied as a + single, 2D contingency table instead of as two separate, 1D rankings. + + Note that the definition of Somers' :math:`D` is asymmetric: in general, + :math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers' + :math:`D(Y|X)`: the "row" variable :math:`X` is treated as an independent + variable, and the "column" variable :math:`Y` is dependent. For Somers' + :math:`D(X|Y)`, swap the input lists or transpose the input table. + + Parameters + ---------- + x : array_like + 1D array of rankings, treated as the (row) independent variable. + Alternatively, a 2D contingency table. + y : array_like, optional + If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the + same length, treated as the (column) dependent variable. + If `x` is 2D, `y` is ignored. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. 
+ The following options are available: + * 'two-sided': the rank correlation is nonzero + * 'less': the rank correlation is negative (less than zero) + * 'greater': the rank correlation is positive (greater than zero) + + Returns + ------- + res : SomersDResult + A `SomersDResult` object with the following fields: + + statistic : float + The Somers' :math:`D` statistic. + pvalue : float + The p-value for a hypothesis test whose null + hypothesis is an absence of association, :math:`D=0`. + See notes for more information. + table : 2D array + The contingency table formed from rankings `x` and `y` (or the + provided contingency table, if `x` is a 2D array) + + See Also + -------- + kendalltau : Calculates Kendall's tau, another correlation measure. + weightedtau : Computes a weighted version of Kendall's tau. + spearmanr : Calculates a Spearman rank-order correlation coefficient. + pearsonr : Calculates a Pearson correlation coefficient. + + Notes + ----- + This function follows the contingency table approach of [2]_ and + [3]_. *p*-values are computed based on an asymptotic approximation of + the test statistic distribution under the null hypothesis :math:`D=0`. + + Theoretically, hypothesis tests based on Kendall's :math:`tau` and Somers' + :math:`D` should be identical. + However, the *p*-values returned by `kendalltau` are based + on the null hypothesis of *independence* between :math:`X` and :math:`Y` + (i.e. the population from which pairs in :math:`X` and :math:`Y` are + sampled contains equal numbers of all possible pairs), which is more + specific than the null hypothesis :math:`D=0` used here. If the null + hypothesis of independence is desired, it is acceptable to use the + *p*-value returned by `kendalltau` with the statistic returned by + `somersd` and vice versa. For more information, see [2]_. 
+ + Contingency tables are formatted according to the convention used by + SAS and R: the first ranking supplied (``x``) is the "row" variable, and + the second ranking supplied (``y``) is the "column" variable. This is + opposite the convention of Somers' original paper [1]_. + + References + ---------- + .. [1] Robert H. Somers, "A New Asymmetric Measure of Association for + Ordinal Variables", *American Sociological Review*, Vol. 27, No. 6, + pp. 799--811, 1962. + + .. [2] Morton B. Brown and Jacqueline K. Benedetti, "Sampling Behavior of + Tests for Correlation in Two-Way Contingency Tables", *Journal of + the American Statistical Association* Vol. 72, No. 358, pp. + 309--315, 1977. + + .. [3] SAS Institute, Inc., "The FREQ Procedure (Book Excerpt)", + *SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009. + + .. [4] Laerd Statistics, "Somers' d using SPSS Statistics", *SPSS + Statistics Tutorials and Statistical Guides*, + https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php, + Accessed July 31, 2020. + + Examples + -------- + We calculate Somers' D for the example given in [4]_, in which a hotel + chain owner seeks to determine the association between hotel room + cleanliness and customer satisfaction. The independent variable, hotel + room cleanliness, is ranked on an ordinal scale: "below average (1)", + "average (2)", or "above average (3)". The dependent variable, customer + satisfaction, is ranked on a second scale: "very dissatisfied (1)", + "moderately dissatisfied (2)", "neither dissatisfied nor satisfied (3)", + "moderately satisfied (4)", or "very satisfied (5)". 189 customers + respond to the survey, and the results are cast into a contingency table + with the hotel room cleanliness as the "row" variable and customer + satisfaction as the "column" variable. 
+ + +-----+-----+-----+-----+-----+-----+ + | | (1) | (2) | (3) | (4) | (5) | + +=====+=====+=====+=====+=====+=====+ + | (1) | 27 | 25 | 14 | 7 | 0 | + +-----+-----+-----+-----+-----+-----+ + | (2) | 7 | 14 | 18 | 35 | 12 | + +-----+-----+-----+-----+-----+-----+ + | (3) | 1 | 3 | 2 | 7 | 17 | + +-----+-----+-----+-----+-----+-----+ + + For example, 27 customers assigned their room a cleanliness ranking of + "below average (1)" and a corresponding satisfaction of "very + dissatisfied (1)". We perform the analysis as follows. + + >>> from scipy.stats import somersd + >>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]] + >>> res = somersd(table) + >>> res.statistic + 0.6032766111513396 + >>> res.pvalue + 1.0007091191074533e-27 + + The value of the Somers' D statistic is approximately 0.6, indicating + a positive correlation between room cleanliness and customer satisfaction + in the sample. + The *p*-value is very small, indicating a very small probability of + observing such an extreme value of the statistic under the null + hypothesis that the statistic of the entire population (from which + our sample of 189 customers is drawn) is zero. This supports the + alternative hypothesis that the true value of Somers' D for the population + is nonzero. 
+ + """ + x, y = np.array(x), np.array(y) + if x.ndim == 1: + if x.size != y.size: + raise ValueError("Rankings must be of equal length.") + table = scipy.stats.contingency.crosstab(x, y)[1] + elif x.ndim == 2: + if np.any(x < 0): + raise ValueError("All elements of the contingency table must be " + "non-negative.") + if np.any(x != x.astype(int)): + raise ValueError("All elements of the contingency table must be " + "integer.") + if x.nonzero()[0].size < 2: + raise ValueError("At least two elements of the contingency table " + "must be nonzero.") + table = x + else: + raise ValueError("x must be either a 1D or 2D array") + # The table type is converted to a float to avoid an integer overflow + d, p = _somers_d(table.astype(float), alternative) + + # add alias for consistency with other correlation functions + res = SomersDResult(d, p, table) + res.correlation = d + return res + + +# This could be combined with `_all_partitions` in `_resampling.py` +def _all_partitions(nx, ny): + """ + Partition a set of indices into two fixed-length sets in all possible ways + + Partition a set of indices 0 ... nx + ny - 1 into two sets of length nx and + ny in all possible ways (ignoring order of elements). + """ + z = np.arange(nx+ny) + for c in combinations(z, nx): + x = np.array(c) + mask = np.ones(nx+ny, bool) + mask[x] = False + y = z[mask] + yield x, y + + +def _compute_log_combinations(n): + """Compute all log combination of C(n, k).""" + gammaln_arr = gammaln(np.arange(n + 1) + 1) + return gammaln(n + 1) - gammaln_arr - gammaln_arr[::-1] + + +@dataclass +class BarnardExactResult: + statistic: float + pvalue: float + + +def barnard_exact(table, alternative="two-sided", pooled=True, n=32): + r"""Perform a Barnard exact test on a 2x2 contingency table. + + Parameters + ---------- + table : array_like of ints + A 2x2 contingency table. Elements should be non-negative integers. 
def barnard_exact(table, alternative="two-sided", pooled=True, n=32):
    r"""Perform a Barnard exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        See Notes.
    pooled : bool, optional
        Whether to compute score statistic with pooled variance (as in
        Student's t-test, for example) or unpooled variance (as in Welch's
        t-test). Default is ``True``.
    n : int, optional
        Number of sampling points used in the construction of the sampling
        method; automatically converted to the next higher power of 2 since
        `scipy.stats.qmc.Sobol` is used to select sample points. Must be
        positive. Default is 32, which is enough for good precision in most
        cases; more points come at a performance cost.

    Returns
    -------
    ber : BarnardExactResult
        A result object with the following attributes.

        statistic : float
            The Wald statistic with pooled or unpooled variance, depending
            on the user choice of `pooled`.
        pvalue : float
            P-value, the probability of obtaining a distribution at least
            as extreme as the one that was actually observed, assuming
            that the null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    fisher_exact : Fisher exact test on a 2x2 contingency table.
    boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
        which is an uniformly more powerful alternative to Fisher's exact
        test.

    Notes
    -----
    Barnard's test is an exact, unconditional test for 2x2 contingency
    tables and a more powerful alternative to Fisher's exact test. With
    :math:`p_1, p_2` the binomial probabilities of the two columns, the
    null/alternative pairs are :math:`H_0: p_1 \geq p_2` vs.
    :math:`H_1: p_1 < p_2` ("less"), :math:`H_0: p_1 \leq p_2` vs.
    :math:`H_1: p_1 > p_2` ("greater"), and :math:`H_0: p_1 = p_2` vs.
    :math:`H_1: p_1 \neq p_2` ("two-sided", the default).

    The test statistic is the Wald statistic [3]_ with pooled or unpooled
    variance. The p-value is the maximum over the nuisance parameter
    :math:`\pi \in [0, 1]` of the total binomial probability of all 2x2
    tables whose statistic is at least as extreme as the observed one
    (see [1]_ and [2]_). The complexity is :math:`O(n c_1 c_2)`, where
    `n` is the number of sample points and :math:`c_1, c_2` the column
    sums.

    References
    ----------
    .. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
           34.1/2 (1947): 123-138. :doi:`dpgkg3`

    .. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
           unconditional exact tests for comparing two binomials."
           *Cytel Software Corporation* 675 (2003): 1-5.

    .. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test

    Examples
    --------
    In a vaccine efficacy study ([2]_, Chan, 1998), 7 of 15 vaccinated and
    12 of 15 placebo subjects became infected. To test whether the vaccine
    lowers the infection probability:

    >>> import scipy.stats as stats
    >>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
    >>> res.pvalue  # doctest: +SKIP
    0.03407

    At a 5% significance level this rejects the null hypothesis, whereas
    Fisher's exact test (p ~ 0.064) would not; Barnard's test is uniformly
    more powerful because it does not condition on any margin.
    """
    if n <= 0:
        raise ValueError(
            "Number of points `n` must be strictly positive, "
            f"found {n!r}"
        )

    table = np.asarray(table, dtype=np.int64)

    if not table.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(table < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in table.sum(axis=0):
        # If both values in a column are zero, the statistic is undefined
        # (NaN) and the p-value is 1.
        return BarnardExactResult(np.nan, 1.0)

    col1_sum, col2_sum = table.sum(axis=0)

    x1 = np.arange(col1_sum + 1, dtype=np.int64).reshape(-1, 1)
    x2 = np.arange(col2_sum + 1, dtype=np.int64).reshape(1, -1)

    # Wald statistic for every possible combination of x1 and x2
    p1, p2 = x1 / col1_sum, x2 / col2_sum

    if pooled:
        p = (x1 + x2) / (col1_sum + col2_sum)
        variances = p * (1 - p) * (1 / col1_sum + 1 / col2_sum)
    else:
        variances = p1 * (1 - p1) / col1_sum + p2 * (1 - p2) / col2_sum

    # silence division-by-zero warnings; the resulting NaNs are patched below
    with np.errstate(divide="ignore", invalid="ignore"):
        wald_statistic = np.divide((p1 - p2), np.sqrt(variances))

    wald_statistic[p1 == p2] = 0  # removing NaN values

    wald_stat_obs = wald_statistic[table[0, 0], table[0, 1]]

    if alternative == "two-sided":
        index_arr = np.abs(wald_statistic) >= abs(wald_stat_obs)
    elif alternative == "less":
        index_arr = wald_statistic <= wald_stat_obs
    elif alternative == "greater":
        index_arr = wald_statistic >= wald_stat_obs
    else:
        msg = (
            "`alternative` should be one of {'two-sided', 'less', 'greater'},"
            f" found {alternative!r}"
        )
        raise ValueError(msg)

    x1_sum_x2 = x1 + x2

    x1_log_comb = _compute_log_combinations(col1_sum)
    x2_log_comb = _compute_log_combinations(col2_sum)
    x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]

    # maximize the p-value over the nuisance parameter pi in [0, 1]
    result = shgo(
        _get_binomial_log_p_value_with_nuisance_param,
        args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
        bounds=((0, 1),),
        n=n,
        sampling_method="sobol",
    )

    # result.fun is the negative log p-value; transform and clip to [0, 1]
    p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
    return BarnardExactResult(wald_stat_obs, p_value)


@dataclass
class BoschlooExactResult:
    # statistic is the Fisher exact p-value used as Boschloo's test statistic
    statistic: float
    pvalue: float
+ Please see explanations in the Notes section below. + + n : int, optional + Number of sampling points used in the construction of the sampling + method. Note that this argument will automatically be converted to + the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to + select sample points. Default is 32. Must be positive. In most cases, + 32 points is enough to reach good precision. More points comes at + performance cost. + + Returns + ------- + ber : BoschlooExactResult + A result object with the following attributes. + + statistic : float + The statistic used in Boschloo's test; that is, the p-value + from Fisher's exact test. + + pvalue : float + P-value, the probability of obtaining a distribution at least as + extreme as the one that was actually observed, assuming that the + null hypothesis is true. + + See Also + -------- + chi2_contingency : Chi-square test of independence of variables in a + contingency table. + fisher_exact : Fisher exact test on a 2x2 contingency table. + barnard_exact : Barnard's exact test, which is a more powerful alternative + than Fisher's exact test for 2x2 contingency tables. + + Notes + ----- + Boschloo's test is an exact test used in the analysis of contingency + tables. It examines the association of two categorical variables, and + is a uniformly more powerful alternative to Fisher's exact test + for 2x2 contingency tables. + + Boschloo's exact test uses the p-value of Fisher's exact test as a + statistic, and Boschloo's p-value is the probability under the null + hypothesis of observing such an extreme value of this statistic. + + Let's define :math:`X_0` a 2x2 matrix representing the observed sample, + where each column stores the binomial experiment, as in the example + below. Let's also define :math:`p_1, p_2` the theoretical binomial + probabilities for :math:`x_{11}` and :math:`x_{12}`. 
When using + Boschloo exact test, we can assert three different alternative hypotheses: + + - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 < p_2`, + with `alternative` = "less" + + - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 > p_2`, + with `alternative` = "greater" + + - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 \neq p_2`, + with `alternative` = "two-sided" (default) + + There are multiple conventions for computing a two-sided p-value when the + null distribution is asymmetric. Here, we apply the convention that the + p-value of a two-sided test is twice the minimum of the p-values of the + one-sided tests (clipped to 1.0). Note that `fisher_exact` follows a + different convention, so for a given `table`, the statistic reported by + `boschloo_exact` may differ from the p-value reported by `fisher_exact` + when ``alternative='two-sided'``. + + .. versionadded:: 1.7.0 + + References + ---------- + .. [1] R.D. Boschloo. "Raised conditional level of significance for the + 2 x 2-table when testing the equality of two probabilities", + Statistica Neerlandica, 24(1), 1970 + + .. [2] "Boschloo's test", Wikipedia, + https://en.wikipedia.org/wiki/Boschloo%27s_test + + .. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction", + Human Resource Management, 43(4), 395-407, 2004, + :doi:`10.1002/hrm.20032`. + + Examples + -------- + In the following example, we consider the article "Employee + attitudes and job satisfaction" [3]_ + which reports the results of a survey from 63 scientists and 117 college + professors. Of the 63 scientists, 31 said they were very satisfied with + their jobs, whereas 74 of the college professors were very satisfied + with their work. Is this significant evidence that college + professors are happier with their work than scientists? 
+ The following table summarizes the data mentioned above:: + + college professors scientists + Very Satisfied 74 31 + Dissatisfied 43 32 + + When working with statistical hypothesis testing, we usually use a + threshold probability or significance level upon which we decide + to reject the null hypothesis :math:`H_0`. Suppose we choose the common + significance level of 5%. + + Our alternative hypothesis is that college professors are truly more + satisfied with their work than scientists. Therefore, we expect + :math:`p_1` the proportion of very satisfied college professors to be + greater than :math:`p_2`, the proportion of very satisfied scientists. + We thus call `boschloo_exact` with the ``alternative="greater"`` option: + + >>> import scipy.stats as stats + >>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater") + >>> res.statistic + 0.0483 + >>> res.pvalue + 0.0355 + + Under the null hypothesis that scientists are happier in their work than + college professors, the probability of obtaining test + results at least as extreme as the observed data is approximately 3.55%. + Since this p-value is less than our chosen significance level, we have + evidence to reject :math:`H_0` in favor of the alternative hypothesis. + + """ + hypergeom = distributions.hypergeom + + if n <= 0: + raise ValueError( + "Number of points `n` must be strictly positive," + f" found {n!r}" + ) + + table = np.asarray(table, dtype=np.int64) + + if not table.shape == (2, 2): + raise ValueError("The input `table` must be of shape (2, 2).") + + if np.any(table < 0): + raise ValueError("All values in `table` must be nonnegative.") + + if 0 in table.sum(axis=0): + # If both values in column are zero, the p-value is 1 and + # the score's statistic is NaN. 
+ return BoschlooExactResult(np.nan, np.nan) + + total_col_1, total_col_2 = table.sum(axis=0) + total = total_col_1 + total_col_2 + x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1) + x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1) + x1_sum_x2 = x1 + x2 + + if alternative == 'less': + pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T + elif alternative == 'greater': + # Same formula as the 'less' case, but with the second column. + pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T + elif alternative == 'two-sided': + boschloo_less = boschloo_exact(table, alternative="less", n=n) + boschloo_greater = boschloo_exact(table, alternative="greater", n=n) + + res = ( + boschloo_less if boschloo_less.pvalue < boschloo_greater.pvalue + else boschloo_greater + ) + + # Two-sided p-value is defined as twice the minimum of the one-sided + # p-values + pvalue = np.clip(2 * res.pvalue, a_min=0, a_max=1) + return BoschlooExactResult(res.statistic, pvalue) + else: + msg = ( + f"`alternative` should be one of {'two-sided', 'less', 'greater'}," + f" found {alternative!r}" + ) + raise ValueError(msg) + + fisher_stat = pvalues[table[0, 0], table[0, 1]] + + # fisher_stat * (1+1e-13) guards us from small numerical error. 
It is + # equivalent to np.isclose with relative tol of 1e-13 and absolute tol of 0 + # For more throughout explanations, see gh-14178 + index_arr = pvalues <= fisher_stat * (1+1e-13) + + x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T + x1_log_comb = _compute_log_combinations(total_col_1) + x2_log_comb = _compute_log_combinations(total_col_2) + x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2] + + result = shgo( + _get_binomial_log_p_value_with_nuisance_param, + args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr), + bounds=((0, 1),), + n=n, + sampling_method="sobol", + ) + + # result.fun is the negative log pvalue and therefore needs to be + # changed before return + p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1) + return BoschlooExactResult(fisher_stat, p_value) + + +def _get_binomial_log_p_value_with_nuisance_param( + nuisance_param, x1_sum_x2, x1_sum_x2_log_comb, index_arr +): + r""" + Compute the log pvalue in respect of a nuisance parameter considering + a 2x2 sample space. + + Parameters + ---------- + nuisance_param : float + nuisance parameter used in the computation of the maximisation of + the p-value. Must be between 0 and 1 + + x1_sum_x2 : ndarray + Sum of x1 and x2 inside barnard_exact + + x1_sum_x2_log_comb : ndarray + sum of the log combination of x1 and x2 + + index_arr : ndarray of boolean + + Returns + ------- + p_value : float + Return the maximum p-value considering every nuisance parameter + between 0 and 1 + + Notes + ----- + + Both Barnard's test and Boschloo's test iterate over a nuisance parameter + :math:`\pi \in [0, 1]` to find the maximum p-value. To search this + maxima, this function return the negative log pvalue with respect to the + nuisance parameter passed in params. This negative log p-value is then + used in `shgo` to find the minimum negative pvalue which is our maximum + pvalue. 
+ + Also, to compute the different combination used in the + p-values' computation formula, this function uses `gammaln` which is + more tolerant for large value than `scipy.special.comb`. `gammaln` gives + a log combination. For the little precision loss, performances are + improved a lot. + """ + t1, t2 = x1_sum_x2.shape + n = t1 + t2 - 2 + with np.errstate(divide="ignore", invalid="ignore"): + log_nuisance = np.log( + nuisance_param, + out=np.zeros_like(nuisance_param), + where=nuisance_param >= 0, + ) + log_1_minus_nuisance = np.log( + 1 - nuisance_param, + out=np.zeros_like(nuisance_param), + where=1 - nuisance_param >= 0, + ) + + nuisance_power_x1_x2 = log_nuisance * x1_sum_x2 + nuisance_power_x1_x2[(x1_sum_x2 == 0)[:, :]] = 0 + + nuisance_power_n_minus_x1_x2 = log_1_minus_nuisance * (n - x1_sum_x2) + nuisance_power_n_minus_x1_x2[(x1_sum_x2 == n)[:, :]] = 0 + + tmp_log_values_arr = ( + x1_sum_x2_log_comb + + nuisance_power_x1_x2 + + nuisance_power_n_minus_x1_x2 + ) + + tmp_values_from_index = tmp_log_values_arr[index_arr] + + # To avoid dividing by zero in log function and getting inf value, + # values are centered according to the max + max_value = tmp_values_from_index.max() + + # To have better result's precision, the log pvalue is taken here. + # Indeed, pvalue is included inside [0, 1] interval. Passing the + # pvalue to log makes the interval a lot bigger ([-inf, 0]), and thus + # help us to achieve better precision + with np.errstate(divide="ignore", invalid="ignore"): + log_probs = np.exp(tmp_values_from_index - max_value).sum() + log_pvalue = max_value + np.log( + log_probs, + out=np.full_like(log_probs, -np.inf), + where=log_probs > 0, + ) + + # Since shgo find the minima, minus log pvalue is returned + return -log_pvalue + + +def _pval_cvm_2samp_exact(s, m, n): + """ + Compute the exact p-value of the Cramer-von Mises two-sample test + for a given value s of the test statistic. + m and n are the sizes of the samples. + + [1] Y. Xiao, A. 
Gordon, and A. Yakovlev, "A C++ Program for + the Cramér-Von Mises Two-Sample Test", J. Stat. Soft., + vol. 17, no. 8, pp. 1-15, Dec. 2006. + [2] T. W. Anderson "On the Distribution of the Two-Sample Cramer-von Mises + Criterion," The Annals of Mathematical Statistics, Ann. Math. Statist. + 33(3), 1148-1159, (September, 1962) + """ + + # [1, p. 3] + lcm = np.lcm(m, n) + # [1, p. 4], below eq. 3 + a = lcm // m + b = lcm // n + # Combine Eq. 9 in [2] with Eq. 2 in [1] and solve for $\zeta$ + # Hint: `s` is $U$ in [2], and $T_2$ in [1] is $T$ in [2] + mn = m * n + zeta = lcm ** 2 * (m + n) * (6 * s - mn * (4 * mn - 1)) // (6 * mn ** 2) + + # bound maximum value that may appear in `gs` (remember both rows!) + zeta_bound = lcm**2 * (m + n) # bound elements in row 1 + combinations = comb(m + n, m) # sum of row 2 + max_gs = max(zeta_bound, combinations) + dtype = np.min_scalar_type(max_gs) + + # the frequency table of $g_{u, v}^+$ defined in [1, p. 6] + gs = ([np.array([[0], [1]], dtype=dtype)] + + [np.empty((2, 0), dtype=dtype) for _ in range(m)]) + for u in range(n + 1): + next_gs = [] + tmp = np.empty((2, 0), dtype=dtype) + for v, g in enumerate(gs): + # Calculate g recursively with eq. 11 in [1]. Even though it + # doesn't look like it, this also does 12/13 (all of Algorithm 1). + vi, i0, i1 = np.intersect1d(tmp[0], g[0], return_indices=True) + tmp = np.concatenate([ + np.stack([vi, tmp[1, i0] + g[1, i1]]), + np.delete(tmp, i0, 1), + np.delete(g, i1, 1) + ], 1) + res = (a * v - b * u) ** 2 + tmp[0] += res.astype(dtype) + next_gs.append(tmp) + gs = next_gs + value, freq = gs[m] + return np.float64(np.sum(freq[value >= zeta]) / combinations) + + +@_axis_nan_policy_factory(CramerVonMisesResult, n_samples=2, too_small=1, + result_to_tuple=_cvm_result_to_tuple) +def cramervonmises_2samp(x, y, method='auto'): + """Perform the two-sample Cramér-von Mises test for goodness of fit. 
+ + This is the two-sample version of the Cramér-von Mises test ([1]_): + for two independent samples :math:`X_1, ..., X_n` and + :math:`Y_1, ..., Y_m`, the null hypothesis is that the samples + come from the same (unspecified) continuous distribution. + + Parameters + ---------- + x : array_like + A 1-D array of observed values of the random variables :math:`X_i`. + Must contain at least two observations. + y : array_like + A 1-D array of observed values of the random variables :math:`Y_i`. + Must contain at least two observations. + method : {'auto', 'asymptotic', 'exact'}, optional + The method used to compute the p-value, see Notes for details. + The default is 'auto'. + + Returns + ------- + res : object with attributes + statistic : float + Cramér-von Mises statistic. + pvalue : float + The p-value. + + See Also + -------- + cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp + + Notes + ----- + .. versionadded:: 1.7.0 + + The statistic is computed according to equation 9 in [2]_. The + calculation of the p-value depends on the keyword `method`: + + - ``asymptotic``: The p-value is approximated by using the limiting + distribution of the test statistic. + - ``exact``: The exact p-value is computed by enumerating all + possible combinations of the test statistic, see [2]_. + + If ``method='auto'``, the exact approach is used + if both samples contain equal to or less than 20 observations, + otherwise the asymptotic distribution is used. + + If the underlying distribution is not continuous, the p-value is likely to + be conservative (Section 6.2 in [3]_). When ranking the data to compute + the test statistic, midranks are used if there are ties. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion + .. [2] Anderson, T.W. (1962). On the distribution of the two-sample + Cramer-von-Mises criterion. The Annals of Mathematical + Statistics, pp. 1148-1159. + .. 
[3] Conover, W.J., Practical Nonparametric Statistics, 1971. + + Examples + -------- + + Suppose we wish to test whether two samples generated by + ``scipy.stats.norm.rvs`` have the same distribution. We choose a + significance level of alpha=0.05. + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> x = stats.norm.rvs(size=100, random_state=rng) + >>> y = stats.norm.rvs(size=70, random_state=rng) + >>> res = stats.cramervonmises_2samp(x, y) + >>> res.statistic, res.pvalue + (0.29376470588235293, 0.1412873014573014) + + The p-value exceeds our chosen significance level, so we do not + reject the null hypothesis that the observed samples are drawn from the + same distribution. + + For small sample sizes, one can compute the exact p-values: + + >>> x = stats.norm.rvs(size=7, random_state=rng) + >>> y = stats.t.rvs(df=2, size=6, random_state=rng) + >>> res = stats.cramervonmises_2samp(x, y, method='exact') + >>> res.statistic, res.pvalue + (0.197802197802198, 0.31643356643356646) + + The p-value based on the asymptotic distribution is a good approximation + even though the sample size is small. + + >>> res = stats.cramervonmises_2samp(x, y, method='asymptotic') + >>> res.statistic, res.pvalue + (0.197802197802198, 0.2966041181527128) + + Independent of the method, one would not reject the null hypothesis at the + chosen significance level in this example. 
+ + """ + xa = np.sort(np.asarray(x)) + ya = np.sort(np.asarray(y)) + + if xa.size <= 1 or ya.size <= 1: + raise ValueError('x and y must contain at least two observations.') + if method not in ['auto', 'exact', 'asymptotic']: + raise ValueError('method must be either auto, exact or asymptotic.') + + nx = len(xa) + ny = len(ya) + + if method == 'auto': + if max(nx, ny) > 20: + method = 'asymptotic' + else: + method = 'exact' + + # get ranks of x and y in the pooled sample + z = np.concatenate([xa, ya]) + # in case of ties, use midrank (see [1]) + r = scipy.stats.rankdata(z, method='average') + rx = r[:nx] + ry = r[nx:] + + # compute U (eq. 10 in [2]) + u = nx * np.sum((rx - np.arange(1, nx+1))**2) + u += ny * np.sum((ry - np.arange(1, ny+1))**2) + + # compute T (eq. 9 in [2]) + k, N = nx*ny, nx + ny + t = u / (k*N) - (4*k - 1)/(6*N) + + if method == 'exact': + p = _pval_cvm_2samp_exact(u, nx, ny) + else: + # compute expected value and variance of T (eq. 11 and 14 in [2]) + et = (1 + 1/N)/6 + vt = (N+1) * (4*k*N - 3*(nx**2 + ny**2) - 2*k) + vt = vt / (45 * N**2 * 4 * k) + + # computed the normalized statistic (eq. 15 in [2]) + tn = 1/6 + (t - et) / np.sqrt(45 * vt) + + # approximate distribution of tn with limiting distribution + # of the one-sample test statistic + # if tn < 0.003, the _cdf_cvm_inf(tn) < 1.28*1e-18, return 1.0 directly + if tn < 0.003: + p = 1.0 + else: + p = max(0, 1. - _cdf_cvm_inf(tn)) + + return CramerVonMisesResult(statistic=t, pvalue=p) + + +class TukeyHSDResult: + """Result of `scipy.stats.tukey_hsd`. + + Attributes + ---------- + statistic : float ndarray + The computed statistic of the test for each comparison. The element + at index ``(i, j)`` is the statistic for the comparison between groups + ``i`` and ``j``. + pvalue : float ndarray + The associated p-value from the studentized range distribution. The + element at index ``(i, j)`` is the p-value for the comparison + between groups ``i`` and ``j``. 
+ + Notes + ----- + The string representation of this object displays the most recently + calculated confidence interval, and if none have been previously + calculated, it will evaluate ``confidence_interval()``. + + References + ---------- + .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's + Method." + https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm, + 28 November 2020. + """ + + def __init__(self, statistic, pvalue, _nobs, _ntreatments, _stand_err): + self.statistic = statistic + self.pvalue = pvalue + self._ntreatments = _ntreatments + self._nobs = _nobs + self._stand_err = _stand_err + self._ci = None + self._ci_cl = None + + def __str__(self): + # Note: `__str__` prints the confidence intervals from the most + # recent call to `confidence_interval`. If it has not been called, + # it will be called with the default CL of .95. + if self._ci is None: + self.confidence_interval(confidence_level=.95) + s = ("Tukey's HSD Pairwise Group Comparisons" + f" ({self._ci_cl*100:.1f}% Confidence Interval)\n") + s += "Comparison Statistic p-value Lower CI Upper CI\n" + for i in range(self.pvalue.shape[0]): + for j in range(self.pvalue.shape[0]): + if i != j: + s += (f" ({i} - {j}) {self.statistic[i, j]:>10.3f}" + f"{self.pvalue[i, j]:>10.3f}" + f"{self._ci.low[i, j]:>10.3f}" + f"{self._ci.high[i, j]:>10.3f}\n") + return s + + def confidence_interval(self, confidence_level=.95): + """Compute the confidence interval for the specified confidence level. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval + of the estimated proportion. Default is .95. + + Returns + ------- + ci : ``ConfidenceInterval`` object + The object has attributes ``low`` and ``high`` that hold the + lower and upper bounds of the confidence intervals for each + comparison. The high and low values are accessible for each + comparison at index ``(i, j)`` between groups ``i`` and ``j``. 
+ + References + ---------- + .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. + Tukey's Method." + https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm, + 28 November 2020. + + Examples + -------- + >>> from scipy.stats import tukey_hsd + >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9] + >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1] + >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8] + >>> result = tukey_hsd(group0, group1, group2) + >>> ci = result.confidence_interval() + >>> ci.low + array([[-3.649159, -8.249159, -3.909159], + [ 0.950841, -3.649159, 0.690841], + [-3.389159, -7.989159, -3.649159]]) + >>> ci.high + array([[ 3.649159, -0.950841, 3.389159], + [ 8.249159, 3.649159, 7.989159], + [ 3.909159, -0.690841, 3.649159]]) + """ + # check to see if the supplied confidence level matches that of the + # previously computed CI. + if (self._ci is not None and self._ci_cl is not None and + confidence_level == self._ci_cl): + return self._ci + + if not 0 < confidence_level < 1: + raise ValueError("Confidence level must be between 0 and 1.") + # determine the critical value of the studentized range using the + # appropriate confidence level, number of treatments, and degrees + # of freedom as determined by the number of data less the number of + # treatments. ("Confidence limits for Tukey's method")[1]. Note that + # in the cases of unequal sample sizes there will be a criterion for + # each group comparison. + params = (confidence_level, self._nobs, self._ntreatments - self._nobs) + srd = distributions.studentized_range.ppf(*params) + # also called maximum critical value, the Tukey criterion is the + # studentized range critical value * the square root of mean square + # error over the sample size. 
+ tukey_criterion = srd * self._stand_err + # the confidence levels are determined by the + # `mean_differences` +- `tukey_criterion` + upper_conf = self.statistic + tukey_criterion + lower_conf = self.statistic - tukey_criterion + self._ci = ConfidenceInterval(low=lower_conf, high=upper_conf) + self._ci_cl = confidence_level + return self._ci + + +def _tukey_hsd_iv(args): + if (len(args)) < 2: + raise ValueError("There must be more than 1 treatment.") + args = [np.asarray(arg) for arg in args] + for arg in args: + if arg.ndim != 1: + raise ValueError("Input samples must be one-dimensional.") + if arg.size <= 1: + raise ValueError("Input sample size must be greater than one.") + if np.isinf(arg).any(): + raise ValueError("Input samples must be finite.") + return args + + +def tukey_hsd(*args): + """Perform Tukey's HSD test for equality of means over multiple treatments. + + Tukey's honestly significant difference (HSD) test performs pairwise + comparison of means for a set of samples. Whereas ANOVA (e.g. `f_oneway`) + assesses whether the true means underlying each sample are identical, + Tukey's HSD is a post hoc test used to compare the mean of each sample + to the mean of each other sample. + + The null hypothesis is that the distributions underlying the samples all + have the same mean. The test statistic, which is computed for every + possible pairing of samples, is simply the difference between the sample + means. For each pair, the p-value is the probability under the null + hypothesis (and other assumptions; see notes) of observing such an extreme + value of the statistic, considering that many pairwise comparisons are + being performed. Confidence intervals for the difference between each pair + of means are also available. + + Parameters + ---------- + sample1, sample2, ... : array_like + The sample measurements for each group. There must be at least + two arguments. 
+ + Returns + ------- + result : `~scipy.stats._result_classes.TukeyHSDResult` instance + The return value is an object with the following attributes: + + statistic : float ndarray + The computed statistic of the test for each comparison. The element + at index ``(i, j)`` is the statistic for the comparison between + groups ``i`` and ``j``. + pvalue : float ndarray + The computed p-value of the test for each comparison. The element + at index ``(i, j)`` is the p-value for the comparison between + groups ``i`` and ``j``. + + The object has the following methods: + + confidence_interval(confidence_level=0.95): + Compute the confidence interval for the specified confidence level. + + See Also + -------- + dunnett : performs comparison of means against a control group. + + Notes + ----- + The use of this test relies on several assumptions. + + 1. The observations are independent within and among groups. + 2. The observations within each group are normally distributed. + 3. The distributions from which the samples are drawn have the same finite + variance. + + The original formulation of the test was for samples of equal size [6]_. + In case of unequal sample sizes, the test uses the Tukey-Kramer method + [4]_. + + References + ---------- + .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's + Method." + https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm, + 28 November 2020. + .. [2] Abdi, Herve & Williams, Lynne. (2021). "Tukey's Honestly Significant + Difference (HSD) Test." + https://personal.utdallas.edu/~herve/abdi-HSD2010-pretty.pdf + .. [3] "One-Way ANOVA Using SAS PROC ANOVA & PROC GLM." SAS + Tutorials, 2007, www.stattutorials.com/SAS/TUTORIAL-PROC-GLM.htm. + .. [4] Kramer, Clyde Young. "Extension of Multiple Range Tests to Group + Means with Unequal Numbers of Replications." Biometrics, vol. 12, + no. 3, 1956, pp. 307-310. JSTOR, www.jstor.org/stable/3001469. + Accessed 25 May 2021. + .. 
[5] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.3.3. + The ANOVA table and tests of hypotheses about means" + https://www.itl.nist.gov/div898/handbook/prc/section4/prc433.htm, + 2 June 2021. + .. [6] Tukey, John W. "Comparing Individual Means in the Analysis of + Variance." Biometrics, vol. 5, no. 2, 1949, pp. 99-114. JSTOR, + www.jstor.org/stable/3001913. Accessed 14 June 2021. + + + Examples + -------- + Here are some data comparing the time to relief of three brands of + headache medicine, reported in minutes. Data adapted from [3]_. + + >>> import numpy as np + >>> from scipy.stats import tukey_hsd + >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9] + >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1] + >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8] + + We would like to see if the means between any of the groups are + significantly different. First, visually examine a box and whisker plot. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + >>> ax.boxplot([group0, group1, group2]) + >>> ax.set_xticklabels(["group0", "group1", "group2"]) # doctest: +SKIP + >>> ax.set_ylabel("mean") # doctest: +SKIP + >>> plt.show() + + From the box and whisker plot, we can see overlap in the interquartile + ranges group 1 to group 2 and group 3, but we can apply the ``tukey_hsd`` + test to determine if the difference between means is significant. We + set a significance level of .05 to reject the null hypothesis. + + >>> res = tukey_hsd(group0, group1, group2) + >>> print(res) + Tukey's HSD Pairwise Group Comparisons (95.0% Confidence Interval) + Comparison Statistic p-value Lower CI Upper CI + (0 - 1) -4.600 0.014 -8.249 -0.951 + (0 - 2) -0.260 0.980 -3.909 3.389 + (1 - 0) 4.600 0.014 0.951 8.249 + (1 - 2) 4.340 0.020 0.691 7.989 + (2 - 0) 0.260 0.980 -3.389 3.909 + (2 - 1) -4.340 0.020 -7.989 -0.691 + + The null hypothesis is that each group has the same mean. 
The p-value for + comparisons between ``group0`` and ``group1`` as well as ``group1`` and + ``group2`` do not exceed .05, so we reject the null hypothesis that they + have the same means. The p-value of the comparison between ``group0`` + and ``group2`` exceeds .05, so we accept the null hypothesis that there + is not a significant difference between their means. + + We can also compute the confidence interval associated with our chosen + confidence level. + + >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9] + >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1] + >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8] + >>> result = tukey_hsd(group0, group1, group2) + >>> conf = res.confidence_interval(confidence_level=.99) + >>> for ((i, j), l) in np.ndenumerate(conf.low): + ... # filter out self comparisons + ... if i != j: + ... h = conf.high[i,j] + ... print(f"({i} - {j}) {l:>6.3f} {h:>6.3f}") + (0 - 1) -9.480 0.280 + (0 - 2) -5.140 4.620 + (1 - 0) -0.280 9.480 + (1 - 2) -0.540 9.220 + (2 - 0) -4.620 5.140 + (2 - 1) -9.220 0.540 + """ + args = _tukey_hsd_iv(args) + ntreatments = len(args) + means = np.asarray([np.mean(arg) for arg in args]) + nsamples_treatments = np.asarray([a.size for a in args]) + nobs = np.sum(nsamples_treatments) + + # determine mean square error [5]. Note that this is sometimes called + # mean square error within. + mse = (np.sum([np.var(arg, ddof=1) for arg in args] * + (nsamples_treatments - 1)) / (nobs - ntreatments)) + + # The calculation of the standard error differs when treatments differ in + # size. See ("Unequal sample sizes")[1]. + if np.unique(nsamples_treatments).size == 1: + # all input groups are the same length, so only one value needs to be + # calculated [1]. + normalize = 2 / nsamples_treatments[0] + else: + # to compare groups of differing sizes, we must compute a variance + # value for each individual comparison. Use broadcasting to get the + # resulting matrix. [3], verified against [4] (page 308). 
+ normalize = 1 / nsamples_treatments + 1 / nsamples_treatments[None].T + + # the standard error is used in the computation of the tukey criterion and + # finding the p-values. + stand_err = np.sqrt(normalize * mse / 2) + + # the mean difference is the test statistic. + mean_differences = means[None].T - means + + # Calculate the t-statistic to use within the survival function of the + # studentized range to get the p-value. + t_stat = np.abs(mean_differences) / stand_err + + params = t_stat, ntreatments, nobs - ntreatments + pvalues = distributions.studentized_range.sf(*params) + + return TukeyHSDResult(mean_differences, pvalues, ntreatments, + nobs, stand_err) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_kde.py b/parrot/lib/python3.10/site-packages/scipy/stats/_kde.py new file mode 100644 index 0000000000000000000000000000000000000000..40ff35934e217a01a80a649af40166debadba5d2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_kde.py @@ -0,0 +1,725 @@ +#------------------------------------------------------------------------------- +# +# Define classes for (uni/multi)-variate kernel density estimation. +# +# Currently, only Gaussian kernels are implemented. +# +# Written by: Robert Kern +# +# Date: 2004-08-09 +# +# Modified: 2005-02-10 by Robert Kern. +# Contributed to SciPy +# 2005-10-07 by Robert Kern. +# Some fixes to match the new scipy_core +# +# Copyright 2004-2005 by Enthought, Inc. +# +#------------------------------------------------------------------------------- + +# Standard library imports. +import warnings + +# SciPy imports. +from scipy import linalg, special +from scipy._lib._util import check_random_state + +from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, exp, pi, + sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, + ones, cov) +import numpy as np + +# Local imports. +from . 
import _mvn +from ._stats import gaussian_kernel_estimate, gaussian_kernel_estimate_log + + +__all__ = ['gaussian_kde'] + + +class gaussian_kde: + """Representation of a kernel-density estimate using Gaussian kernels. + + Kernel density estimation is a way to estimate the probability density + function (PDF) of a random variable in a non-parametric way. + `gaussian_kde` works for both uni-variate and multi-variate data. It + includes automatic bandwidth determination. The estimation works best for + a unimodal distribution; bimodal or multi-modal distributions tend to be + oversmoothed. + + Parameters + ---------- + dataset : array_like + Datapoints to estimate from. In case of univariate data this is a 1-D + array, otherwise a 2-D array with shape (# of dims, # of data). + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. If a scalar, + this will be used directly as `kde.factor`. If a callable, it should + take a `gaussian_kde` instance as only parameter and return a scalar. + If None (default), 'scott' is used. See Notes for more details. + weights : array_like, optional + weights of datapoints. This must be the same shape as dataset. + If None (default), the samples are assumed to be equally weighted + + Attributes + ---------- + dataset : ndarray + The dataset with which `gaussian_kde` was initialized. + d : int + Number of dimensions. + n : int + Number of datapoints. + neff : int + Effective number of datapoints. + + .. versionadded:: 1.2.0 + factor : float + The bandwidth factor, obtained from `kde.covariance_factor`. The square + of `kde.factor` multiplies the covariance matrix of the data in the kde + estimation. + covariance : ndarray + The covariance matrix of `dataset`, scaled by the calculated bandwidth + (`kde.factor`). + inv_cov : ndarray + The inverse of `covariance`. 
+ + Methods + ------- + evaluate + __call__ + integrate_gaussian + integrate_box_1d + integrate_box + integrate_kde + pdf + logpdf + resample + set_bandwidth + covariance_factor + + Notes + ----- + Bandwidth selection strongly influences the estimate obtained from the KDE + (much more so than the actual shape of the kernel). Bandwidth selection + can be done by a "rule of thumb", by cross-validation, by "plug-in + methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde` + uses a rule of thumb, the default is Scott's Rule. + + Scott's Rule [1]_, implemented as `scotts_factor`, is:: + + n**(-1./(d+4)), + + with ``n`` the number of data points and ``d`` the number of dimensions. + In the case of unequally weighted points, `scotts_factor` becomes:: + + neff**(-1./(d+4)), + + with ``neff`` the effective number of datapoints. + Silverman's Rule [2]_, implemented as `silverman_factor`, is:: + + (n * (d + 2) / 4.)**(-1. / (d + 4)). + + or in the case of unequally weighted points:: + + (neff * (d + 2) / 4.)**(-1. / (d + 4)). + + Good general descriptions of kernel density estimation can be found in [1]_ + and [2]_, the mathematics for this multi-dimensional implementation can be + found in [1]_. + + With a set of weighted samples, the effective number of datapoints ``neff`` + is defined by:: + + neff = sum(weights)^2 / sum(weights^2) + + as detailed in [5]_. + + `gaussian_kde` does not currently support data that lies in a + lower-dimensional subspace of the space in which it is expressed. For such + data, consider performing principle component analysis / dimensionality + reduction and using `gaussian_kde` with the transformed data. + + References + ---------- + .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and + Visualization", John Wiley & Sons, New York, Chicester, 1992. + .. [2] B.W. Silverman, "Density Estimation for Statistics and Data + Analysis", Vol. 
26, Monographs on Statistics and Applied Probability, + Chapman and Hall, London, 1986. + .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A + Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993. + .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel + conditional density estimation", Computational Statistics & Data + Analysis, Vol. 36, pp. 279-298, 2001. + .. [5] Gray P. G., 1969, Journal of the Royal Statistical Society. + Series A (General), 132, 272 + + Examples + -------- + Generate some random two-dimensional data: + + >>> import numpy as np + >>> from scipy import stats + >>> def measure(n): + ... "Measurement model, return two coupled measurements." + ... m1 = np.random.normal(size=n) + ... m2 = np.random.normal(scale=0.5, size=n) + ... return m1+m2, m1-m2 + + >>> m1, m2 = measure(2000) + >>> xmin = m1.min() + >>> xmax = m1.max() + >>> ymin = m2.min() + >>> ymax = m2.max() + + Perform a kernel density estimate on the data: + + >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j] + >>> positions = np.vstack([X.ravel(), Y.ravel()]) + >>> values = np.vstack([m1, m2]) + >>> kernel = stats.gaussian_kde(values) + >>> Z = np.reshape(kernel(positions).T, X.shape) + + Plot the results: + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, + ... 
extent=[xmin, xmax, ymin, ymax]) + >>> ax.plot(m1, m2, 'k.', markersize=2) + >>> ax.set_xlim([xmin, xmax]) + >>> ax.set_ylim([ymin, ymax]) + >>> plt.show() + + """ + def __init__(self, dataset, bw_method=None, weights=None): + self.dataset = atleast_2d(asarray(dataset)) + if not self.dataset.size > 1: + raise ValueError("`dataset` input should have multiple elements.") + + self.d, self.n = self.dataset.shape + + if weights is not None: + self._weights = atleast_1d(weights).astype(float) + self._weights /= sum(self._weights) + if self.weights.ndim != 1: + raise ValueError("`weights` input should be one-dimensional.") + if len(self._weights) != self.n: + raise ValueError("`weights` input should be of length n") + self._neff = 1/sum(self._weights**2) + + # This can be converted to a warning once gh-10205 is resolved + if self.d > self.n: + msg = ("Number of dimensions is greater than number of samples. " + "This results in a singular data covariance matrix, which " + "cannot be treated using the algorithms implemented in " + "`gaussian_kde`. Note that `gaussian_kde` interprets each " + "*column* of `dataset` to be a point; consider transposing " + "the input to `dataset`.") + raise ValueError(msg) + + try: + self.set_bandwidth(bw_method=bw_method) + except linalg.LinAlgError as e: + msg = ("The data appears to lie in a lower-dimensional subspace " + "of the space in which it is expressed. This has resulted " + "in a singular data covariance matrix, which cannot be " + "treated using the algorithms implemented in " + "`gaussian_kde`. Consider performing principle component " + "analysis / dimensionality reduction and using " + "`gaussian_kde` with the transformed data.") + raise linalg.LinAlgError(msg) from e + + def evaluate(self, points): + """Evaluate the estimated pdf on a set of points. + + Parameters + ---------- + points : (# of dimensions, # of points)-array + Alternatively, a (# of dimensions,) vector can be passed in and + treated as a single point. 
+ + Returns + ------- + values : (# of points,)-array + The values at each point. + + Raises + ------ + ValueError : if the dimensionality of the input points is different than + the dimensionality of the KDE. + + """ + points = atleast_2d(asarray(points)) + + d, m = points.shape + if d != self.d: + if d == 1 and m == self.d: + # points was passed in as a row vector + points = reshape(points, (self.d, 1)) + m = 1 + else: + msg = (f"points have dimension {d}, " + f"dataset has dimension {self.d}") + raise ValueError(msg) + + output_dtype, spec = _get_output_dtype(self.covariance, points) + result = gaussian_kernel_estimate[spec]( + self.dataset.T, self.weights[:, None], + points.T, self.cho_cov, output_dtype) + + return result[:, 0] + + __call__ = evaluate + + def integrate_gaussian(self, mean, cov): + """ + Multiply estimated density by a multivariate Gaussian and integrate + over the whole space. + + Parameters + ---------- + mean : aray_like + A 1-D array, specifying the mean of the Gaussian. + cov : array_like + A 2-D array, specifying the covariance matrix of the Gaussian. + + Returns + ------- + result : scalar + The value of the integral. + + Raises + ------ + ValueError + If the mean or covariance of the input Gaussian differs from + the KDE's dimensionality. 
+ + """ + mean = atleast_1d(squeeze(mean)) + cov = atleast_2d(cov) + + if mean.shape != (self.d,): + raise ValueError("mean does not have dimension %s" % self.d) + if cov.shape != (self.d, self.d): + raise ValueError("covariance does not have dimension %s" % self.d) + + # make mean a column vector + mean = mean[:, newaxis] + + sum_cov = self.covariance + cov + + # This will raise LinAlgError if the new cov matrix is not s.p.d + # cho_factor returns (ndarray, bool) where bool is a flag for whether + # or not ndarray is upper or lower triangular + sum_cov_chol = linalg.cho_factor(sum_cov) + + diff = self.dataset - mean + tdiff = linalg.cho_solve(sum_cov_chol, diff) + + sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) + norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det + + energies = sum(diff * tdiff, axis=0) / 2.0 + result = sum(exp(-energies)*self.weights, axis=0) / norm_const + + return result + + def integrate_box_1d(self, low, high): + """ + Computes the integral of a 1D pdf between two bounds. + + Parameters + ---------- + low : scalar + Lower bound of integration. + high : scalar + Upper bound of integration. + + Returns + ------- + value : scalar + The result of the integral. + + Raises + ------ + ValueError + If the KDE is over more than one dimension. + + """ + if self.d != 1: + raise ValueError("integrate_box_1d() only handles 1D pdfs") + + stdev = ravel(sqrt(self.covariance))[0] + + normalized_low = ravel((low - self.dataset) / stdev) + normalized_high = ravel((high - self.dataset) / stdev) + + value = np.sum(self.weights*( + special.ndtr(normalized_high) - + special.ndtr(normalized_low))) + return value + + def integrate_box(self, low_bounds, high_bounds, maxpts=None): + """Computes the integral of a pdf over a rectangular interval. + + Parameters + ---------- + low_bounds : array_like + A 1-D array containing the lower bounds of integration. + high_bounds : array_like + A 1-D array containing the upper bounds of integration. 
+ maxpts : int, optional + The maximum number of points to use for integration. + + Returns + ------- + value : scalar + The result of the integral. + + """ + if maxpts is not None: + extra_kwds = {'maxpts': maxpts} + else: + extra_kwds = {} + + value, inform = _mvn.mvnun_weighted(low_bounds, high_bounds, + self.dataset, self.weights, + self.covariance, **extra_kwds) + if inform: + msg = ('An integral in _mvn.mvnun requires more points than %s' % + (self.d * 1000)) + warnings.warn(msg, stacklevel=2) + + return value + + def integrate_kde(self, other): + """ + Computes the integral of the product of this kernel density estimate + with another. + + Parameters + ---------- + other : gaussian_kde instance + The other kde. + + Returns + ------- + value : scalar + The result of the integral. + + Raises + ------ + ValueError + If the KDEs have different dimensionality. + + """ + if other.d != self.d: + raise ValueError("KDEs are not the same dimensionality") + + # we want to iterate over the smallest number of points + if other.n < self.n: + small = other + large = self + else: + small = self + large = other + + sum_cov = small.covariance + large.covariance + sum_cov_chol = linalg.cho_factor(sum_cov) + result = 0.0 + for i in range(small.n): + mean = small.dataset[:, i, newaxis] + diff = large.dataset - mean + tdiff = linalg.cho_solve(sum_cov_chol, diff) + + energies = sum(diff * tdiff, axis=0) / 2.0 + result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i] + + sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) + norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det + + result /= norm_const + + return result + + def resample(self, size=None, seed=None): + """Randomly sample a dataset from the estimated pdf. + + Parameters + ---------- + size : int, optional + The number of samples to draw. If not provided, then the size is + the same as the effective number of samples in the underlying + dataset. 
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Returns + ------- + resample : (self.d, `size`) ndarray + The sampled dataset. + + """ # numpy/numpydoc#87 # noqa: E501 + if size is None: + size = int(self.neff) + + random_state = check_random_state(seed) + norm = transpose(random_state.multivariate_normal( + zeros((self.d,), float), self.covariance, size=size + )) + indices = random_state.choice(self.n, size=size, p=self.weights) + means = self.dataset[:, indices] + + return means + norm + + def scotts_factor(self): + """Compute Scott's factor. + + Returns + ------- + s : float + Scott's factor. + """ + return power(self.neff, -1./(self.d+4)) + + def silverman_factor(self): + """Compute the Silverman factor. + + Returns + ------- + s : float + The silverman factor. + """ + return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4)) + + # Default method to calculate bandwidth, can be overwritten by subclass + covariance_factor = scotts_factor + covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that + multiplies the data covariance matrix to obtain the kernel covariance + matrix. The default is `scotts_factor`. A subclass can overwrite this + method to provide a different method, or set it through a call to + `kde.set_bandwidth`.""" + + def set_bandwidth(self, bw_method=None): + """Compute the estimator bandwidth with given method. + + The new bandwidth calculated after a call to `set_bandwidth` is used + for subsequent evaluations of the estimated density. + + Parameters + ---------- + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. 
This can be + 'scott', 'silverman', a scalar constant or a callable. If a + scalar, this will be used directly as `kde.factor`. If a callable, + it should take a `gaussian_kde` instance as only parameter and + return a scalar. If None (default), nothing happens; the current + `kde.covariance_factor` method is kept. + + Notes + ----- + .. versionadded:: 0.11 + + Examples + -------- + >>> import numpy as np + >>> import scipy.stats as stats + >>> x1 = np.array([-7, -5, 1, 4, 5.]) + >>> kde = stats.gaussian_kde(x1) + >>> xs = np.linspace(-10, 10, num=50) + >>> y1 = kde(xs) + >>> kde.set_bandwidth(bw_method='silverman') + >>> y2 = kde(xs) + >>> kde.set_bandwidth(bw_method=kde.factor / 3.) + >>> y3 = kde(xs) + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo', + ... label='Data points (rescaled)') + >>> ax.plot(xs, y1, label='Scott (default)') + >>> ax.plot(xs, y2, label='Silverman') + >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)') + >>> ax.legend() + >>> plt.show() + + """ + if bw_method is None: + pass + elif bw_method == 'scott': + self.covariance_factor = self.scotts_factor + elif bw_method == 'silverman': + self.covariance_factor = self.silverman_factor + elif np.isscalar(bw_method) and not isinstance(bw_method, str): + self._bw_method = 'use constant' + self.covariance_factor = lambda: bw_method + elif callable(bw_method): + self._bw_method = bw_method + self.covariance_factor = lambda: self._bw_method(self) + else: + msg = "`bw_method` should be 'scott', 'silverman', a scalar " \ + "or a callable." + raise ValueError(msg) + + self._compute_covariance() + + def _compute_covariance(self): + """Computes the covariance matrix for each Gaussian kernel using + covariance_factor(). 
+ """ + self.factor = self.covariance_factor() + # Cache covariance and Cholesky decomp of covariance + if not hasattr(self, '_data_cho_cov'): + self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1, + bias=False, + aweights=self.weights)) + self._data_cho_cov = linalg.cholesky(self._data_covariance, + lower=True) + + self.covariance = self._data_covariance * self.factor**2 + self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64) + self.log_det = 2*np.log(np.diag(self.cho_cov + * np.sqrt(2*pi))).sum() + + @property + def inv_cov(self): + # Re-compute from scratch each time because I'm not sure how this is + # used in the wild. (Perhaps users change the `dataset`, since it's + # not a private attribute?) `_compute_covariance` used to recalculate + # all these, so we'll recalculate everything now that this is a + # a property. + self.factor = self.covariance_factor() + self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1, + bias=False, aweights=self.weights)) + return linalg.inv(self._data_covariance) / self.factor**2 + + def pdf(self, x): + """ + Evaluate the estimated pdf on a provided set of points. + + Notes + ----- + This is an alias for `gaussian_kde.evaluate`. See the ``evaluate`` + docstring for more details. + + """ + return self.evaluate(x) + + def logpdf(self, x): + """ + Evaluate the log of the estimated pdf on a provided set of points. 
+ """ + points = atleast_2d(x) + + d, m = points.shape + if d != self.d: + if d == 1 and m == self.d: + # points was passed in as a row vector + points = reshape(points, (self.d, 1)) + m = 1 + else: + msg = (f"points have dimension {d}, " + f"dataset has dimension {self.d}") + raise ValueError(msg) + + output_dtype, spec = _get_output_dtype(self.covariance, points) + result = gaussian_kernel_estimate_log[spec]( + self.dataset.T, self.weights[:, None], + points.T, self.cho_cov, output_dtype) + + return result[:, 0] + + def marginal(self, dimensions): + """Return a marginal KDE distribution + + Parameters + ---------- + dimensions : int or 1-d array_like + The dimensions of the multivariate distribution corresponding + with the marginal variables, that is, the indices of the dimensions + that are being retained. The other dimensions are marginalized out. + + Returns + ------- + marginal_kde : gaussian_kde + An object representing the marginal distribution. + + Notes + ----- + .. versionadded:: 1.10.0 + + """ + + dims = np.atleast_1d(dimensions) + + if not np.issubdtype(dims.dtype, np.integer): + msg = ("Elements of `dimensions` must be integers - the indices " + "of the marginal variables being retained.") + raise ValueError(msg) + + n = len(self.dataset) # number of dimensions + original_dims = dims.copy() + + dims[dims < 0] = n + dims[dims < 0] + + if len(np.unique(dims)) != len(dims): + msg = ("All elements of `dimensions` must be unique.") + raise ValueError(msg) + + i_invalid = (dims < 0) | (dims >= n) + if np.any(i_invalid): + msg = (f"Dimensions {original_dims[i_invalid]} are invalid " + f"for a distribution in {n} dimensions.") + raise ValueError(msg) + + dataset = self.dataset[dims] + weights = self.weights + + return gaussian_kde(dataset, bw_method=self.covariance_factor(), + weights=weights) + + @property + def weights(self): + try: + return self._weights + except AttributeError: + self._weights = ones(self.n)/self.n + return self._weights + + @property + 
def neff(self): + try: + return self._neff + except AttributeError: + self._neff = 1/sum(self.weights**2) + return self._neff + + +def _get_output_dtype(covariance, points): + """ + Calculates the output dtype and the "spec" (=C type name). + + This was necessary in order to deal with the fused types in the Cython + routine `gaussian_kernel_estimate`. See gh-10824 for details. + """ + output_dtype = np.common_type(covariance, points) + itemsize = np.dtype(output_dtype).itemsize + if itemsize == 4: + spec = 'float' + elif itemsize == 8: + spec = 'double' + elif itemsize in (12, 16): + spec = 'long double' + else: + raise ValueError( + f"{output_dtype} has unexpected item size: {itemsize}" + ) + + return output_dtype, spec diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_ksstats.py b/parrot/lib/python3.10/site-packages/scipy/stats/_ksstats.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc60da7bba862a1b16f4b41c66e523f985ac415 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_ksstats.py @@ -0,0 +1,600 @@ +# Compute the two-sided one-sample Kolmogorov-Smirnov Prob(Dn <= d) where: +# D_n = sup_x{|F_n(x) - F(x)|}, +# F_n(x) is the empirical CDF for a sample of size n {x_i: i=1,...,n}, +# F(x) is the CDF of a probability distribution. +# +# Exact methods: +# Prob(D_n >= d) can be computed via a matrix algorithm of Durbin[1] +# or a recursion algorithm due to Pomeranz[2]. +# Marsaglia, Tsang & Wang[3] gave a computation-efficient way to perform +# the Durbin algorithm. +# D_n >= d <==> D_n+ >= d or D_n- >= d (the one-sided K-S statistics), hence +# Prob(D_n >= d) = 2*Prob(D_n+ >= d) - Prob(D_n+ >= d and D_n- >= d). +# For d > 0.5, the latter intersection probability is 0. +# +# Approximate methods: +# For d close to 0.5, ignoring that intersection term may still give a +# reasonable approximation. 
+# Li-Chien[4] and Korolyuk[5] gave an asymptotic formula extending +# Kolmogorov's initial asymptotic, suitable for large d. (See +# scipy.special.kolmogorov for that asymptotic) +# Pelz-Good[6] used the functional equation for Jacobi theta functions to +# transform the Li-Chien/Korolyuk formula produce a computational formula +# suitable for small d. +# +# Simard and L'Ecuyer[7] provided an algorithm to decide when to use each of +# the above approaches and it is that which is used here. +# +# Other approaches: +# Carvalho[8] optimizes Durbin's matrix algorithm for large values of d. +# Moscovich and Nadler[9] use FFTs to compute the convolutions. + +# References: +# [1] Durbin J (1968). +# "The Probability that the Sample Distribution Function Lies Between Two +# Parallel Straight Lines." +# Annals of Mathematical Statistics, 39, 398-411. +# [2] Pomeranz J (1974). +# "Exact Cumulative Distribution of the Kolmogorov-Smirnov Statistic for +# Small Samples (Algorithm 487)." +# Communications of the ACM, 17(12), 703-704. +# [3] Marsaglia G, Tsang WW, Wang J (2003). +# "Evaluating Kolmogorov's Distribution." +# Journal of Statistical Software, 8(18), 1-4. +# [4] LI-CHIEN, C. (1956). +# "On the exact distribution of the statistics of A. N. Kolmogorov and +# their asymptotic expansion." +# Acta Matematica Sinica, 6, 55-81. +# [5] KOROLYUK, V. S. (1960). +# "Asymptotic analysis of the distribution of the maximum deviation in +# the Bernoulli scheme." +# Theor. Probability Appl., 4, 339-366. +# [6] Pelz W, Good IJ (1976). +# "Approximating the Lower Tail-areas of the Kolmogorov-Smirnov One-sample +# Statistic." +# Journal of the Royal Statistical Society, Series B, 38(2), 152-156. +# [7] Simard, R., L'Ecuyer, P. (2011) +# "Computing the Two-Sided Kolmogorov-Smirnov Distribution", +# Journal of Statistical Software, Vol 39, 11, 1-18. 
+# [8] Carvalho, Luis (2015) +# "An Improved Evaluation of Kolmogorov's Distribution" +# Journal of Statistical Software, Code Snippets; Vol 65(3), 1-8. +# [9] Amit Moscovich, Boaz Nadler (2017) +# "Fast calculation of boundary crossing probabilities for Poisson +# processes", +# Statistics & Probability Letters, Vol 123, 177-182. + + +import numpy as np +import scipy.special +import scipy.special._ufuncs as scu +from scipy._lib._finite_differences import _derivative + +_E128 = 128 +_EP128 = np.ldexp(np.longdouble(1), _E128) +_EM128 = np.ldexp(np.longdouble(1), -_E128) + +_SQRT2PI = np.sqrt(2 * np.pi) +_LOG_2PI = np.log(2 * np.pi) +_MIN_LOG = -708 +_SQRT3 = np.sqrt(3) +_PI_SQUARED = np.pi ** 2 +_PI_FOUR = np.pi ** 4 +_PI_SIX = np.pi ** 6 + +# [Lifted from _loggamma.pxd.] If B_m are the Bernoulli numbers, +# then Stirling coeffs are B_{2j}/(2j)/(2j-1) for j=8,...1. +_STIRLING_COEFFS = [-2.955065359477124183e-2, 6.4102564102564102564e-3, + -1.9175269175269175269e-3, 8.4175084175084175084e-4, + -5.952380952380952381e-4, 7.9365079365079365079e-4, + -2.7777777777777777778e-3, 8.3333333333333333333e-2] + + +def _log_nfactorial_div_n_pow_n(n): + # Computes n! / n**n + # = (n-1)! / n**(n-1) + # Uses Stirling's approximation, but removes n*log(n) up-front to + # avoid subtractive cancellation. + # = log(n)/2 - n + log(sqrt(2pi)) + sum B_{2j}/(2j)/(2j-1)/n**(2j-1) + rn = 1.0/n + return np.log(n)/2 - n + _LOG_2PI/2 + rn * np.polyval(_STIRLING_COEFFS, rn/n) + + +def _clip_prob(p): + """clips a probability to range 0<=p<=1.""" + return np.clip(p, 0.0, 1.0) + + +def _select_and_clip_prob(cdfprob, sfprob, cdf=True): + """Selects either the CDF or SF, and then clips to range 0<=p<=1.""" + p = np.where(cdf, cdfprob, sfprob) + return _clip_prob(p) + + +def _kolmogn_DMTW(n, d, cdf=True): + r"""Computes the Kolmogorov CDF: Pr(D_n <= d) using the MTW approach to + the Durbin matrix algorithm. + + Durbin (1968); Marsaglia, Tsang, Wang (2003). [1], [3]. 
+ """ + # Write d = (k-h)/n, where k is positive integer and 0 <= h < 1 + # Generate initial matrix H of size m*m where m=(2k-1) + # Compute k-th row of (n!/n^n) * H^n, scaling intermediate results. + # Requires memory O(m^2) and computation O(m^2 log(n)). + # Most suitable for small m. + + if d >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf) + nd = n * d + if nd <= 0.5: + return _select_and_clip_prob(0.0, 1.0, cdf) + k = int(np.ceil(nd)) + h = k - nd + m = 2 * k - 1 + + H = np.zeros([m, m]) + + # Initialize: v is first column (and last row) of H + # v[j] = (1-h^(j+1)/(j+1)! (except for v[-1]) + # w[j] = 1/(j)! + # q = k-th row of H (actually i!/n^i*H^i) + intm = np.arange(1, m + 1) + v = 1.0 - h ** intm + w = np.empty(m) + fac = 1.0 + for j in intm: + w[j - 1] = fac + fac /= j # This might underflow. Isn't a problem. + v[j - 1] *= fac + tt = max(2 * h - 1.0, 0)**m - 2*h**m + v[-1] = (1.0 + tt) * fac + + for i in range(1, m): + H[i - 1:, i] = w[:m - i + 1] + H[:, 0] = v + H[-1, :] = np.flip(v, axis=0) + + Hpwr = np.eye(np.shape(H)[0]) # Holds intermediate powers of H + nn = n + expnt = 0 # Scaling of Hpwr + Hexpnt = 0 # Scaling of H + while nn > 0: + if nn % 2: + Hpwr = np.matmul(Hpwr, H) + expnt += Hexpnt + H = np.matmul(H, H) + Hexpnt *= 2 + # Scale as needed. 
+ if np.abs(H[k - 1, k - 1]) > _EP128: + H /= _EP128 + Hexpnt += _E128 + nn = nn // 2 + + p = Hpwr[k - 1, k - 1] + + # Multiply by n!/n^n + for i in range(1, n + 1): + p = i * p / n + if np.abs(p) < _EM128: + p *= _EP128 + expnt -= _E128 + + # unscale + if expnt != 0: + p = np.ldexp(p, expnt) + + return _select_and_clip_prob(p, 1.0-p, cdf) + + +def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf): + """Compute the endpoints of the interval for row i.""" + if i == 0: + j1, j2 = -ll - ceilf - 1, ll + ceilf - 1 + else: + # i + 1 = 2*ip1div2 + ip1mod2 + ip1div2, ip1mod2 = divmod(i + 1, 2) + if ip1mod2 == 0: # i is odd + if ip1div2 == n + 1: + j1, j2 = n - ll - ceilf - 1, n + ll + ceilf - 1 + else: + j1, j2 = ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1 + else: + j1, j2 = ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1 + + return max(j1 + 2, 0), min(j2, n) + + +def _kolmogn_Pomeranz(n, x, cdf=True): + r"""Computes Pr(D_n <= d) using the Pomeranz recursion algorithm. + + Pomeranz (1974) [2] + """ + + # V is n*(2n+2) matrix. + # Each row is convolution of the previous row and probabilities from a + # Poisson distribution. + # Desired CDF probability is n! V[n-1, 2n+1] (final entry in final row). + # Only two rows are needed at any given stage: + # - Call them V0 and V1. + # - Swap each iteration + # Only a few (contiguous) entries in each row can be non-zero. + # - Keep track of start and end (j1 and j2 below) + # - V0s and V1s track the start in the two rows + # Scale intermediate results as needed. + # Only a few different Poisson distributions can occur + t = n * x + ll = int(np.floor(t)) + f = 1.0 * (t - ll) # fractional part of t + g = min(f, 1.0 - f) + ceilf = (1 if f > 0 else 0) + roundf = (1 if f > 0.5 else 0) + npwrs = 2 * (ll + 1) # Maximum number of powers needed in convolutions + gpower = np.empty(npwrs) # gpower = (g/n)^m/m! + twogpower = np.empty(npwrs) # twogpower = (2g/n)^m/m! + onem2gpower = np.empty(npwrs) # onem2gpower = ((1-2g)/n)^m/m! 
+ # gpower etc are *almost* Poisson probs, just missing normalizing factor. + + gpower[0] = 1.0 + twogpower[0] = 1.0 + onem2gpower[0] = 1.0 + expnt = 0 + g_over_n, two_g_over_n, one_minus_two_g_over_n = g/n, 2*g/n, (1 - 2*g)/n + for m in range(1, npwrs): + gpower[m] = gpower[m - 1] * g_over_n / m + twogpower[m] = twogpower[m - 1] * two_g_over_n / m + onem2gpower[m] = onem2gpower[m - 1] * one_minus_two_g_over_n / m + + V0 = np.zeros([npwrs]) + V1 = np.zeros([npwrs]) + V1[0] = 1 # first row + V0s, V1s = 0, 0 # start indices of the two rows + + j1, j2 = _pomeranz_compute_j1j2(0, n, ll, ceilf, roundf) + for i in range(1, 2 * n + 2): + # Preserve j1, V1, V1s, V0s from last iteration + k1 = j1 + V0, V1 = V1, V0 + V0s, V1s = V1s, V0s + V1.fill(0.0) + j1, j2 = _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf) + if i == 1 or i == 2 * n + 1: + pwrs = gpower + else: + pwrs = (twogpower if i % 2 else onem2gpower) + ln2 = j2 - k1 + 1 + if ln2 > 0: + conv = np.convolve(V0[k1 - V0s:k1 - V0s + ln2], pwrs[:ln2]) + conv_start = j1 - k1 # First index to use from conv + conv_len = j2 - j1 + 1 # Number of entries to use from conv + V1[:conv_len] = conv[conv_start:conv_start + conv_len] + # Scale to avoid underflow. + if 0 < np.max(V1) < _EM128: + V1 *= _EP128 + expnt -= _E128 + V1s = V0s + j1 - k1 + + # multiply by n! + ans = V1[n - V1s] + for m in range(1, n + 1): + if np.abs(ans) > _EP128: + ans *= _EM128 + expnt += _E128 + ans *= m + + # Undo any intermediate scaling + if expnt != 0: + ans = np.ldexp(ans, expnt) + ans = _select_and_clip_prob(ans, 1.0 - ans, cdf) + return ans + + +def _kolmogn_PelzGood(n, x, cdf=True): + """Computes the Pelz-Good approximation to Prob(Dn <= x) with 0<=x<=1. + + Start with Li-Chien, Korolyuk approximation: + Prob(Dn <= x) ~ K0(z) + K1(z)/sqrt(n) + K2(z)/n + K3(z)/n**1.5 + where z = x*sqrt(n). + Transform each K_(z) using Jacobi theta functions into a form suitable + for small z. + Pelz-Good (1976). 
[6] + """ + if x <= 0.0: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + if x >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf=cdf) + + z = np.sqrt(n) * x + zsquared, zthree, zfour, zsix = z**2, z**3, z**4, z**6 + + qlog = -_PI_SQUARED / 8 / zsquared + if qlog < _MIN_LOG: # z ~ 0.041743441416853426 + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + + q = np.exp(qlog) + + # Coefficients of terms in the sums for K1, K2 and K3 + k1a = -zsquared + k1b = _PI_SQUARED / 4 + + k2a = 6 * zsix + 2 * zfour + k2b = (2 * zfour - 5 * zsquared) * _PI_SQUARED / 4 + k2c = _PI_FOUR * (1 - 2 * zsquared) / 16 + + k3d = _PI_SIX * (5 - 30 * zsquared) / 64 + k3c = _PI_FOUR * (-60 * zsquared + 212 * zfour) / 16 + k3b = _PI_SQUARED * (135 * zfour - 96 * zsix) / 4 + k3a = -30 * zsix - 90 * z**8 + + K0to3 = np.zeros(4) + # Use a Horner scheme to evaluate sum c_i q^(i^2) + # Reduces to a sum over odd integers. + maxk = int(np.ceil(16 * z / np.pi)) + for k in range(maxk, 0, -1): + m = 2 * k - 1 + msquared, mfour, msix = m**2, m**4, m**6 + qpower = np.power(q, 8 * k) + coeffs = np.array([1.0, + k1a + k1b*msquared, + k2a + k2b*msquared + k2c*mfour, + k3a + k3b*msquared + k3c*mfour + k3d*msix]) + K0to3 *= qpower + K0to3 += coeffs + K0to3 *= q + K0to3 *= _SQRT2PI + # z**10 > 0 as z > 0.04 + K0to3 /= np.array([z, 6 * zfour, 72 * z**7, 6480 * z**10]) + + # Now do the other sum over the other terms, all integers k + # K_2: (pi^2 k^2) q^(k^2), + # K_3: (3pi^2 k^2 z^2 - pi^4 k^4)*q^(k^2) + # Don't expect much subtractive cancellation so use direct calculation + q = np.exp(-_PI_SQUARED / 2 / zsquared) + ks = np.arange(maxk, 0, -1) + ksquared = ks ** 2 + sqrt3z = _SQRT3 * z + kspi = np.pi * ks + qpwers = q ** ksquared + k2extra = np.sum(ksquared * qpwers) + k2extra *= _PI_SQUARED * _SQRT2PI/(-36 * zthree) + K0to3[2] += k2extra + k3extra = np.sum((sqrt3z + kspi) * (sqrt3z - kspi) * ksquared * qpwers) + k3extra *= _PI_SQUARED * _SQRT2PI/(216 * zsix) + K0to3[3] += k3extra + powers_of_n = np.power(n 
* 1.0, np.arange(len(K0to3)) / 2.0) + K0to3 /= powers_of_n + + if not cdf: + K0to3 *= -1 + K0to3[0] += 1 + + Ksum = sum(K0to3) + return Ksum + + +def _kolmogn(n, x, cdf=True): + """Computes the CDF(or SF) for the two-sided Kolmogorov-Smirnov statistic. + + x must be of type float, n of type integer. + + Simard & L'Ecuyer (2011) [7]. + """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if x >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf=cdf) + if x <= 0.0: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + t = n * x + if t <= 1.0: # Ruben-Gambino: 1/2n <= x <= 1/n + if t <= 0.5: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + if n <= 140: + prob = np.prod(np.arange(1, n+1) * (1.0/n) * (2*t - 1)) + else: + prob = np.exp(_log_nfactorial_div_n_pow_n(n) + n * np.log(2*t-1)) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + if t >= n - 1: # Ruben-Gambino + prob = 2 * (1.0 - x)**n + return _select_and_clip_prob(1 - prob, prob, cdf=cdf) + if x >= 0.5: # Exact: 2 * smirnov + prob = 2 * scipy.special.smirnov(n, x) + return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf) + + nxsquared = t * x + if n <= 140: + if nxsquared <= 0.754693: + prob = _kolmogn_DMTW(n, x, cdf=True) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + if nxsquared <= 4: + prob = _kolmogn_Pomeranz(n, x, cdf=True) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + # Now use Miller approximation of 2*smirnov + prob = 2 * scipy.special.smirnov(n, x) + return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf) + + # Split CDF and SF as they have different cutoffs on nxsquared. 
+ if not cdf: + if nxsquared >= 370.0: + return 0.0 + if nxsquared >= 2.2: + prob = 2 * scipy.special.smirnov(n, x) + return _clip_prob(prob) + # Fall through and compute the SF as 1.0-CDF + if nxsquared >= 18.0: + cdfprob = 1.0 + elif n <= 100000 and n * x**1.5 <= 1.4: + cdfprob = _kolmogn_DMTW(n, x, cdf=True) + else: + cdfprob = _kolmogn_PelzGood(n, x, cdf=True) + return _select_and_clip_prob(cdfprob, 1.0 - cdfprob, cdf=cdf) + + +def _kolmogn_p(n, x): + """Computes the PDF for the two-sided Kolmogorov-Smirnov statistic. + + x must be of type float, n of type integer. + """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if x >= 1.0 or x <= 0: + return 0 + t = n * x + if t <= 1.0: + # Ruben-Gambino: n!/n^n * (2t-1)^n -> 2 n!/n^n * n^2 * (2t-1)^(n-1) + if t <= 0.5: + return 0.0 + if n <= 140: + prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1)) + else: + prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n-1) * np.log(2 * t - 1)) + return prd * 2 * n**2 + if t >= n - 1: + # Ruben-Gambino : 1-2(1-x)**n -> 2n*(1-x)**(n-1) + return 2 * (1.0 - x) ** (n-1) * n + if x >= 0.5: + return 2 * scipy.stats.ksone.pdf(x, n) + + # Just take a small delta. + # Ideally x +/- delta would stay within [i/n, (i+1)/n] for some integer a. + # as the CDF is a piecewise degree n polynomial. + # It has knots at 1/n, 2/n, ... (n-1)/n + # and is not a C-infinity function at the knots + delta = x / 2.0**16 + delta = min(delta, x - 1.0/n) + delta = min(delta, 0.5 - x) + + def _kk(_x): + return kolmogn(n, _x) + + return _derivative(_kk, x, dx=delta, order=5) + + +def _kolmogni(n, p, q): + """Computes the PPF/ISF of kolmogn. 
+ + n of type integer, n>= 1 + p is the CDF, q the SF, p+q=1 + """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if p <= 0: + return 1.0/n + if q <= 0: + return 1.0 + delta = np.exp((np.log(p) - scipy.special.loggamma(n+1))/n) + if delta <= 1.0/n: + return (delta + 1.0 / n) / 2 + x = -np.expm1(np.log(q/2.0)/n) + if x >= 1 - 1.0/n: + return x + x1 = scu._kolmogci(p)/np.sqrt(n) + x1 = min(x1, 1.0 - 1.0/n) + + def _f(x): + return _kolmogn(n, x) - p + + return scipy.optimize.brentq(_f, 1.0/n, x1, xtol=1e-14) + + +def kolmogn(n, x, cdf=True): + """Computes the CDF for the two-sided Kolmogorov-Smirnov distribution. + + The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x), + for a sample of size n drawn from a distribution with CDF F(t), where + :math:`D_n &= sup_t |F_n(t) - F(t)|`, and + :math:`F_n(t)` is the Empirical Cumulative Distribution Function of the sample. + + Parameters + ---------- + n : integer, array_like + the number of samples + x : float, array_like + The K-S statistic, float between 0 and 1 + cdf : bool, optional + whether to compute the CDF(default=true) or the SF. + + Returns + ------- + cdf : ndarray + CDF (or SF it cdf is False) at the specified locations. + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, x, cdf, None], + op_dtypes=[None, np.float64, np.bool_, np.float64]) + for _n, _x, _cdf, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + z[...] = _kolmogn(int(_n), _x, cdf=_cdf) + result = it.operands[-1] + return result + + +def kolmognp(n, x): + """Computes the PDF for the two-sided Kolmogorov-Smirnov distribution. 
+ + Parameters + ---------- + n : integer, array_like + the number of samples + x : float, array_like + The K-S statistic, float between 0 and 1 + + Returns + ------- + pdf : ndarray + The PDF at the specified locations + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, x, None]) + for _n, _x, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + z[...] = _kolmogn_p(int(_n), _x) + result = it.operands[-1] + return result + + +def kolmogni(n, q, cdf=True): + """Computes the PPF(or ISF) for the two-sided Kolmogorov-Smirnov distribution. + + Parameters + ---------- + n : integer, array_like + the number of samples + q : float, array_like + Probabilities, float between 0 and 1 + cdf : bool, optional + whether to compute the PPF(default=true) or the ISF. + + Returns + ------- + ppf : ndarray + PPF (or ISF if cdf is False) at the specified locations + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, q, cdf, None]) + for _n, _q, _cdf, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + _pcdf, _psf = (_q, 1-_q) if _cdf else (1-_q, _q) + z[...] 
= _kolmogni(int(_n), _pcdf, _psf) + result = it.operands[-1] + return result diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py b/parrot/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py new file mode 100644 index 0000000000000000000000000000000000000000..19b7ce3883bd8d78d42f5ebe93a79da24c24e21c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py @@ -0,0 +1,494 @@ +import numpy as np +from collections import namedtuple +from scipy import special +from scipy import stats +from scipy.stats._stats_py import _rankdata +from ._axis_nan_policy import _axis_nan_policy_factory + + +def _broadcast_concatenate(x, y, axis): + '''Broadcast then concatenate arrays, leaving concatenation axis last''' + x = np.moveaxis(x, axis, -1) + y = np.moveaxis(y, axis, -1) + z = np.broadcast(x[..., 0], y[..., 0]) + x = np.broadcast_to(x, z.shape + (x.shape[-1],)) + y = np.broadcast_to(y, z.shape + (y.shape[-1],)) + z = np.concatenate((x, y), axis=-1) + return x, y, z + + +class _MWU: + '''Distribution of MWU statistic under the null hypothesis''' + + def __init__(self, n1, n2): + self._reset(n1, n2) + + def set_shapes(self, n1, n2): + n1, n2 = min(n1, n2), max(n1, n2) + if (n1, n2) == (self.n1, self.n2): + return + + self.n1 = n1 + self.n2 = n2 + self.s_array = np.zeros(0, dtype=int) + self.configurations = np.zeros(0, dtype=np.uint64) + + def reset(self): + self._reset(self.n1, self.n2) + + def _reset(self, n1, n2): + self.n1 = None + self.n2 = None + self.set_shapes(n1, n2) + + def pmf(self, k): + + # In practice, `pmf` is never called with k > m*n/2. + # If it were, we'd exploit symmetry here: + # k = np.array(k, copy=True) + # k2 = m*n - k + # i = k2 < k + # k[i] = k2[i] + + pmfs = self.build_u_freqs_array(np.max(k)) + return pmfs[k] + + def cdf(self, k): + '''Cumulative distribution function''' + + # In practice, `cdf` is never called with k > m*n/2. 
+ # If it were, we'd exploit symmetry here rather than in `sf` + pmfs = self.build_u_freqs_array(np.max(k)) + cdfs = np.cumsum(pmfs) + return cdfs[k] + + def sf(self, k): + '''Survival function''' + # Note that both CDF and SF include the PMF at k. The p-value is + # calculated from the SF and should include the mass at k, so this + # is desirable + + # Use the fact that the distribution is symmetric and sum from the left + kc = np.asarray(self.n1*self.n2 - k) # complement of k + i = k < kc + if np.any(i): + kc[i] = k[i] + cdfs = np.asarray(self.cdf(kc)) + cdfs[i] = 1. - cdfs[i] + self.pmf(kc[i]) + else: + cdfs = np.asarray(self.cdf(kc)) + return cdfs[()] + + # build_sigma_array and build_u_freqs_array adapted from code + # by @toobaz with permission. Thanks to @andreasloe for the suggestion. + # See https://github.com/scipy/scipy/pull/4933#issuecomment-1898082691 + def build_sigma_array(self, a): + n1, n2 = self.n1, self.n2 + if a + 1 <= self.s_array.size: + return self.s_array[1:a+1] + + s_array = np.zeros(a + 1, dtype=int) + + for d in np.arange(1, n1 + 1): + # All multiples of d, except 0: + indices = np.arange(d, a + 1, d) + # \epsilon_d = 1: + s_array[indices] += d + + for d in np.arange(n2 + 1, n2 + n1 + 1): + # All multiples of d, except 0: + indices = np.arange(d, a + 1, d) + # \epsilon_d = -1: + s_array[indices] -= d + + # We don't need 0: + self.s_array = s_array + return s_array[1:] + + def build_u_freqs_array(self, maxu): + """ + Build all the array of frequencies for u from 0 to maxu. 
+ Assumptions: + n1 <= n2 + maxu <= n1 * n2 / 2 + """ + n1, n2 = self.n1, self.n2 + total = special.binom(n1 + n2, n1) + + if maxu + 1 <= self.configurations.size: + return self.configurations[:maxu + 1] / total + + s_array = self.build_sigma_array(maxu) + + # Start working with ints, for maximum precision and efficiency: + configurations = np.zeros(maxu + 1, dtype=np.uint64) + configurations_is_uint = True + uint_max = np.iinfo(np.uint64).max + # How many ways to have U=0? 1 + configurations[0] = 1 + + for u in np.arange(1, maxu + 1): + coeffs = s_array[u - 1::-1] + new_val = np.dot(configurations[:u], coeffs) / u + if new_val > uint_max and configurations_is_uint: + # OK, we got into numbers too big for uint64. + # So now we start working with floats. + # By doing this since the beginning, we would have lost precision. + # (And working on python long ints would be unbearably slow) + configurations = configurations.astype(float) + configurations_is_uint = False + configurations[u] = new_val + + self.configurations = configurations + return configurations / total + + +_mwu_state = _MWU(0, 0) + + +def _get_mwu_z(U, n1, n2, t, axis=0, continuity=True): + '''Standardized MWU statistic''' + # Follows mannwhitneyu [2] + mu = n1 * n2 / 2 + n = n1 + n2 + + # Tie correction according to [2], "Normal approximation and tie correction" + # "A more computationally-efficient form..." + tie_term = (t**3 - t).sum(axis=-1) + s = np.sqrt(n1*n2/12 * ((n + 1) - tie_term/(n*(n-1)))) + + numerator = U - mu + + # Continuity correction. + # Because SF is always used to calculate the p-value, we can always + # _subtract_ 0.5 for the continuity correction. This always increases the + # p-value to account for the rest of the probability mass _at_ q = U. 
+ if continuity: + numerator -= 0.5 + + # no problem evaluating the norm SF at an infinity + with np.errstate(divide='ignore', invalid='ignore'): + z = numerator / s + return z + + +def _mwu_input_validation(x, y, use_continuity, alternative, axis, method): + ''' Input validation and standardization for mannwhitneyu ''' + # Would use np.asarray_chkfinite, but infs are OK + x, y = np.atleast_1d(x), np.atleast_1d(y) + if np.isnan(x).any() or np.isnan(y).any(): + raise ValueError('`x` and `y` must not contain NaNs.') + if np.size(x) == 0 or np.size(y) == 0: + raise ValueError('`x` and `y` must be of nonzero size.') + + bools = {True, False} + if use_continuity not in bools: + raise ValueError(f'`use_continuity` must be one of {bools}.') + + alternatives = {"two-sided", "less", "greater"} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f'`alternative` must be one of {alternatives}.') + + axis_int = int(axis) + if axis != axis_int: + raise ValueError('`axis` must be an integer.') + + if not isinstance(method, stats.PermutationMethod): + methods = {"asymptotic", "exact", "auto"} + method = method.lower() + if method not in methods: + raise ValueError(f'`method` must be one of {methods}.') + + return x, y, use_continuity, alternative, axis_int, method + + +def _mwu_choose_method(n1, n2, ties): + """Choose method 'asymptotic' or 'exact' depending on input size, ties""" + + # if both inputs are large, asymptotic is OK + if n1 > 8 and n2 > 8: + return "asymptotic" + + # if there are any ties, asymptotic is preferred + if ties: + return "asymptotic" + + return "exact" + + +MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(MannwhitneyuResult, n_samples=2) +def mannwhitneyu(x, y, use_continuity=True, alternative="two-sided", + axis=0, method="auto"): + r'''Perform the Mann-Whitney U rank test on two independent samples. 
+ + The Mann-Whitney U test is a nonparametric test of the null hypothesis + that the distribution underlying sample `x` is the same as the + distribution underlying sample `y`. It is often used as a test of + difference in location between distributions. + + Parameters + ---------- + x, y : array-like + N-d arrays of samples. The arrays must be broadcastable except along + the dimension given by `axis`. + use_continuity : bool, optional + Whether a continuity correction (1/2) should be applied. + Default is True when `method` is ``'asymptotic'``; has no effect + otherwise. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + Let *F(u)* and *G(u)* be the cumulative distribution functions of the + distributions underlying `x` and `y`, respectively. Then the following + alternative hypotheses are available: + + * 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)* for + at least one *u*. + * 'less': the distribution underlying `x` is stochastically less + than the distribution underlying `y`, i.e. *F(u) > G(u)* for all *u*. + * 'greater': the distribution underlying `x` is stochastically greater + than the distribution underlying `y`, i.e. *F(u) < G(u)* for all *u*. + + Note that the mathematical expressions in the alternative hypotheses + above describe the CDFs of the underlying distributions. The directions + of the inequalities appear inconsistent with the natural language + description at first glance, but they are not. For example, suppose + *X* and *Y* are random variables that follow distributions with CDFs + *F* and *G*, respectively. If *F(u) > G(u)* for all *u*, samples drawn + from *X* tend to be less than those drawn from *Y*. + + Under a more restrictive set of assumptions, the alternative hypotheses + can be expressed in terms of the locations of the distributions; + see [5] section 5.1. + axis : int, optional + Axis along which to perform the test. Default is 0. 
+ method : {'auto', 'asymptotic', 'exact'} or `PermutationMethod` instance, optional + Selects the method used to calculate the *p*-value. + Default is 'auto'. The following options are available. + + * ``'asymptotic'``: compares the standardized test statistic + against the normal distribution, correcting for ties. + * ``'exact'``: computes the exact *p*-value by comparing the observed + :math:`U` statistic against the exact distribution of the :math:`U` + statistic under the null hypothesis. No correction is made for ties. + * ``'auto'``: chooses ``'exact'`` when the size of one of the samples + is less than or equal to 8 and there are no ties; + chooses ``'asymptotic'`` otherwise. + * `PermutationMethod` instance. In this case, the p-value + is computed using `permutation_test` with the provided + configuration options and other appropriate settings. + + Returns + ------- + res : MannwhitneyuResult + An object containing attributes: + + statistic : float + The Mann-Whitney U statistic corresponding with sample `x`. See + Notes for the test statistic corresponding with sample `y`. + pvalue : float + The associated *p*-value for the chosen `alternative`. + + Notes + ----- + If ``U1`` is the statistic corresponding with sample `x`, then the + statistic corresponding with sample `y` is + ``U2 = x.shape[axis] * y.shape[axis] - U1``. + + `mannwhitneyu` is for independent samples. For related / paired samples, + consider `scipy.stats.wilcoxon`. + + `method` ``'exact'`` is recommended when there are no ties and when either + sample size is less than 8 [1]_. The implementation follows the algorithm + reported in [3]_. + Note that the exact method is *not* corrected for ties, but + `mannwhitneyu` will not raise errors or warnings if there are ties in the + data. If there are ties and either samples is small (fewer than ~10 + observations), consider passing an instance of `PermutationMethod` + as the `method` to perform a permutation test. 
+ + The Mann-Whitney U test is a non-parametric version of the t-test for + independent samples. When the means of samples from the populations + are normally distributed, consider `scipy.stats.ttest_ind`. + + See Also + -------- + scipy.stats.wilcoxon, scipy.stats.ranksums, scipy.stats.ttest_ind + + References + ---------- + .. [1] H.B. Mann and D.R. Whitney, "On a test of whether one of two random + variables is stochastically larger than the other", The Annals of + Mathematical Statistics, Vol. 18, pp. 50-60, 1947. + .. [2] Mann-Whitney U Test, Wikipedia, + http://en.wikipedia.org/wiki/Mann-Whitney_U_test + .. [3] Andreas Löffler, + "Über eine Partition der nat. Zahlen und ihr Anwendung beim U-Test", + Wiss. Z. Univ. Halle, XXXII'83 pp. 87-89. + .. [4] Rosie Shier, "Statistics: 2.3 The Mann-Whitney U Test", Mathematics + Learning Support Centre, 2004. + .. [5] Michael P. Fay and Michael A. Proschan. "Wilcoxon-Mann-Whitney + or t-test? On assumptions for hypothesis tests and multiple \ + interpretations of decision rules." Statistics surveys, Vol. 4, pp. + 1-39, 2010. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2857732/ + + Examples + -------- + We follow the example from [4]_: nine randomly sampled young adults were + diagnosed with type II diabetes at the ages below. + + >>> males = [19, 22, 16, 29, 24] + >>> females = [20, 11, 17, 12] + + We use the Mann-Whitney U test to assess whether there is a statistically + significant difference in the diagnosis age of males and females. + The null hypothesis is that the distribution of male diagnosis ages is + the same as the distribution of female diagnosis ages. We decide + that a confidence level of 95% is required to reject the null hypothesis + in favor of the alternative that the distributions are different. + Since the number of samples is very small and there are no ties in the + data, we can compare the observed test statistic against the *exact* + distribution of the test statistic under the null hypothesis. 
+ + >>> from scipy.stats import mannwhitneyu + >>> U1, p = mannwhitneyu(males, females, method="exact") + >>> print(U1) + 17.0 + + `mannwhitneyu` always reports the statistic associated with the first + sample, which, in this case, is males. This agrees with :math:`U_M = 17` + reported in [4]_. The statistic associated with the second statistic + can be calculated: + + >>> nx, ny = len(males), len(females) + >>> U2 = nx*ny - U1 + >>> print(U2) + 3.0 + + This agrees with :math:`U_F = 3` reported in [4]_. The two-sided + *p*-value can be calculated from either statistic, and the value produced + by `mannwhitneyu` agrees with :math:`p = 0.11` reported in [4]_. + + >>> print(p) + 0.1111111111111111 + + The exact distribution of the test statistic is asymptotically normal, so + the example continues by comparing the exact *p*-value against the + *p*-value produced using the normal approximation. + + >>> _, pnorm = mannwhitneyu(males, females, method="asymptotic") + >>> print(pnorm) + 0.11134688653314041 + + Here `mannwhitneyu`'s reported *p*-value appears to conflict with the + value :math:`p = 0.09` given in [4]_. The reason is that [4]_ + does not apply the continuity correction performed by `mannwhitneyu`; + `mannwhitneyu` reduces the distance between the test statistic and the + mean :math:`\mu = n_x n_y / 2` by 0.5 to correct for the fact that the + discrete statistic is being compared against a continuous distribution. + Here, the :math:`U` statistic used is less than the mean, so we reduce + the distance by adding 0.5 in the numerator. + + >>> import numpy as np + >>> from scipy.stats import norm + >>> U = min(U1, U2) + >>> N = nx + ny + >>> z = (U - nx*ny/2 + 0.5) / np.sqrt(nx*ny * (N + 1)/ 12) + >>> p = 2 * norm.cdf(z) # use CDF to get p-value from smaller statistic + >>> print(p) + 0.11134688653314041 + + If desired, we can disable the continuity correction to get a result + that agrees with that reported in [4]_. 
+ + >>> _, pnorm = mannwhitneyu(males, females, use_continuity=False, + ... method="asymptotic") + >>> print(pnorm) + 0.0864107329737 + + Regardless of whether we perform an exact or asymptotic test, the + probability of the test statistic being as extreme or more extreme by + chance exceeds 5%, so we do not consider the results statistically + significant. + + Suppose that, before seeing the data, we had hypothesized that females + would tend to be diagnosed at a younger age than males. + In that case, it would be natural to provide the female ages as the + first input, and we would have performed a one-sided test using + ``alternative = 'less'``: females are diagnosed at an age that is + stochastically less than that of males. + + >>> res = mannwhitneyu(females, males, alternative="less", method="exact") + >>> print(res) + MannwhitneyuResult(statistic=3.0, pvalue=0.05555555555555555) + + Again, the probability of getting a sufficiently low value of the + test statistic by chance under the null hypothesis is greater than 5%, + so we do not reject the null hypothesis in favor of our alternative. + + If it is reasonable to assume that the means of samples from the + populations are normally distributed, we could have used a t-test to + perform the analysis. + + >>> from scipy.stats import ttest_ind + >>> res = ttest_ind(females, males, alternative="less") + >>> print(res) + TtestResult(statistic=-2.239334696520584, + pvalue=0.030068441095757924, + df=7.0) + + Under this assumption, the *p*-value would be low enough to reject the + null hypothesis in favor of the alternative. 
+ + ''' + + x, y, use_continuity, alternative, axis_int, method = ( + _mwu_input_validation(x, y, use_continuity, alternative, axis, method)) + + x, y, xy = _broadcast_concatenate(x, y, axis) + + n1, n2 = x.shape[-1], y.shape[-1] + + # Follows [2] + ranks, t = _rankdata(xy, 'average', return_ties=True) # method 2, step 1 + R1 = ranks[..., :n1].sum(axis=-1) # method 2, step 2 + U1 = R1 - n1*(n1+1)/2 # method 2, step 3 + U2 = n1 * n2 - U1 # as U1 + U2 = n1 * n2 + + if alternative == "greater": + U, f = U1, 1 # U is the statistic to use for p-value, f is a factor + elif alternative == "less": + U, f = U2, 1 # Due to symmetry, use SF of U2 rather than CDF of U1 + else: + U, f = np.maximum(U1, U2), 2 # multiply SF by two for two-sided test + + if method == "auto": + method = _mwu_choose_method(n1, n2, np.any(t > 1)) + + if method == "exact": + _mwu_state.set_shapes(n1, n2) + p = _mwu_state.sf(U.astype(int)) + elif method == "asymptotic": + z = _get_mwu_z(U, n1, n2, t, continuity=use_continuity) + p = stats.norm.sf(z) + else: # `PermutationMethod` instance (already validated) + def statistic(x, y, axis): + return mannwhitneyu(x, y, use_continuity=use_continuity, + alternative=alternative, axis=axis, + method="asymptotic").statistic + + res = stats.permutation_test((x, y), statistic, axis=axis, + **method._asdict(), alternative=alternative) + p = res.pvalue + f = 1 + + p *= f + + # Ensure that test statistic is not greater than 1 + # This could happen for exact test when U = m*n/2 + p = np.clip(p, 0, 1) + + return MannwhitneyuResult(U1, p) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_morestats.py b/parrot/lib/python3.10/site-packages/scipy/stats/_morestats.py new file mode 100644 index 0000000000000000000000000000000000000000..19f24929bb477a3174afd51c2c71075b86e3684d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_morestats.py @@ -0,0 +1,4997 @@ +from __future__ import annotations +import math +import warnings +from collections import 
namedtuple + +import numpy as np +from numpy import (isscalar, r_, log, around, unique, asarray, zeros, + arange, sort, amin, amax, sqrt, array, + pi, exp, ravel, count_nonzero) + +from scipy import optimize, special, interpolate, stats +from scipy._lib._bunch import _make_tuple_bunch +from scipy._lib._util import _rename_parameter, _contains_nan, _get_nan +from scipy._lib._array_api import (array_namespace, xp_minimum, size as xp_size, + xp_moveaxis_to_end) + +from ._ansari_swilk_statistics import gscale, swilk +from . import _stats_py, _wilcoxon +from ._fit import FitResult +from ._stats_py import (find_repeats, _get_pvalue, SignificanceResult, # noqa:F401 + _SimpleNormal, _SimpleChi2) +from .contingency import chi2_contingency +from . import distributions +from ._distn_infrastructure import rv_generic +from ._axis_nan_policy import _axis_nan_policy_factory, _broadcast_arrays + + +__all__ = ['mvsdist', + 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', + 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', + 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', + 'fligner', 'mood', 'wilcoxon', 'median_test', + 'circmean', 'circvar', 'circstd', 'anderson_ksamp', + 'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax', + 'yeojohnson_normplot', 'directional_stats', + 'false_discovery_control' + ] + + +Mean = namedtuple('Mean', ('statistic', 'minmax')) +Variance = namedtuple('Variance', ('statistic', 'minmax')) +Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) + + +def bayes_mvs(data, alpha=0.90): + r""" + Bayesian confidence intervals for the mean, var, and std. + + Parameters + ---------- + data : array_like + Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. + Requires 2 or more data points. + alpha : float, optional + Probability that the returned confidence interval contains + the true parameter. 
+ + Returns + ------- + mean_cntr, var_cntr, std_cntr : tuple + The three results are for the mean, variance and standard deviation, + respectively. Each result is a tuple of the form:: + + (center, (lower, upper)) + + with `center` the mean of the conditional pdf of the value given the + data, and `(lower, upper)` a confidence interval, centered on the + median, containing the estimate to a probability ``alpha``. + + See Also + -------- + mvsdist + + Notes + ----- + Each tuple of mean, variance, and standard deviation estimates represent + the (center, (lower, upper)) with center the mean of the conditional pdf + of the value given the data and (lower, upper) is a confidence interval + centered on the median, containing the estimate to a probability + ``alpha``. + + Converts data to 1-D and assumes all data has the same mean and variance. + Uses Jeffrey's prior for variance and std. + + Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` + + References + ---------- + T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and + standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, + 2006. 
+ + Examples + -------- + First a basic example to demonstrate the outputs: + + >>> from scipy import stats + >>> data = [6, 9, 12, 7, 8, 8, 13] + >>> mean, var, std = stats.bayes_mvs(data) + >>> mean + Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467)) + >>> var + Variance(statistic=10.0, minmax=(3.176724206, 24.45910382)) + >>> std + Std_dev(statistic=2.9724954732045084, + minmax=(1.7823367265645143, 4.945614605014631)) + + Now we generate some normally distributed random data, and get estimates of + mean and standard deviation with 95% confidence intervals for those + estimates: + + >>> n_samples = 100000 + >>> data = stats.norm.rvs(size=n_samples) + >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.hist(data, bins=100, density=True, label='Histogram of data') + >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean') + >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r', + ... alpha=0.2, label=r'Estimated mean (95% limits)') + >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale') + >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2, + ... label=r'Estimated scale (95% limits)') + + >>> ax.legend(fontsize=10) + >>> ax.set_xlim([-4, 4]) + >>> ax.set_ylim([0, 0.5]) + >>> plt.show() + + """ + m, v, s = mvsdist(data) + if alpha >= 1 or alpha <= 0: + raise ValueError(f"0 < alpha < 1 is required, but {alpha=} was given.") + + m_res = Mean(m.mean(), m.interval(alpha)) + v_res = Variance(v.mean(), v.interval(alpha)) + s_res = Std_dev(s.mean(), s.interval(alpha)) + + return m_res, v_res, s_res + + +def mvsdist(data): + """ + 'Frozen' distributions for mean, variance, and standard deviation of data. + + Parameters + ---------- + data : array_like + Input array. Converted to 1-D using ravel. + Requires 2 or more data-points. 
+ + Returns + ------- + mdist : "frozen" distribution object + Distribution object representing the mean of the data. + vdist : "frozen" distribution object + Distribution object representing the variance of the data. + sdist : "frozen" distribution object + Distribution object representing the standard deviation of the data. + + See Also + -------- + bayes_mvs + + Notes + ----- + The return values from ``bayes_mvs(data)`` is equivalent to + ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. + + In other words, calling ``.mean()`` and ``.interval(0.90)`` + on the three distribution objects returned from this function will give + the same results that are returned from `bayes_mvs`. + + References + ---------- + T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and + standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, + 2006. + + Examples + -------- + >>> from scipy import stats + >>> data = [6, 9, 12, 7, 8, 8, 13] + >>> mean, var, std = stats.mvsdist(data) + + We now have frozen distribution objects "mean", "var" and "std" that we can + examine: + + >>> mean.mean() + 9.0 + >>> mean.interval(0.95) + (6.6120585482655692, 11.387941451734431) + >>> mean.std() + 1.1952286093343936 + + """ + x = ravel(data) + n = len(x) + if n < 2: + raise ValueError("Need at least 2 data-points.") + xbar = x.mean() + C = x.var() + if n > 1000: # gaussian approximations for large n + mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) + sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) + vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) + else: + nm1 = n - 1 + fac = n * C / 2. + val = nm1 / 2. 
+ mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) + sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) + vdist = distributions.invgamma(val, scale=fac) + return mdist, vdist, sdist + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None +) +def kstat(data, n=2, *, axis=None): + r""" + Return the `n` th k-statistic ( ``1<=n<=4`` so far). + + The `n` th k-statistic ``k_n`` is the unique symmetric unbiased estimator of the + `n` th cumulant :math:`\kappa_n` [1]_ [2]_. + + Parameters + ---------- + data : array_like + Input array. + n : int, {1, 2, 3, 4}, optional + Default is equal to 2. + axis : int or None, default: None + If an int, the axis of the input along which to compute the statistic. + The statistic of each axis-slice (e.g. row) of the input will appear + in a corresponding element of the output. If ``None``, the input will + be raveled before computing the statistic. + + Returns + ------- + kstat : float + The `n` th k-statistic. + + See Also + -------- + kstatvar : Returns an unbiased estimator of the variance of the k-statistic + moment : Returns the n-th central moment about the mean for a sample. + + Notes + ----- + For a sample size :math:`n`, the first few k-statistics are given by + + .. math:: + + k_1 &= \frac{S_1}{n}, \\ + k_2 &= \frac{nS_2 - S_1^2}{n(n-1)}, \\ + k_3 &= \frac{2S_1^3 - 3nS_1S_2 + n^2S_3}{n(n-1)(n-2)}, \\ + k_4 &= \frac{-6S_1^4 + 12nS_1^2S_2 - 3n(n-1)S_2^2 - 4n(n+1)S_1S_3 + + n^2(n+1)S_4}{n (n-1)(n-2)(n-3)}, + + where + + .. math:: + + S_r \equiv \sum_{i=1}^n X_i^r, + + and :math:`X_i` is the :math:`i` th data point. + + References + ---------- + .. [1] http://mathworld.wolfram.com/k-Statistic.html + + .. 
[2] http://mathworld.wolfram.com/Cumulant.html

    Examples
    --------
    >>> from scipy import stats
    >>> from numpy.random import default_rng
    >>> rng = default_rng()

    As sample size increases, `n`-th moment and `n`-th k-statistic converge to the
    same number (although they aren't identical). In the case of the normal
    distribution, they converge to zero.

    >>> for i in range(2,8):
    ...     x = rng.normal(size=10**i)
    ...     m, k = stats.moment(x, 3), stats.kstat(x, 3)
    ...     print(f"{i=}: {m=:.3g}, {k=:.3g}, {(m-k)=:.3g}")
    i=2: m=-0.631, k=-0.651, (m-k)=0.0194  # random
    i=3: m=0.0282, k=0.0283, (m-k)=-8.49e-05
    i=4: m=-0.0454, k=-0.0454, (m-k)=1.36e-05
    i=5: m=7.53e-05, k=7.53e-05, (m-k)=-2.26e-09
    i=6: m=0.00166, k=0.00166, (m-k)=-4.99e-09
    i=7: m=-2.88e-06, k=-2.88e-06, (m-k)=8.63e-13
    """
    xp = array_namespace(data)
    data = xp.asarray(data)
    if n > 4 or n < 1:
        raise ValueError("k-statistics only supported for 1<=n<=4")
    n = int(n)
    if axis is None:
        # Flatten so the statistic is computed over the whole sample.
        data = xp.reshape(data, (-1,))
        axis = 0

    N = data.shape[axis]

    # S[r] is the power sum S_r = sum(x**r); S[0] is a placeholder so the
    # list index matches the exponent r used in the formulas in the Notes.
    S = [None] + [xp.sum(data**k, axis=axis) for k in range(1, n + 1)]
    if n == 1:
        return S[1] * 1.0/N
    elif n == 2:
        return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
    elif n == 3:
        return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
    elif n == 4:
        return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
                 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
                (N*(N-1.0)*(N-2.0)*(N-3.0)))
    else:
        # Unreachable: `n` was validated to be in {1, 2, 3, 4} above.
        raise ValueError("Should not be here.")


@_axis_nan_policy_factory(
    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
)
def kstatvar(data, n=2, *, axis=None):
    r"""Return an unbiased estimator of the variance of the k-statistic.

    See `kstat` and [1]_ for more details about the k-statistic.

    Parameters
    ----------
    data : array_like
        Input array.
    n : int, {1, 2}, optional
        Default is equal to 2.
    axis : int or None, default: None
        If an int, the axis of the input along which to compute the statistic.
        The statistic of each axis-slice (e.g. row) of the input will appear
        in a corresponding element of the output. If ``None``, the input will
        be raveled before computing the statistic.

    Returns
    -------
    kstatvar : float
        The `n` th k-statistic variance.

    See Also
    --------
    kstat : Returns the n-th k-statistic.
    moment : Returns the n-th central moment about the mean for a sample.

    Notes
    -----
    Unbiased estimators of the variances of the first two k-statistics are given by

    .. math::

        \mathrm{var}(k_1) &= \frac{k_2}{n}, \\
        \mathrm{var}(k_2) &= \frac{2k_2^2n + (n-1)k_4}{n(n + 1)}.

    References
    ----------
    .. [1] http://mathworld.wolfram.com/k-Statistic.html

    """  # noqa: E501
    xp = array_namespace(data)
    data = xp.asarray(data)
    if axis is None:
        # Flatten so the statistic is computed over the whole sample.
        data = xp.reshape(data, (-1,))
        axis = 0
    N = data.shape[axis]

    if n == 1:
        # var(k_1) = k_2 / n: the familiar variance of the sample mean.
        return kstat(data, n=2, axis=axis, _no_deco=True) * 1.0/N
    elif n == 2:
        # var(k_2) = (2 N k_2^2 + (N - 1) k_4) / (N (N + 1)); see Notes.
        k2 = kstat(data, n=2, axis=axis, _no_deco=True)
        k4 = kstat(data, n=4, axis=axis, _no_deco=True)
        return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
    else:
        raise ValueError("Only n=1 or n=2 supported.")


def _calc_uniform_order_statistic_medians(n):
    """Approximations of uniform order statistic medians.

    Parameters
    ----------
    n : int
        Sample size.

    Returns
    -------
    v : 1d float array
        Approximations of the order statistic medians.

    References
    ----------
    .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
        Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.

    Examples
    --------
    Order statistics of the uniform distribution on the unit interval
    are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
    the interval, but the distributions are skewed in a way that
    pushes the medians slightly towards the endpoints of the unit interval:

    >>> import numpy as np
    >>> n = 4
    >>> k = np.arange(1, n+1)
    >>> from scipy.stats import beta
    >>> a = k
    >>> b = n-k+1
    >>> beta.mean(a, b)
    array([0.2, 0.4, 0.6, 0.8])
    >>> beta.median(a, b)
    array([0.15910358, 0.38572757, 0.61427243, 0.84089642])

    The Filliben approximation uses the exact medians of the smallest
    and greatest order statistics, and the remaining medians are approximated
    by points spread evenly across a sub-interval of the unit interval:

    >>> from scipy.stats._morestats import _calc_uniform_order_statistic_medians
    >>> _calc_uniform_order_statistic_medians(n)
    array([0.15910358, 0.38545246, 0.61454754, 0.84089642])

    This plot shows the skewed distributions of the order statistics
    of a sample of size four from a uniform distribution on the unit interval:

    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
    >>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
    >>> plt.figure()
    >>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])

    """
    v = np.empty(n, dtype=np.float64)
    # Exact medians for the extreme order statistics: the maximum of n
    # uniforms has median 0.5**(1/n); the minimum is its mirror image.
    v[-1] = 0.5**(1.0 / n)
    v[0] = 1 - v[-1]
    # Filliben's linear approximation for the interior order statistics.
    i = np.arange(2, n)
    v[1:-1] = (i - 0.3175) / (n + 0.365)
    return v


def _parse_dist_kw(dist, enforce_subclass=True):
    """Parse `dist` keyword.

    Parameters
    ----------
    dist : str or stats.distributions instance.
        Several functions take `dist` as a keyword, hence this utility
        function.
    enforce_subclass : bool, optional
        If True (default), `dist` needs to be a
        `_distn_infrastructure.rv_generic` instance.
        It can sometimes be useful to set this keyword to False, if a function
        wants to accept objects that just look somewhat like such an instance
        (for example, they have a ``ppf`` method).

    """
    if isinstance(dist, rv_generic):
        # Already a distribution instance; use as-is.
        pass
    elif isinstance(dist, str):
        try:
            # Look the name up on `scipy.stats.distributions`.
            dist = getattr(distributions, dist)
        except AttributeError as e:
            raise ValueError(f"{dist} is not a valid distribution name") from e
    elif enforce_subclass:
        msg = ("`dist` should be a stats.distributions instance or a string "
               "with the name of such a distribution.")
        raise ValueError(msg)
    # With enforce_subclass=False, any other object is passed through
    # unchanged (duck typing: callers only require e.g. a ``ppf`` method).

    return dist


def _add_axis_labels_title(plot, xlabel, ylabel, title):
    """Helper function to add axes labels and a title to stats plots."""
    try:
        if hasattr(plot, 'set_title'):
            # Matplotlib Axes instance or something that looks like it
            plot.set_title(title)
            plot.set_xlabel(xlabel)
            plot.set_ylabel(ylabel)
        else:
            # matplotlib.pyplot module
            plot.title(title)
            plot.xlabel(xlabel)
            plot.ylabel(ylabel)
    except Exception:
        # Not an MPL object or something that looks (enough) like it.
        # Don't crash on adding labels or title
        pass


def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
    """
    Calculate quantiles for a probability plot, and optionally show the plot.

    Generates a probability plot of sample data against the quantiles of a
    specified theoretical distribution (the normal distribution by default).
    `probplot` optionally calculates a best-fit line for the data and plots the
    results using Matplotlib or a given plot function.

    Parameters
    ----------
    x : array_like
        Sample/response data from which `probplot` creates the plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (shape parameters plus location
        and scale).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name. The default is 'norm' for a
        normal probability plot. Objects that look enough like a
        stats.distributions instance (i.e. they have a ``ppf`` method) are also
        accepted.
+ fit : bool, optional + Fit a least-squares regression (best-fit) line to the sample data if + True (default). + plot : object, optional + If given, plots the quantiles. + If given and `fit` is True, also plots the least squares fit. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. + rvalue : bool, optional + If `plot` is provided and `fit` is True, setting `rvalue` to True + includes the coefficient of determination on the plot. + Default is False. + + Returns + ------- + (osm, osr) : tuple of ndarrays + Tuple of theoretical quantiles (osm, or order statistic medians) and + ordered responses (osr). `osr` is simply sorted input `x`. + For details on how `osm` is calculated see the Notes section. + (slope, intercept, r) : tuple of floats, optional + Tuple containing the result of the least-squares fit, if that is + performed by `probplot`. `r` is the square root of the coefficient of + determination. If ``fit=False`` and ``plot=None``, this tuple is not + returned. + + Notes + ----- + Even if `plot` is given, the figure is not shown or saved by `probplot`; + ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after + calling `probplot`. + + `probplot` generates a probability plot, which should not be confused with + a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this + type, see ``statsmodels.api.ProbPlot``. + + The formula used for the theoretical quantiles (horizontal axis of the + probability plot) is Filliben's estimate:: + + quantiles = dist.ppf(val), for + + 0.5**(1/n), for i = n + val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1 + 1 - 0.5**(1/n), for i = 1 + + where ``i`` indicates the i-th ordered value and ``n`` is the total number + of values. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> nsample = 100 + >>> rng = np.random.default_rng() + + A t distribution with small degrees of freedom: + + >>> ax1 = plt.subplot(221) + >>> x = stats.t.rvs(3, size=nsample, random_state=rng) + >>> res = stats.probplot(x, plot=plt) + + A t distribution with larger degrees of freedom: + + >>> ax2 = plt.subplot(222) + >>> x = stats.t.rvs(25, size=nsample, random_state=rng) + >>> res = stats.probplot(x, plot=plt) + + A mixture of two normal distributions with broadcasting: + + >>> ax3 = plt.subplot(223) + >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], + ... size=(nsample//2,2), random_state=rng).ravel() + >>> res = stats.probplot(x, plot=plt) + + A standard normal distribution: + + >>> ax4 = plt.subplot(224) + >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample, random_state=rng) + >>> res = stats.probplot(x, plot=plt) + + Produce a new figure with a loggamma distribution, using the ``dist`` and + ``sparams`` keywords: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> x = stats.loggamma.rvs(c=2.5, size=500, random_state=rng) + >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) + >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") + + Show the results with Matplotlib: + + >>> plt.show() + + """ + x = np.asarray(x) + if x.size == 0: + if fit: + return (x, x), (np.nan, np.nan, 0.0) + else: + return x, x + + osm_uniform = _calc_uniform_order_statistic_medians(len(x)) + dist = _parse_dist_kw(dist, enforce_subclass=False) + if sparams is None: + sparams = () + if isscalar(sparams): + sparams = (sparams,) + if not isinstance(sparams, tuple): + sparams = tuple(sparams) + + osm = dist.ppf(osm_uniform, *sparams) + osr = sort(x) + if fit: + # perform a linear least squares fit. 
+ slope, intercept, r, prob, _ = _stats_py.linregress(osm, osr) + + if plot is not None: + plot.plot(osm, osr, 'bo') + if fit: + plot.plot(osm, slope*osm + intercept, 'r-') + _add_axis_labels_title(plot, xlabel='Theoretical quantiles', + ylabel='Ordered Values', + title='Probability Plot') + + # Add R^2 value to the plot as text + if fit and rvalue: + xmin = amin(osm) + xmax = amax(osm) + ymin = amin(x) + ymax = amax(x) + posx = xmin + 0.70 * (xmax - xmin) + posy = ymin + 0.01 * (ymax - ymin) + plot.text(posx, posy, "$R^2=%1.4f$" % r**2) + + if fit: + return (osm, osr), (slope, intercept, r) + else: + return osm, osr + + +def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): + """Calculate the shape parameter that maximizes the PPCC. + + The probability plot correlation coefficient (PPCC) plot can be used + to determine the optimal shape parameter for a one-parameter family + of distributions. ``ppcc_max`` returns the shape parameter that would + maximize the probability plot correlation coefficient for the given + data to a one-parameter family of distributions. + + Parameters + ---------- + x : array_like + Input array. + brack : tuple, optional + Triple (a,b,c) where (a>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> c = 2.5 + >>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng) + + Generate the PPCC plot for this data with the Weibull distribution. + + >>> fig, ax = plt.subplots(figsize=(8, 6)) + >>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax) + + We calculate the value where the shape should reach its maximum and a + red line is drawn there. The line should coincide with the highest + point in the PPCC graph. 
+ + >>> cmax = stats.ppcc_max(x, brack=(c/2, 2*c), dist='weibull_min') + >>> ax.axvline(cmax, color='r') + >>> plt.show() + + """ + dist = _parse_dist_kw(dist) + osm_uniform = _calc_uniform_order_statistic_medians(len(x)) + osr = sort(x) + + # this function computes the x-axis values of the probability plot + # and computes a linear regression (including the correlation) + # and returns 1-r so that a minimization function maximizes the + # correlation + def tempfunc(shape, mi, yvals, func): + xvals = func(mi, shape) + r, prob = _stats_py.pearsonr(xvals, yvals) + return 1 - r + + return optimize.brent(tempfunc, brack=brack, + args=(osm_uniform, osr, dist.ppf)) + + +def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): + """Calculate and optionally plot probability plot correlation coefficient. + + The probability plot correlation coefficient (PPCC) plot can be used to + determine the optimal shape parameter for a one-parameter family of + distributions. It cannot be used for distributions without shape + parameters + (like the normal distribution) or with multiple shape parameters. + + By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A + Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed + distributions via an approximately normal one, and is therefore + particularly useful in practice. + + Parameters + ---------- + x : array_like + Input array. + a, b : scalar + Lower and upper bounds of the shape parameter to use. + dist : str or stats.distributions instance, optional + Distribution or distribution function name. Objects that look enough + like a stats.distributions instance (i.e. they have a ``ppf`` method) + are also accepted. The default is ``'tukeylambda'``. + plot : object, optional + If given, plots PPCC against the shape parameter. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. 
+ Default is None, which means that no plot is created. + N : int, optional + Number of points on the horizontal axis (equally distributed from + `a` to `b`). + + Returns + ------- + svals : ndarray + The shape values for which `ppcc` was calculated. + ppcc : ndarray + The calculated probability plot correlation coefficient values. + + See Also + -------- + ppcc_max, probplot, boxcox_normplot, tukeylambda + + References + ---------- + J.J. Filliben, "The Probability Plot Correlation Coefficient Test for + Normality", Technometrics, Vol. 17, pp. 111-117, 1975. + + Examples + -------- + First we generate some random data from a Weibull distribution + with shape parameter 2.5, and plot the histogram of the data: + + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> c = 2.5 + >>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng) + + Take a look at the histogram of the data. + + >>> fig1, ax = plt.subplots(figsize=(9, 4)) + >>> ax.hist(x, bins=50) + >>> ax.set_title('Histogram of x') + >>> plt.show() + + Now we explore this data with a PPCC plot as well as the related + probability plot and Box-Cox normplot. 
A red line is drawn where we
    expect the PPCC value to be maximal (at the shape parameter ``c``
    used above):

    >>> fig2 = plt.figure(figsize=(12, 4))
    >>> ax1 = fig2.add_subplot(1, 3, 1)
    >>> ax2 = fig2.add_subplot(1, 3, 2)
    >>> ax3 = fig2.add_subplot(1, 3, 3)
    >>> res = stats.probplot(x, plot=ax1)
    >>> res = stats.boxcox_normplot(x, -4, 4, plot=ax2)
    >>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax3)
    >>> ax3.axvline(c, color='r')
    >>> plt.show()

    """
    if b <= a:
        raise ValueError("`b` has to be larger than `a`.")

    # Evaluate the PPCC on an evenly spaced grid of N shape values in [a, b].
    svals = np.linspace(a, b, num=N)
    ppcc = np.empty_like(svals)
    for k, sval in enumerate(svals):
        # `probplot` returns ((osm, osr), (slope, intercept, r)); the
        # correlation coefficient r is the last element of the fit tuple.
        _, r2 = probplot(x, sval, dist=dist, fit=True)
        ppcc[k] = r2[-1]

    if plot is not None:
        plot.plot(svals, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel='Shape Values',
                               ylabel='Prob Plot Corr. Coef.',
                               title=f'({dist}) PPCC Plot')

    return svals, ppcc


def _log_mean(logx):
    # Compute log(mean(x)) from log(x) without leaving log space:
    # log(mean(x)) = logsumexp(log(x)) - log(n).
    return special.logsumexp(logx, axis=0) - np.log(len(logx))


def _log_var(logx):
    # Compute log(var(x)) from log(x) without leaving log space.
    logmean = _log_mean(logx)
    # Represent subtraction in log space with complex numbers: adding
    # i*pi to log(mean) encodes -mean, so logsumexp([log(x), log(mean)+i*pi])
    # equals log(x - mean) (complex-valued where x < mean).
    pij = np.full_like(logx, np.pi * 1j, dtype=np.complex128)
    logxmu = special.logsumexp([logx, logmean + pij], axis=0)
    # var(x) = mean((x - mean)**2); squaring makes the value real, so the
    # real part discards only numerical residue in the imaginary component.
    return np.real(special.logsumexp(2 * logxmu, axis=0)) - np.log(len(logx))


def boxcox_llf(lmb, data):
    r"""The boxcox log-likelihood function.

    Parameters
    ----------
    lmb : scalar
        Parameter for Box-Cox transformation. See `boxcox` for details.
    data : array_like
        Data to calculate Box-Cox log-likelihood for. If `data` is
        multi-dimensional, the log-likelihood is calculated along the first
        axis.

    Returns
    -------
    llf : float or ndarray
        Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
        an array otherwise.

    See Also
    --------
    boxcox, probplot, boxcox_normplot, boxcox_normmax

    Notes
    -----
    The Box-Cox log-likelihood function is defined here as

    ..
math:: + + llf = (\lambda - 1) \sum_i(\log(x_i)) - + N/2 \log(\sum_i (y_i - \bar{y})^2 / N), + + where ``y`` is the Box-Cox transformed input data ``x``. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes + + Generate some random variates and calculate Box-Cox log-likelihood values + for them for a range of ``lmbda`` values: + + >>> rng = np.random.default_rng() + >>> x = stats.loggamma.rvs(5, loc=10, size=1000, random_state=rng) + >>> lmbdas = np.linspace(-2, 10) + >>> llf = np.zeros(lmbdas.shape, dtype=float) + >>> for ii, lmbda in enumerate(lmbdas): + ... llf[ii] = stats.boxcox_llf(lmbda, x) + + Also find the optimal lmbda value with `boxcox`: + + >>> x_most_normal, lmbda_optimal = stats.boxcox(x) + + Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a + horizontal line to check that that's really the optimum: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(lmbdas, llf, 'b.-') + >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') + >>> ax.set_xlabel('lmbda parameter') + >>> ax.set_ylabel('Box-Cox log-likelihood') + + Now add some probability plots to show that where the log-likelihood is + maximized the data transformed with `boxcox` looks closest to normal: + + >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' + >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): + ... xt = stats.boxcox(x, lmbda=lmbda) + ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) + ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) + ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') + ... ax_inset.set_xticklabels([]) + ... ax_inset.set_yticklabels([]) + ... 
ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda)

    >>> plt.show()

    """
    data = np.asarray(data)
    N = data.shape[0]
    if N == 0:
        # Empty sample: no likelihood to evaluate.
        return np.nan

    logdata = np.log(data)

    # Compute the variance of the transformed data.
    if lmb == 0:
        logvar = np.log(np.var(logdata, axis=0))
    else:
        # Transform without the constant offset 1/lmb. The offset does
        # not affect the variance, and the subtraction of the offset can
        # lead to loss of precision.
        # Division by lmb can be factored out to enhance numerical stability.
        logx = lmb * logdata
        logvar = _log_var(logx) - 2 * np.log(abs(lmb))

    return (lmb - 1) * np.sum(logdata, axis=0) - N/2 * logvar


def _boxcox_conf_interval(x, lmax, alpha):
    # Profile-likelihood confidence interval for the Box-Cox lambda.
    # Need to find the lambda for which
    #   f(x, lmbda) >= f(x, lmax) - 0.5 * chi^2_{alpha; 1}
    fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
    target = boxcox_llf(lmax, x) - fac

    def rootfunc(lmbda, data, target):
        # Positive inside the confidence interval, negative outside;
        # its roots are the interval endpoints.
        return boxcox_llf(lmbda, data) - target

    # Find positive endpoint of interval in which answer is to be found:
    # step upward from lmax until the log-likelihood drops below the
    # target, which brackets the root for brentq (give up after 500 steps).
    newlm = lmax + 0.5
    N = 0
    while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
        newlm += 0.1
        N += 1

    if N == 500:
        raise RuntimeError("Could not find endpoint.")

    lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))

    # Now find negative interval in the same way
    newlm = lmax - 0.5
    N = 0
    while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
        newlm -= 0.1
        N += 1

    if N == 500:
        raise RuntimeError("Could not find endpoint.")

    lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
    return lmminus, lmplus


def boxcox(x, lmbda=None, alpha=None, optimizer=None):
    r"""Return a dataset transformed by a Box-Cox power transformation.

    Parameters
    ----------
    x : ndarray
        Input array to be transformed.

        If `lmbda` is not None, this is an alias of
        `scipy.special.boxcox`.
        Returns nan if ``x < 0``; returns -inf if ``x == 0 and lmbda < 0``.
If `lmbda` is None, array must be positive, 1-dimensional, and
        non-constant.

    lmbda : scalar, optional
        If `lmbda` is None (default), find the value of `lmbda` that maximizes
        the log-likelihood function and return it as the second output
        argument.

        If `lmbda` is not None, do the transformation for that value.

    alpha : float, optional
        If `lmbda` is None and `alpha` is not None (default), return the
        ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third
        output argument. Must be between 0.0 and 1.0.

        If `lmbda` is not None, `alpha` is ignored.
    optimizer : callable, optional
        If `lmbda` is None, `optimizer` is the scalar optimizer used to find
        the value of `lmbda` that minimizes the negative log-likelihood
        function. `optimizer` is a callable that accepts one argument:

        fun : callable
            The objective function, which evaluates the negative
            log-likelihood function at a provided value of `lmbda`

        and returns an object, such as an instance of
        `scipy.optimize.OptimizeResult`, which holds the optimal value of
        `lmbda` in an attribute `x`.

        See the example in `boxcox_normmax` or the documentation of
        `scipy.optimize.minimize_scalar` for more information.

        If `lmbda` is not None, `optimizer` is ignored.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the `lmbda` that maximizes the log-likelihood function.
    (min_ci, max_ci) : tuple of float, optional
        If `lmbda` parameter is None and `alpha` is not None, this returned
        tuple of floats represents the minimum and maximum confidence limits
        given `alpha`.

    See Also
    --------
    probplot, boxcox_normplot, boxcox_normmax, boxcox_llf

    Notes
    -----
    The Box-Cox transform is given by::

        y = (x**lmbda - 1) / lmbda,  for lmbda != 0
            log(x),                  for lmbda = 0

    `boxcox` requires the input data to be positive. Sometimes a Box-Cox
    transformation provides a shift parameter to achieve this; `boxcox` does
    not. Such a shift parameter is equivalent to adding a positive constant to
    `x` before calling `boxcox`.

    The confidence limits returned when `alpha` is provided give the interval
    where:

    .. math::

        llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),

    with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
    function.

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
    Royal Statistical Society B, 26, 211-252 (1964).

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    We generate some random variates from a non-normal distribution and make a
    probability plot for it, to show it is non-normal in the tails:

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(211)
    >>> x = stats.loggamma.rvs(5, size=500) + 5
    >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
    >>> ax1.set_xlabel('')
    >>> ax1.set_title('Probplot against normal distribution')

    We now use `boxcox` to transform the data so it's closest to normal:

    >>> ax2 = fig.add_subplot(212)
    >>> xt, _ = stats.boxcox(x)
    >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
    >>> ax2.set_title('Probplot after Box-Cox transformation')

    >>> plt.show()

    """
    x = np.asarray(x)

    if lmbda is not None:  # single transformation
        return special.boxcox(x, lmbda)

    # lmbda is None: the lambda will be fitted by MLE below, which requires
    # a valid one-dimensional, positive, non-degenerate sample.
    if x.ndim != 1:
        raise ValueError("Data must be 1-dimensional.")

    if x.size == 0:
        return x

    if np.all(x == x[0]):
        raise ValueError("Data must not be constant.")

    if np.any(x <= 0):
        raise ValueError("Data must be positive.")

    # If lmbda=None, find the lmbda that maximizes the log-likelihood function.
    lmax = boxcox_normmax(x, method='mle', optimizer=optimizer)
    y = boxcox(x, lmax)

    if alpha is None:
        return y, lmax
    else:
        # Find confidence interval
        interval = _boxcox_conf_interval(x, lmax, alpha)
        return y, lmax, interval


def _boxcox_inv_lmbda(x, y):
    # Compute lmbda given x and y for Box-Cox transformation, i.e. invert
    # y = (x**lmbda - 1) / lmbda for lmbda. The closed-form solution uses
    # the k=-1 branch of the Lambert W function; the imaginary part of the
    # result is numerical residue and is discarded.
    num = special.lambertw(-(x ** (-1 / y)) * np.log(x) / y, k=-1)
    return np.real(-num / np.log(x) - 1 / y)


class _BigFloat:
    # Sentinel default for the `ymax` keyword of `boxcox_normmax`, standing
    # in for "the largest finite value representable in the data's dtype"
    # (resolved inside `boxcox_normmax` once the dtype is known).
    def __repr__(self):
        return "BIG_FLOAT"


def boxcox_normmax(
    x, brack=None, method='pearsonr', optimizer=None, *, ymax=_BigFloat()
):
    """Compute optimal Box-Cox transform parameter for input data.

    Parameters
    ----------
    x : array_like
        Input array. All entries must be positive, finite, real numbers.
    brack : 2-tuple, optional, default (-2.0, 2.0)
        The starting interval for a downhill bracket search for the default
        `optimize.brent` solver. Note that this is in most cases not
        critical; the final result is allowed to be outside this bracket.
        If `optimizer` is passed, `brack` must be None.
    method : str, optional
        The method to determine the optimal transform parameter (`boxcox`
        ``lmbda`` parameter). Options are:

        'pearsonr' (default)
            Maximizes the Pearson correlation coefficient between
            ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
            normally-distributed.

        'mle'
            Maximizes the log-likelihood `boxcox_llf`. This is the method used
            in `boxcox`.

        'all'
            Use all optimization methods available, and return all results.
            Useful to compare different methods.
    optimizer : callable, optional
        `optimizer` is a callable that accepts one argument:

        fun : callable
            The objective function to be minimized. `fun` accepts one argument,
            the Box-Cox transform parameter `lmbda`, and returns the value of
            the function (e.g., the negative log-likelihood) at the provided
            argument.
The job of `optimizer` is to find the value of `lmbda`
            that *minimizes* `fun`.

        and returns an object, such as an instance of
        `scipy.optimize.OptimizeResult`, which holds the optimal value of
        `lmbda` in an attribute `x`.

        See the example below or the documentation of
        `scipy.optimize.minimize_scalar` for more information.
    ymax : float, optional
        The unconstrained optimal transform parameter may cause Box-Cox
        transformed data to have extreme magnitude or even overflow.
        This parameter constrains MLE optimization such that the magnitude
        of the transformed `x` does not exceed `ymax`. The default is
        the maximum value of the input dtype. If set to infinity,
        `boxcox_normmax` returns the unconstrained optimal lambda.
        Ignored when ``method='pearsonr'``.

    Returns
    -------
    maxlog : float or ndarray
        The optimal transform parameter found. An array instead of a scalar
        for ``method='all'``.

    See Also
    --------
    boxcox, boxcox_llf, boxcox_normplot, scipy.optimize.minimize_scalar

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    We can generate some data and determine the optimal ``lmbda`` in various
    ways:

    >>> rng = np.random.default_rng()
    >>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5
    >>> y, lmax_mle = stats.boxcox(x)
    >>> lmax_pearsonr = stats.boxcox_normmax(x)

    >>> lmax_mle
    2.217563431465757
    >>> lmax_pearsonr
    2.238318660200961
    >>> stats.boxcox_normmax(x, method='all')
    array([2.23831866, 2.21756343])

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
    >>> ax.axvline(lmax_mle, color='r')
    >>> ax.axvline(lmax_pearsonr, color='g', ls='--')

    >>> plt.show()

    Alternatively, we can define our own `optimizer` function. Suppose we
    are only interested in values of `lmbda` on the interval [6, 7], we
    want to use `scipy.optimize.minimize_scalar` with ``method='bounded'``,
    and we want to use tighter tolerances when optimizing the log-likelihood
    function. To do this, we define a function that accepts positional argument
    `fun` and uses `scipy.optimize.minimize_scalar` to minimize `fun` subject
    to the provided bounds and tolerances:

    >>> from scipy import optimize
    >>> options = {'xatol': 1e-12}  # absolute tolerance on `x`
    >>> def optimizer(fun):
    ...     return optimize.minimize_scalar(fun, bounds=(6, 7),
    ...                                     method="bounded", options=options)
    >>> stats.boxcox_normmax(x, optimizer=optimizer)
    6.000000000
    """
    x = np.asarray(x)

    if not np.all(np.isfinite(x) & (x >= 0)):
        message = ("The `x` argument of `boxcox_normmax` must contain "
                   "only positive, finite, real numbers.")
        raise ValueError(message)

    end_msg = "exceed specified `ymax`."
    if isinstance(ymax, _BigFloat):
        # Resolve the sentinel default now that the data dtype is known.
        dtype = x.dtype if np.issubdtype(x.dtype, np.floating) else np.float64
        # 10000 is a safety factor because `special.boxcox` overflows prematurely.
        ymax = np.finfo(dtype).max / 10000
        end_msg = f"overflow in {dtype}."
    elif ymax <= 0:
        raise ValueError("`ymax` must be strictly positive")

    # If optimizer is not given, define default 'brent' optimizer.
    if optimizer is None:

        # Set default value for `brack`.
        if brack is None:
            brack = (-2.0, 2.0)

        def _optimizer(func, args):
            return optimize.brent(func, args=args, brack=brack)

    # Otherwise check optimizer.
    else:
        if not callable(optimizer):
            raise ValueError("`optimizer` must be a callable")

        if brack is not None:
            raise ValueError("`brack` must be None if `optimizer` is given")

        # `optimizer` is expected to return a `OptimizeResult` object, we here
        # get the solution to the optimization problem.
        def _optimizer(func, args):
            def func_wrapped(x):
                return func(x, *args)
            # Missing `x` attribute maps to None, which is rejected below.
            return getattr(optimizer(func_wrapped), 'x', None)

    def _pearsonr(x):
        osm_uniform = _calc_uniform_order_statistic_medians(len(x))
        xvals = distributions.norm.ppf(osm_uniform)

        def _eval_pearsonr(lmbda, xvals, samps):
            # This function computes the x-axis values of the probability plot
            # and computes a linear regression (including the correlation) and
            # returns ``1 - r`` so that a minimization function maximizes the
            # correlation.
            y = boxcox(samps, lmbda)
            yvals = np.sort(y)
            r, prob = _stats_py.pearsonr(xvals, yvals)
            return 1 - r

        return _optimizer(_eval_pearsonr, args=(xvals, x))

    def _mle(x):
        def _eval_mle(lmb, data):
            # function to minimize
            return -boxcox_llf(lmb, data)

        return _optimizer(_eval_mle, args=(x,))

    def _all(x):
        maxlog = np.empty(2, dtype=float)
        maxlog[0] = _pearsonr(x)
        maxlog[1] = _mle(x)
        return maxlog

    methods = {'pearsonr': _pearsonr,
               'mle': _mle,
               'all': _all}
    if method not in methods.keys():
        raise ValueError(f"Method {method} not recognized.")

    optimfunc = methods[method]

    res = optimfunc(x)

    if res is None:
        message = ("The `optimizer` argument of `boxcox_normmax` must return "
                   "an object containing the optimal `lmbda` in attribute `x`.")
        raise ValueError(message)
    elif not np.isinf(ymax):  # adjust the final lambda
        # The magnitude of the transformed data is extremized at one of the
        # two data extremes: x > 1, boxcox(x) > 0; x < 1, boxcox(x) < 0
        xmax, xmin = np.max(x), np.min(x)
        if xmin >= 1:
            x_treme = xmax
        elif xmax <= 1:
            x_treme = xmin
        else:  # xmin < 1 < xmax
            indicator = special.boxcox(xmax, res) > abs(special.boxcox(xmin, res))
            if isinstance(res, np.ndarray):
                indicator = indicator[1]  # select corresponds with 'mle'
            x_treme = xmax if indicator else xmin

        mask = abs(special.boxcox(x_treme, res)) > ymax
        if np.any(mask):
            message = (
                f"The optimal lambda is {res}, but the returned lambda is the "
                f"constrained optimum to ensure that the maximum or the minimum "
                f"of the transformed data does not " + end_msg
            )
            warnings.warn(message, stacklevel=2)

            # Return the constrained lambda to ensure the transformation
            # does not cause overflow or exceed specified `ymax`
            constrained_res = _boxcox_inv_lmbda(x_treme, ymax * np.sign(x_treme - 1))

            if isinstance(res, np.ndarray):
                res[mask] = constrained_res
            else:
                res = constrained_res
    return res


def _normplot(method, x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox or Yeo-Johnson normality plot,
    optionally show it.

    See `boxcox_normplot` or `yeojohnson_normplot` for details.
    """

    if method == 'boxcox':
        title = 'Box-Cox Normality Plot'
        transform_func = boxcox
    else:
        title = 'Yeo-Johnson Normality Plot'
        transform_func = yeojohnson

    x = np.asarray(x)
    if x.size == 0:
        return x

    if lb <= la:
        raise ValueError("`lb` has to be larger than `la`.")

    if method == 'boxcox' and np.any(x <= 0):
        raise ValueError("Data must be positive.")

    lmbdas = np.linspace(la, lb, num=N)
    ppcc = lmbdas * 0.0
    for i, val in enumerate(lmbdas):
        # Determine for each lmbda the square root of correlation coefficient
        # of transformed x
        z = transform_func(x, lmbda=val)
        _, (_, _, r) = probplot(z, dist='norm', fit=True)
        ppcc[i] = r

    if plot is not None:
        plot.plot(lmbdas, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel='$\\lambda$',
                               ylabel='Prob Plot Corr. Coef.',
                               title=title)

    return lmbdas, ppcc


def boxcox_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox normality plot, optionally show it.

    A Box-Cox normality plot shows graphically what the best transformation
    parameter is to use in `boxcox` to obtain a distribution that is close
    to normal.

    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
        for Box-Cox transformations.
These are also the limits of the + horizontal axis of the plot if that is generated. + plot : object, optional + If given, plots the quantiles and least squares fit. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. + N : int, optional + Number of points on the horizontal axis (equally distributed from + `la` to `lb`). + + Returns + ------- + lmbdas : ndarray + The ``lmbda`` values for which a Box-Cox transform was done. + ppcc : ndarray + Probability Plot Correlelation Coefficient, as obtained from `probplot` + when fitting the Box-Cox transformed input `x` against a normal + distribution. + + See Also + -------- + probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max + + Notes + ----- + Even if `plot` is given, the figure is not shown or saved by + `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` + should be used after calling `probplot`. + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Generate some non-normally distributed data, and create a Box-Cox plot: + + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax) + + Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in + the same plot: + + >>> _, maxlog = stats.boxcox(x) + >>> ax.axvline(maxlog, color='r') + + >>> plt.show() + + """ + return _normplot('boxcox', x, la, lb, plot, N) + + +def yeojohnson(x, lmbda=None): + r"""Return a dataset transformed by a Yeo-Johnson power transformation. + + Parameters + ---------- + x : ndarray + Input array. Should be 1-dimensional. + lmbda : float, optional + If ``lmbda`` is ``None``, find the lambda that maximizes the + log-likelihood function and return it as the second output argument. 
+ Otherwise the transformation is done for the given value. + + Returns + ------- + yeojohnson: ndarray + Yeo-Johnson power transformed array. + maxlog : float, optional + If the `lmbda` parameter is None, the second returned argument is + the lambda that maximizes the log-likelihood function. + + See Also + -------- + probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox + + Notes + ----- + The Yeo-Johnson transform is given by:: + + y = ((x + 1)**lmbda - 1) / lmbda, for x >= 0, lmbda != 0 + log(x + 1), for x >= 0, lmbda = 0 + -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda), for x < 0, lmbda != 2 + -log(-x + 1), for x < 0, lmbda = 2 + + Unlike `boxcox`, `yeojohnson` does not require the input data to be + positive. + + .. versionadded:: 1.2.0 + + + References + ---------- + I. Yeo and R.A. Johnson, "A New Family of Power Transformations to + Improve Normality or Symmetry", Biometrika 87.4 (2000): + + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + We generate some random variates from a non-normal distribution and make a + probability plot for it, to show it is non-normal in the tails: + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(211) + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) + >>> ax1.set_xlabel('') + >>> ax1.set_title('Probplot against normal distribution') + + We now use `yeojohnson` to transform the data so it's closest to normal: + + >>> ax2 = fig.add_subplot(212) + >>> xt, lmbda = stats.yeojohnson(x) + >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) + >>> ax2.set_title('Probplot after Yeo-Johnson transformation') + + >>> plt.show() + + """ + x = np.asarray(x) + if x.size == 0: + return x + + if np.issubdtype(x.dtype, np.complexfloating): + raise ValueError('Yeo-Johnson transformation is not defined for ' + 'complex numbers.') + + if np.issubdtype(x.dtype, np.integer): + x = x.astype(np.float64, copy=False) + + if lmbda 
is not None: + return _yeojohnson_transform(x, lmbda) + + # if lmbda=None, find the lmbda that maximizes the log-likelihood function. + lmax = yeojohnson_normmax(x) + y = _yeojohnson_transform(x, lmax) + + return y, lmax + + +def _yeojohnson_transform(x, lmbda): + """Returns `x` transformed by the Yeo-Johnson power transform with given + parameter `lmbda`. + """ + dtype = x.dtype if np.issubdtype(x.dtype, np.floating) else np.float64 + out = np.zeros_like(x, dtype=dtype) + pos = x >= 0 # binary mask + + # when x >= 0 + if abs(lmbda) < np.spacing(1.): + out[pos] = np.log1p(x[pos]) + else: # lmbda != 0 + # more stable version of: ((x + 1) ** lmbda - 1) / lmbda + out[pos] = np.expm1(lmbda * np.log1p(x[pos])) / lmbda + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.): + out[~pos] = -np.expm1((2 - lmbda) * np.log1p(-x[~pos])) / (2 - lmbda) + else: # lmbda == 2 + out[~pos] = -np.log1p(-x[~pos]) + + return out + + +def yeojohnson_llf(lmb, data): + r"""The yeojohnson log-likelihood function. + + Parameters + ---------- + lmb : scalar + Parameter for Yeo-Johnson transformation. See `yeojohnson` for + details. + data : array_like + Data to calculate Yeo-Johnson log-likelihood for. If `data` is + multi-dimensional, the log-likelihood is calculated along the first + axis. + + Returns + ------- + llf : float + Yeo-Johnson log-likelihood of `data` given `lmb`. + + See Also + -------- + yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax + + Notes + ----- + The Yeo-Johnson log-likelihood function is defined here as + + .. math:: + + llf = -N/2 \log(\hat{\sigma}^2) + (\lambda - 1) + \sum_i \text{ sign }(x_i)\log(|x_i| + 1) + + where :math:`\hat{\sigma}^2` is estimated variance of the Yeo-Johnson + transformed input data ``x``. + + .. 
versionadded:: 1.2.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes + + Generate some random variates and calculate Yeo-Johnson log-likelihood + values for them for a range of ``lmbda`` values: + + >>> x = stats.loggamma.rvs(5, loc=10, size=1000) + >>> lmbdas = np.linspace(-2, 10) + >>> llf = np.zeros(lmbdas.shape, dtype=float) + >>> for ii, lmbda in enumerate(lmbdas): + ... llf[ii] = stats.yeojohnson_llf(lmbda, x) + + Also find the optimal lmbda value with `yeojohnson`: + + >>> x_most_normal, lmbda_optimal = stats.yeojohnson(x) + + Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a + horizontal line to check that that's really the optimum: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(lmbdas, llf, 'b.-') + >>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r') + >>> ax.set_xlabel('lmbda parameter') + >>> ax.set_ylabel('Yeo-Johnson log-likelihood') + + Now add some probability plots to show that where the log-likelihood is + maximized the data transformed with `yeojohnson` looks closest to normal: + + >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' + >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): + ... xt = stats.yeojohnson(x, lmbda=lmbda) + ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) + ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) + ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') + ... ax_inset.set_xticklabels([]) + ... ax_inset.set_yticklabels([]) + ... 
ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda) + + >>> plt.show() + + """ + data = np.asarray(data) + n_samples = data.shape[0] + + if n_samples == 0: + return np.nan + + trans = _yeojohnson_transform(data, lmb) + trans_var = trans.var(axis=0) + loglike = np.empty_like(trans_var) + + # Avoid RuntimeWarning raised by np.log when the variance is too low + tiny_variance = trans_var < np.finfo(trans_var.dtype).tiny + loglike[tiny_variance] = np.inf + + loglike[~tiny_variance] = ( + -n_samples / 2 * np.log(trans_var[~tiny_variance])) + loglike[~tiny_variance] += ( + (lmb - 1) * (np.sign(data) * np.log1p(np.abs(data))).sum(axis=0)) + return loglike + + +def yeojohnson_normmax(x, brack=None): + """Compute optimal Yeo-Johnson transform parameter. + + Compute optimal Yeo-Johnson transform parameter for input data, using + maximum likelihood estimation. + + Parameters + ---------- + x : array_like + Input array. + brack : 2-tuple, optional + The starting interval for a downhill bracket search with + `optimize.brent`. Note that this is in most cases not critical; the + final result is allowed to be outside this bracket. If None, + `optimize.fminbound` is used with bounds that avoid overflow. + + Returns + ------- + maxlog : float + The optimal transform parameter found. + + See Also + -------- + yeojohnson, yeojohnson_llf, yeojohnson_normplot + + Notes + ----- + .. 
versionadded:: 1.2.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Generate some data and determine optimal ``lmbda`` + + >>> rng = np.random.default_rng() + >>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5 + >>> lmax = stats.yeojohnson_normmax(x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax) + >>> ax.axvline(lmax, color='r') + + >>> plt.show() + + """ + def _neg_llf(lmbda, data): + llf = yeojohnson_llf(lmbda, data) + # reject likelihoods that are inf which are likely due to small + # variance in the transformed space + llf[np.isinf(llf)] = -np.inf + return -llf + + with np.errstate(invalid='ignore'): + if not np.all(np.isfinite(x)): + raise ValueError('Yeo-Johnson input must be finite.') + if np.all(x == 0): + return 1.0 + if brack is not None: + return optimize.brent(_neg_llf, brack=brack, args=(x,)) + x = np.asarray(x) + dtype = x.dtype if np.issubdtype(x.dtype, np.floating) else np.float64 + # Allow values up to 20 times the maximum observed value to be safely + # transformed without over- or underflow. + log1p_max_x = np.log1p(20 * np.max(np.abs(x))) + # Use half of floating point's exponent range to allow safe computation + # of the variance of the transformed data. + log_eps = np.log(np.finfo(dtype).eps) + log_tiny_float = (np.log(np.finfo(dtype).tiny) - log_eps) / 2 + log_max_float = (np.log(np.finfo(dtype).max) + log_eps) / 2 + # Compute the bounds by approximating the inverse of the Yeo-Johnson + # transform on the smallest and largest floating point exponents, given + # the largest data we expect to observe. See [1] for further details. + # [1] https://github.com/scipy/scipy/pull/18852#issuecomment-1630286174 + lb = log_tiny_float / log1p_max_x + ub = log_max_float / log1p_max_x + # Convert the bounds if all or some of the data is negative. 
+ if np.all(x < 0): + lb, ub = 2 - ub, 2 - lb + elif np.any(x < 0): + lb, ub = max(2 - ub, lb), min(2 - lb, ub) + # Match `optimize.brent`'s tolerance. + tol_brent = 1.48e-08 + return optimize.fminbound(_neg_llf, lb, ub, args=(x,), xtol=tol_brent) + + +def yeojohnson_normplot(x, la, lb, plot=None, N=80): + """Compute parameters for a Yeo-Johnson normality plot, optionally show it. + + A Yeo-Johnson normality plot shows graphically what the best + transformation parameter is to use in `yeojohnson` to obtain a + distribution that is close to normal. + + Parameters + ---------- + x : array_like + Input array. + la, lb : scalar + The lower and upper bounds for the ``lmbda`` values to pass to + `yeojohnson` for Yeo-Johnson transformations. These are also the + limits of the horizontal axis of the plot if that is generated. + plot : object, optional + If given, plots the quantiles and least squares fit. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. + N : int, optional + Number of points on the horizontal axis (equally distributed from + `la` to `lb`). + + Returns + ------- + lmbdas : ndarray + The ``lmbda`` values for which a Yeo-Johnson transform was done. + ppcc : ndarray + Probability Plot Correlelation Coefficient, as obtained from `probplot` + when fitting the Box-Cox transformed input `x` against a normal + distribution. + + See Also + -------- + probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max + + Notes + ----- + Even if `plot` is given, the figure is not shown or saved by + `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` + should be used after calling `probplot`. + + .. 
versionadded:: 1.2.0 + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Generate some non-normally distributed data, and create a Yeo-Johnson plot: + + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax) + + Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in + the same plot: + + >>> _, maxlog = stats.yeojohnson(x) + >>> ax.axvline(maxlog, color='r') + + >>> plt.show() + + """ + return _normplot('yeojohnson', x, la, lb, plot, N) + + +ShapiroResult = namedtuple('ShapiroResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(ShapiroResult, n_samples=1, too_small=2, default_axis=None) +def shapiro(x): + r"""Perform the Shapiro-Wilk test for normality. + + The Shapiro-Wilk test tests the null hypothesis that the + data was drawn from a normal distribution. + + Parameters + ---------- + x : array_like + Array of sample data. Must contain at least three observations. + + Returns + ------- + statistic : float + The test statistic. + p-value : float + The p-value for the hypothesis test. + + See Also + -------- + anderson : The Anderson-Darling test for normality + kstest : The Kolmogorov-Smirnov test for goodness of fit. + + Notes + ----- + The algorithm used is described in [4]_ but censoring parameters as + described are not implemented. For N > 5000 the W test statistic is + accurate, but the p-value may not be. + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm + :doi:`10.18434/M32189` + .. [2] Shapiro, S. S. & Wilk, M.B, "An analysis of variance test for + normality (complete samples)", Biometrika, 1965, Vol. 52, + pp. 591-611, :doi:`10.2307/2333709` + .. [3] Razali, N. M. & Wah, Y. 
B., "Power comparisons of Shapiro-Wilk, + Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests", Journal + of Statistical Modeling and Analytics, 2011, Vol. 2, pp. 21-33. + .. [4] Royston P., "Remark AS R94: A Remark on Algorithm AS 181: The + W-test for Normality", 1995, Applied Statistics, Vol. 44, + :doi:`10.2307/2986146` + .. [5] Phipson B., and Smyth, G. K., "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn", Statistical Applications in Genetics and Molecular Biology, + 2010, Vol.9, :doi:`10.2202/1544-6115.1585` + .. [6] Panagiotakos, D. B., "The value of p-value in biomedical + research", The Open Cardiovascular Medicine Journal, 2008, Vol.2, + pp. 97-99, :doi:`10.2174/1874192400802010097` + + Examples + -------- + Suppose we wish to infer from measurements whether the weights of adult + human males in a medical study are not normally distributed [2]_. + The weights (lbs) are recorded in the array ``x`` below. + + >>> import numpy as np + >>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236]) + + The normality test of [1]_ and [2]_ begins by computing a statistic based + on the relationship between the observations and the expected order + statistics of a normal distribution. + + >>> from scipy import stats + >>> res = stats.shapiro(x) + >>> res.statistic + 0.7888147830963135 + + The value of this statistic tends to be high (close to 1) for samples drawn + from a normal distribution. + + The test is performed by comparing the observed value of the statistic + against the null distribution: the distribution of statistic values formed + under the null hypothesis that the weights were drawn from a normal + distribution. 
For this normality test, the null distribution is not easy to + calculate exactly, so it is usually approximated by Monte Carlo methods, + that is, drawing many samples of the same size as ``x`` from a normal + distribution and computing the values of the statistic for each. + + >>> def statistic(x): + ... # Get only the `shapiro` statistic; ignore its p-value + ... return stats.shapiro(x).statistic + >>> ref = stats.monte_carlo_test(x, stats.norm.rvs, statistic, + ... alternative='less') + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> bins = np.linspace(0.65, 1, 50) + >>> def plot(ax): # we'll reuse this + ... ax.hist(ref.null_distribution, density=True, bins=bins) + ... ax.set_title("Shapiro-Wilk Test Null Distribution \n" + ... "(Monte Carlo Approximation, 11 Observations)") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution less than or equal to the observed value of the + statistic. + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> annotation = (f'p-value={res.pvalue:.6f}\n(highlighted area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (0.75, 0.1), (0.68, 0.7), arrowprops=props) + >>> i_extreme = np.where(bins <= res.statistic)[0] + >>> for i in i_extreme: + ... ax.patches[i].set_color('C1') + >>> plt.xlim(0.65, 0.9) + >>> plt.ylim(0, 4) + >>> plt.show + >>> res.pvalue + 0.006703833118081093 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from a normally distributed population that produces such an + extreme value of the statistic - this may be taken as evidence against + the null hypothesis in favor of the alternative: the weights were not + drawn from a normal distribution. 
Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence *for* the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [5]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + + """ + x = np.ravel(x).astype(np.float64) + + N = len(x) + if N < 3: + raise ValueError("Data must be at least length 3.") + + a = zeros(N//2, dtype=np.float64) + init = 0 + + y = sort(x) + y -= x[N//2] # subtract the median (or a nearby value); see gh-15777 + + w, pw, ifault = swilk(y, a, init) + if ifault not in [0, 2]: + warnings.warn("scipy.stats.shapiro: Input data has range zero. The" + " results may not be accurate.", stacklevel=2) + if N > 5000: + warnings.warn("scipy.stats.shapiro: For N > 5000, computed p-value " + f"may not be accurate. Current N is {N}.", + stacklevel=2) + + # `w` and `pw` are always Python floats, which are double precision. + # We want to ensure that they are NumPy floats, so until dtypes are + # respected, we can explicitly convert each to float64 (faster than + # `np.array([w, pw])`). + return ShapiroResult(np.float64(w), np.float64(pw)) + + +# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and +# Some Comparisons", Journal of the American Statistical +# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 +_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) +_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) +# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", +# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. +_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) +# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based +# on the Empirical Distribution Function.", Biometrika, +# Vol. 66, Issue 3, Dec. 1979, pp 591-595. 
+_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) +# From Richard A. Lockhart and Michael A. Stephens "Estimation and Tests of +# Fit for the Three-Parameter Weibull Distribution" +# Journal of the Royal Statistical Society.Series B(Methodological) +# Vol. 56, No. 3 (1994), pp. 491-500, table 1. Keys are c*100 +_Avals_weibull = [[0.292, 0.395, 0.467, 0.522, 0.617, 0.711, 0.836, 0.931], + [0.295, 0.399, 0.471, 0.527, 0.623, 0.719, 0.845, 0.941], + [0.298, 0.403, 0.476, 0.534, 0.631, 0.728, 0.856, 0.954], + [0.301, 0.408, 0.483, 0.541, 0.640, 0.738, 0.869, 0.969], + [0.305, 0.414, 0.490, 0.549, 0.650, 0.751, 0.885, 0.986], + [0.309, 0.421, 0.498, 0.559, 0.662, 0.765, 0.902, 1.007], + [0.314, 0.429, 0.508, 0.570, 0.676, 0.782, 0.923, 1.030], + [0.320, 0.438, 0.519, 0.583, 0.692, 0.802, 0.947, 1.057], + [0.327, 0.448, 0.532, 0.598, 0.711, 0.824, 0.974, 1.089], + [0.334, 0.469, 0.547, 0.615, 0.732, 0.850, 1.006, 1.125], + [0.342, 0.472, 0.563, 0.636, 0.757, 0.879, 1.043, 1.167]] +_Avals_weibull = np.array(_Avals_weibull) +_cvals_weibull = np.linspace(0, 0.5, 11) +_get_As_weibull = interpolate.interp1d(_cvals_weibull, _Avals_weibull.T, + kind='linear', bounds_error=False, + fill_value=_Avals_weibull[-1]) + + +def _weibull_fit_check(params, x): + # Refine the fit returned by `weibull_min.fit` to ensure that the first + # order necessary conditions are satisfied. If not, raise an error. + # Here, use `m` for the shape parameter to be consistent with [7] + # and avoid confusion with `c` as defined in [7]. + n = len(x) + m, u, s = params + + def dnllf_dm(m, u): + # Partial w.r.t. shape w/ optimal scale. See [7] Equation 5. + xu = x-u + return (1/m - (xu**m*np.log(xu)).sum()/(xu**m).sum() + + np.log(xu).sum()/n) + + def dnllf_du(m, u): + # Partial w.r.t. loc w/ optimal scale. See [7] Equation 6. + xu = x-u + return (m-1)/m*(xu**-1).sum() - n*(xu**(m-1)).sum()/(xu**m).sum() + + def get_scale(m, u): + # Partial w.r.t. 
scale solved in terms of shape and location. + # See [7] Equation 7. + return ((x-u)**m/n).sum()**(1/m) + + def dnllf(params): + # Partial derivatives of the NLLF w.r.t. parameters, i.e. + # first order necessary conditions for MLE fit. + return [dnllf_dm(*params), dnllf_du(*params)] + + suggestion = ("Maximum likelihood estimation is known to be challenging " + "for the three-parameter Weibull distribution. Consider " + "performing a custom goodness-of-fit test using " + "`scipy.stats.monte_carlo_test`.") + + if np.allclose(u, np.min(x)) or m < 1: + # The critical values provided by [7] don't seem to control the + # Type I error rate in this case. Error out. + message = ("Maximum likelihood estimation has converged to " + "a solution in which the location is equal to the minimum " + "of the data, the shape parameter is less than 2, or both. " + "The table of critical values in [7] does not " + "include this case. " + suggestion) + raise ValueError(message) + + try: + # Refine the MLE / verify that first-order necessary conditions are + # satisfied. If so, the critical values provided in [7] seem reliable. + with np.errstate(over='raise', invalid='raise'): + res = optimize.root(dnllf, params[:-1]) + + message = ("Solution of MLE first-order conditions failed: " + f"{res.message}. `anderson` cannot continue. " + suggestion) + if not res.success: + raise ValueError(message) + + except (FloatingPointError, ValueError) as e: + message = ("An error occurred while fitting the Weibull distribution " + "to the data, so `anderson` cannot continue. " + suggestion) + raise ValueError(message) from e + + m, u = res.x + s = get_scale(m, u) + return m, u, s + + +AndersonResult = _make_tuple_bunch('AndersonResult', + ['statistic', 'critical_values', + 'significance_level'], ['fit_result']) + + +def anderson(x, dist='norm'): + """Anderson-Darling test for data coming from a particular distribution. 
+ + The Anderson-Darling test tests the null hypothesis that a sample is + drawn from a population that follows a particular distribution. + For the Anderson-Darling test, the critical values depend on + which distribution is being tested against. This function works + for normal, exponential, logistic, weibull_min, or Gumbel (Extreme Value + Type I) distributions. + + Parameters + ---------- + x : array_like + Array of sample data. + dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1', 'weibull_min'}, optional + The type of distribution to test against. The default is 'norm'. + The names 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the + same distribution. + + Returns + ------- + result : AndersonResult + An object with the following attributes: + + statistic : float + The Anderson-Darling test statistic. + critical_values : list + The critical values for this distribution. + significance_level : list + The significance levels for the corresponding critical values + in percents. The function returns critical values for a + differing set of significance levels depending on the + distribution that is being tested against. + fit_result : `~scipy.stats._result_classes.FitResult` + An object containing the results of fitting the distribution to + the data. + + See Also + -------- + kstest : The Kolmogorov-Smirnov test for goodness-of-fit. + + Notes + ----- + Critical values provided are for the following significance levels: + + normal/exponential + 15%, 10%, 5%, 2.5%, 1% + logistic + 25%, 10%, 5%, 2.5%, 1%, 0.5% + gumbel_l / gumbel_r + 25%, 10%, 5%, 2.5%, 1% + weibull_min + 50%, 25%, 15%, 10%, 5%, 2.5%, 1%, 0.5% + + If the returned statistic is larger than these critical values then + for the corresponding significance level, the null hypothesis that + the data come from the chosen distribution can be rejected. + The returned statistic is referred to as 'A2' in the references. 
+ + For `weibull_min`, maximum likelihood estimation is known to be + challenging. If the test returns successfully, then the first order + conditions for a maximum likehood estimate have been verified and + the critical values correspond relatively well to the significance levels, + provided that the sample is sufficiently large (>10 observations [7]). + However, for some data - especially data with no left tail - `anderson` + is likely to result in an error message. In this case, consider + performing a custom goodness of fit test using + `scipy.stats.monte_carlo_test`. + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm + .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and + Some Comparisons, Journal of the American Statistical Association, + Vol. 69, pp. 730-737. + .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit + Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, + pp. 357-369. + .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value + Distribution, Biometrika, Vol. 64, pp. 583-588. + .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference + to Tests for Exponentiality , Technical Report No. 262, + Department of Statistics, Stanford University, Stanford, CA. + .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution + Based on the Empirical Distribution Function, Biometrika, Vol. 66, + pp. 591-595. + .. [7] Richard A. Lockhart and Michael A. Stephens "Estimation and Tests of + Fit for the Three-Parameter Weibull Distribution" + Journal of the Royal Statistical Society.Series B(Methodological) + Vol. 56, No. 3 (1994), pp. 491-500, Table 0. + + Examples + -------- + Test the null hypothesis that a random sample was drawn from a normal + distribution (with unspecified mean and standard deviation). 
+ + >>> import numpy as np + >>> from scipy.stats import anderson + >>> rng = np.random.default_rng() + >>> data = rng.random(size=35) + >>> res = anderson(data) + >>> res.statistic + 0.8398018749744764 + >>> res.critical_values + array([0.527, 0.6 , 0.719, 0.839, 0.998]) + >>> res.significance_level + array([15. , 10. , 5. , 2.5, 1. ]) + + The value of the statistic (barely) exceeds the critical value associated + with a significance level of 2.5%, so the null hypothesis may be rejected + at a significance level of 2.5%, but not at a significance level of 1%. + + """ # numpy/numpydoc#87 # noqa: E501 + dist = dist.lower() + if dist in {'extreme1', 'gumbel'}: + dist = 'gumbel_l' + dists = {'norm', 'expon', 'gumbel_l', + 'gumbel_r', 'logistic', 'weibull_min'} + + if dist not in dists: + raise ValueError(f"Invalid distribution; dist must be in {dists}.") + y = sort(x) + xbar = np.mean(x, axis=0) + N = len(y) + if dist == 'norm': + s = np.std(x, ddof=1, axis=0) + w = (y - xbar) / s + fit_params = xbar, s + logcdf = distributions.norm.logcdf(w) + logsf = distributions.norm.logsf(w) + sig = array([15, 10, 5, 2.5, 1]) + critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) + elif dist == 'expon': + w = y / xbar + fit_params = 0, xbar + logcdf = distributions.expon.logcdf(w) + logsf = distributions.expon.logsf(w) + sig = array([15, 10, 5, 2.5, 1]) + critical = around(_Avals_expon / (1.0 + 0.6/N), 3) + elif dist == 'logistic': + def rootfunc(ab, xj, N): + a, b = ab + tmp = (xj - a) / b + tmp2 = exp(tmp) + val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N, + np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] + return array(val) + + sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) + sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) + w = (y - sol[0]) / sol[1] + fit_params = sol + logcdf = distributions.logistic.logcdf(w) + logsf = distributions.logistic.logsf(w) + sig = array([25, 10, 5, 2.5, 1, 0.5]) + critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) + elif dist 
== 'gumbel_r': + xbar, s = distributions.gumbel_r.fit(x) + w = (y - xbar) / s + fit_params = xbar, s + logcdf = distributions.gumbel_r.logcdf(w) + logsf = distributions.gumbel_r.logsf(w) + sig = array([25, 10, 5, 2.5, 1]) + critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) + elif dist == 'gumbel_l': + xbar, s = distributions.gumbel_l.fit(x) + w = (y - xbar) / s + fit_params = xbar, s + logcdf = distributions.gumbel_l.logcdf(w) + logsf = distributions.gumbel_l.logsf(w) + sig = array([25, 10, 5, 2.5, 1]) + critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) + elif dist == 'weibull_min': + message = ("Critical values of the test statistic are given for the " + "asymptotic distribution. These may not be accurate for " + "samples with fewer than 10 observations. Consider using " + "`scipy.stats.monte_carlo_test`.") + if N < 10: + warnings.warn(message, stacklevel=2) + # [7] writes our 'c' as 'm', and they write `c = 1/m`. Use their names. + m, loc, scale = distributions.weibull_min.fit(y) + m, loc, scale = _weibull_fit_check((m, loc, scale), y) + fit_params = m, loc, scale + logcdf = stats.weibull_min(*fit_params).logcdf(y) + logsf = stats.weibull_min(*fit_params).logsf(y) + c = 1 / m # m and c are as used in [7] + sig = array([0.5, 0.75, 0.85, 0.9, 0.95, 0.975, 0.99, 0.995]) + critical = _get_As_weibull(c) + # Goodness-of-fit tests should only be used to provide evidence + # _against_ the null hypothesis. Be conservative and round up. + critical = np.round(critical + 0.0005, decimals=3) + + i = arange(1, N + 1) + A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0) + + # FitResult initializer expects an optimize result, so let's work with it + message = '`anderson` successfully fit the distribution to the data.' 
+ res = optimize.OptimizeResult(success=True, message=message) + res.x = np.array(fit_params) + fit_result = FitResult(getattr(distributions, dist), y, + discrete=False, res=res) + + return AndersonResult(A2, critical, sig, fit_result=fit_result) + + +def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): + """Compute A2akN equation 7 of Scholz and Stephens. + + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample arrays. + Z : array_like + Sorted array of all observations. + Zstar : array_like + Sorted array of unique observations. + k : int + Number of samples. + n : array_like + Number of observations in each sample. + N : int + Total number of observations. + + Returns + ------- + A2aKN : float + The A2aKN statistics of Scholz and Stephens 1987. + + """ + A2akN = 0. + Z_ssorted_left = Z.searchsorted(Zstar, 'left') + if N == Zstar.size: + lj = 1. + else: + lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left + Bj = Z_ssorted_left + lj / 2. + for i in arange(0, k): + s = np.sort(samples[i]) + s_ssorted_right = s.searchsorted(Zstar, side='right') + Mij = s_ssorted_right.astype(float) + fij = s_ssorted_right - s.searchsorted(Zstar, 'left') + Mij -= fij / 2. + inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) + A2akN += inner.sum() / n[i] + A2akN *= (N - 1.) / N + return A2akN + + +def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): + """Compute A2akN equation 6 of Scholz & Stephens. + + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample arrays. + Z : array_like + Sorted array of all observations. + Zstar : array_like + Sorted array of unique observations. + k : int + Number of samples. + n : array_like + Number of observations in each sample. + N : int + Total number of observations. + + Returns + ------- + A2KN : float + The A2KN statistics of Scholz and Stephens 1987. + + """ + A2kN = 0. 
+ lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], + 'left') + Bj = lj.cumsum() + for i in arange(0, k): + s = np.sort(samples[i]) + Mij = s.searchsorted(Zstar[:-1], side='right') + inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) + A2kN += inner.sum() / n[i] + return A2kN + + +Anderson_ksampResult = _make_tuple_bunch( + 'Anderson_ksampResult', + ['statistic', 'critical_values', 'pvalue'], [] +) + + +def anderson_ksamp(samples, midrank=True, *, method=None): + """The Anderson-Darling test for k-samples. + + The k-sample Anderson-Darling test is a modification of the + one-sample Anderson-Darling test. It tests the null hypothesis + that k-samples are drawn from the same population without having + to specify the distribution function of that population. The + critical values depend on the number of samples. + + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample data in arrays. + midrank : bool, optional + Type of Anderson-Darling test which is computed. Default + (True) is the midrank test applicable to continuous and + discrete populations. If False, the right side empirical + distribution is used. + method : PermutationMethod, optional + Defines the method used to compute the p-value. If `method` is an + instance of `PermutationMethod`, the p-value is computed using + `scipy.stats.permutation_test` with the provided configuration options + and other appropriate settings. Otherwise, the p-value is interpolated + from tabulated values. + + Returns + ------- + res : Anderson_ksampResult + An object containing attributes: + + statistic : float + Normalized k-sample Anderson-Darling test statistic. + critical_values : array + The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%, + 0.5%, 0.1%. + pvalue : float + The approximate p-value of the test. If `method` is not + provided, the value is floored / capped at 0.1% / 25%. 
+ + Raises + ------ + ValueError + If fewer than 2 samples are provided, a sample is empty, or no + distinct observations are in the samples. + + See Also + -------- + ks_2samp : 2 sample Kolmogorov-Smirnov test + anderson : 1 sample Anderson-Darling test + + Notes + ----- + [1]_ defines three versions of the k-sample Anderson-Darling test: + one for continuous distributions and two for discrete + distributions, in which ties between samples may occur. The + default of this routine is to compute the version based on the + midrank empirical distribution function. This test is applicable + to continuous and discrete data. If midrank is set to False, the + right side empirical distribution is used for a test for discrete + data. According to [1]_, the two discrete test statistics differ + only slightly if a few collisions due to round-off errors occur in + the test not adjusted for ties between samples. + + The critical values corresponding to the significance levels from 0.01 + to 0.25 are taken from [1]_. p-values are floored / capped + at 0.1% / 25%. Since the range of critical values might be extended in + future releases, it is recommended not to test ``p == 0.25``, but rather + ``p >= 0.25`` (analogously for the lower bound). + + .. versionadded:: 0.14.0 + + References + ---------- + .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample + Anderson-Darling Tests, Journal of the American Statistical + Association, Vol. 82, pp. 918-924. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> res = stats.anderson_ksamp([rng.normal(size=50), + ... 
rng.normal(loc=0.5, size=30)]) + >>> res.statistic, res.pvalue + (1.974403288713695, 0.04991293614572478) + >>> res.critical_values + array([0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546]) + + The null hypothesis that the two random samples come from the same + distribution can be rejected at the 5% level because the returned + test value is greater than the critical value for 5% (1.961) but + not at the 2.5% level. The interpolation gives an approximate + p-value of 4.99%. + + >>> samples = [rng.normal(size=50), rng.normal(size=30), + ... rng.normal(size=20)] + >>> res = stats.anderson_ksamp(samples) + >>> res.statistic, res.pvalue + (-0.29103725200789504, 0.25) + >>> res.critical_values + array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856, + 4.07210043, 5.56419101]) + + The null hypothesis cannot be rejected for three samples from an + identical distribution. The reported p-value (25%) has been capped and + may not be very accurate (since it corresponds to the value 0.449 + whereas the statistic is -0.291). + + In such cases where the p-value is capped or when sample sizes are + small, a permutation test may be more accurate. 
+ + >>> method = stats.PermutationMethod(n_resamples=9999, random_state=rng) + >>> res = stats.anderson_ksamp(samples, method=method) + >>> res.pvalue + 0.5254 + + """ + k = len(samples) + if (k < 2): + raise ValueError("anderson_ksamp needs at least two samples") + + samples = list(map(np.asarray, samples)) + Z = np.sort(np.hstack(samples)) + N = Z.size + Zstar = np.unique(Z) + if Zstar.size < 2: + raise ValueError("anderson_ksamp needs more than one distinct " + "observation") + + n = np.array([sample.size for sample in samples]) + if np.any(n == 0): + raise ValueError("anderson_ksamp encountered sample without " + "observations") + + if midrank: + A2kN_fun = _anderson_ksamp_midrank + else: + A2kN_fun = _anderson_ksamp_right + A2kN = A2kN_fun(samples, Z, Zstar, k, n, N) + + def statistic(*samples): + return A2kN_fun(samples, Z, Zstar, k, n, N) + + if method is not None: + res = stats.permutation_test(samples, statistic, **method._asdict(), + alternative='greater') + + H = (1. / n).sum() + hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() + h = hs_cs[-1] + 1 + g = (hs_cs / arange(2, N)).sum() + + a = (4*g - 6) * (k - 1) + (10 - 6*g)*H + b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 + c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h + d = (2*h + 6)*k**2 - 4*h*k + sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.)) + m = k - 1 + A2 = (A2kN - m) / math.sqrt(sigmasq) + + # The b_i values are the interpolation coefficients from Table 2 + # of Scholz and Stephens 1987 + b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085]) + b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615]) + b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154]) + critical = b0 + b1 / math.sqrt(m) + b2 / m + + sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001]) + + if A2 < critical.min() and method is None: + p = sig.max() + msg = (f"p-value capped: true value larger than {p}. 
Consider " + "specifying `method` " + "(e.g. `method=stats.PermutationMethod()`.)") + warnings.warn(msg, stacklevel=2) + elif A2 > critical.max() and method is None: + p = sig.min() + msg = (f"p-value floored: true value smaller than {p}. Consider " + "specifying `method` " + "(e.g. `method=stats.PermutationMethod()`.)") + warnings.warn(msg, stacklevel=2) + elif method is None: + # interpolation of probit of significance level + pf = np.polyfit(critical, log(sig), 2) + p = math.exp(np.polyval(pf, A2)) + else: + p = res.pvalue if method is not None else p + + # create result object with alias for backward compatibility + res = Anderson_ksampResult(A2, critical, p) + res.significance_level = p + return res + + +AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) + + +class _ABW: + """Distribution of Ansari-Bradley W-statistic under the null hypothesis.""" + # TODO: calculate exact distribution considering ties + # We could avoid summing over more than half the frequencies, + # but initially it doesn't seem worth the extra complexity + + def __init__(self): + """Minimal initializer.""" + self.m = None + self.n = None + self.astart = None + self.total = None + self.freqs = None + + def _recalc(self, n, m): + """When necessary, recalculate exact distribution.""" + if n != self.n or m != self.m: + self.n, self.m = n, m + # distribution is NOT symmetric when m + n is odd + # n is len(x), m is len(y), and ratio of scales is defined x/y + astart, a1, _ = gscale(n, m) + self.astart = astart # minimum value of statistic + # Exact distribution of test statistic under null hypothesis + # expressed as frequencies/counts/integers to maintain precision. + # Stored as floats to avoid overflow of sums. 
+ self.freqs = a1.astype(np.float64) + self.total = self.freqs.sum() # could calculate from m and n + # probability mass is self.freqs / self.total; + + def pmf(self, k, n, m): + """Probability mass function.""" + self._recalc(n, m) + # The convention here is that PMF at k = 12.5 is the same as at k = 12, + # -> use `floor` in case of ties. + ind = np.floor(k - self.astart).astype(int) + return self.freqs[ind] / self.total + + def cdf(self, k, n, m): + """Cumulative distribution function.""" + self._recalc(n, m) + # Null distribution derived without considering ties is + # approximate. Round down to avoid Type I error. + ind = np.ceil(k - self.astart).astype(int) + return self.freqs[:ind+1].sum() / self.total + + def sf(self, k, n, m): + """Survival function.""" + self._recalc(n, m) + # Null distribution derived without considering ties is + # approximate. Round down to avoid Type I error. + ind = np.floor(k - self.astart).astype(int) + return self.freqs[ind:].sum() / self.total + + +# Maintain state for faster repeat calls to ansari w/ method='exact' +_abw_state = _ABW() + + +@_axis_nan_policy_factory(AnsariResult, n_samples=2) +def ansari(x, y, alternative='two-sided'): + """Perform the Ansari-Bradley test for equal scale parameters. + + The Ansari-Bradley test ([1]_, [2]_) is a non-parametric test + for the equality of the scale parameter of the distributions + from which two samples were drawn. The null hypothesis states that + the ratio of the scale of the distribution underlying `x` to the scale + of the distribution underlying `y` is 1. + + Parameters + ---------- + x, y : array_like + Arrays of sample data. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the ratio of scales is not equal to 1. + * 'less': the ratio of scales is less than 1. + * 'greater': the ratio of scales is greater than 1. + + .. 
versionadded:: 1.7.0 + + Returns + ------- + statistic : float + The Ansari-Bradley test statistic. + pvalue : float + The p-value of the hypothesis test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + mood : A non-parametric test for the equality of two scale parameters + + Notes + ----- + The p-value given is exact when the sample sizes are both less than + 55 and there are no ties, otherwise a normal approximation for the + p-value is used. + + References + ---------- + .. [1] Ansari, A. R. and Bradley, R. A. (1960) Rank-sum tests for + dispersions, Annals of Mathematical Statistics, 31, 1174-1189. + .. [2] Sprent, Peter and N.C. Smeeton. Applied nonparametric + statistical methods. 3rd ed. Chapman and Hall/CRC. 2001. + Section 5.8.2. + .. [3] Nathaniel E. Helwig "Nonparametric Dispersion and Equality + Tests" at http://users.stat.umn.edu/~helwig/notes/npde-Notes.pdf + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import ansari + >>> rng = np.random.default_rng() + + For these examples, we'll create three random data sets. The first + two, with sizes 35 and 25, are drawn from a normal distribution with + mean 0 and standard deviation 2. The third data set has size 25 and + is drawn from a normal distribution with standard deviation 1.25. + + >>> x1 = rng.normal(loc=0, scale=2, size=35) + >>> x2 = rng.normal(loc=0, scale=2, size=25) + >>> x3 = rng.normal(loc=0, scale=1.25, size=25) + + First we apply `ansari` to `x1` and `x2`. These samples are drawn + from the same distribution, so we expect the Ansari-Bradley test + should not lead us to conclude that the scales of the distributions + are different. + + >>> ansari(x1, x2) + AnsariResult(statistic=541.0, pvalue=0.9762532927399098) + + With a p-value close to 1, we cannot conclude that there is a + significant difference in the scales (as expected). 
+ + Now apply the test to `x1` and `x3`: + + >>> ansari(x1, x3) + AnsariResult(statistic=425.0, pvalue=0.0003087020407974518) + + The probability of observing such an extreme value of the statistic + under the null hypothesis of equal scales is only 0.03087%. We take this + as evidence against the null hypothesis in favor of the alternative: + the scales of the distributions from which the samples were drawn + are not equal. + + We can use the `alternative` parameter to perform a one-tailed test. + In the above example, the scale of `x1` is greater than `x3` and so + the ratio of scales of `x1` and `x3` is greater than 1. This means + that the p-value when ``alternative='greater'`` should be near 0 and + hence we should be able to reject the null hypothesis: + + >>> ansari(x1, x3, alternative='greater') + AnsariResult(statistic=425.0, pvalue=0.0001543510203987259) + + As we can see, the p-value is indeed quite low. Use of + ``alternative='less'`` should thus yield a large p-value: + + >>> ansari(x1, x3, alternative='less') + AnsariResult(statistic=425.0, pvalue=0.9998643258449039) + + """ + if alternative not in {'two-sided', 'greater', 'less'}: + raise ValueError("'alternative' must be 'two-sided'," + " 'greater', or 'less'.") + x, y = asarray(x), asarray(y) + n = len(x) + m = len(y) + if m < 1: + raise ValueError("Not enough other observations.") + if n < 1: + raise ValueError("Not enough test observations.") + + N = m + n + xy = r_[x, y] # combine + rank = _stats_py.rankdata(xy) + symrank = amin(array((rank, N - rank + 1)), 0) + AB = np.sum(symrank[:n], axis=0) + uxy = unique(xy) + repeats = (len(uxy) != len(xy)) + exact = ((m < 55) and (n < 55) and not repeats) + if repeats and (m < 55 or n < 55): + warnings.warn("Ties preclude use of exact statistic.", stacklevel=2) + if exact: + if alternative == 'two-sided': + pval = 2.0 * np.minimum(_abw_state.cdf(AB, n, m), + _abw_state.sf(AB, n, m)) + elif alternative == 'greater': + # AB statistic is _smaller_ when ratio 
of scales is larger, + # so this is the opposite of the usual calculation + pval = _abw_state.cdf(AB, n, m) + else: + pval = _abw_state.sf(AB, n, m) + return AnsariResult(AB, min(1.0, pval)) + + # otherwise compute normal approximation + if N % 2: # N odd + mnAB = n * (N+1.0)**2 / 4.0 / N + varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) + else: + mnAB = n * (N+2.0) / 4.0 + varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) + if repeats: # adjust variance estimates + # compute np.sum(tj * rj**2,axis=0) + fac = np.sum(symrank**2, axis=0) + if N % 2: # N odd + varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) + else: # N even + varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) + + # Small values of AB indicate larger dispersion for the x sample. + # Large values of AB indicate larger dispersion for the y sample. + # This is opposite to the way we define the ratio of scales. see [1]_. + z = (mnAB - AB) / sqrt(varAB) + pvalue = _get_pvalue(z, _SimpleNormal(), alternative, xp=np) + return AnsariResult(AB[()], pvalue[()]) + + +BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(BartlettResult, n_samples=None) +def bartlett(*samples, axis=0): + r"""Perform Bartlett's test for equal variances. + + Bartlett's test tests the null hypothesis that all input samples + are from populations with equal variances. For samples + from significantly non-normal populations, Levene's test + `levene` is more robust. + + Parameters + ---------- + sample1, sample2, ... : array_like + arrays of sample data. Only 1d arrays are accepted, they may have + different lengths. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The p-value of the test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + levene : A robust parametric test for equality of k variances + + Notes + ----- + Conover et al. 
(1981) examine many of the existing parametric and + nonparametric tests by extensive simulations and they conclude that the + tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be + superior in terms of robustness of departures from normality and power + ([3]_). + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm + .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical + Methods, Eighth Edition, Iowa State University Press. + .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical + Tests. Proceedings of the Royal Society of London. Series A, + Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. + .. [5] C.I. BLISS (1952), The Statistics of Bioassay: With Special + Reference to the Vitamins, pp 499-503, + :doi:`10.1016/C2013-0-12584-6`. + .. [6] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [7] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are + superior to t and F tests in biomedical research. The American + Statistician, 52(2), 127-132. + + Examples + -------- + In [5]_, the influence of vitamin C on the tooth growth of guinea pigs + was investigated. In a control study, 60 subjects were divided into + small dose, medium dose, and large dose groups that received + daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively. + After 42 days, the tooth growth was measured. + + The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record + tooth growth measurements of the three groups in microns. 
+ + >>> import numpy as np + >>> small_dose = np.array([ + ... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7, + ... 15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7 + ... ]) + >>> medium_dose = np.array([ + ... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5, + ... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3 + ... ]) + >>> large_dose = np.array([ + ... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5, + ... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23 + ... ]) + + The `bartlett` statistic is sensitive to differences in variances + between the samples. + + >>> from scipy import stats + >>> res = stats.bartlett(small_dose, medium_dose, large_dose) + >>> res.statistic + 0.6654670663030519 + + The value of the statistic tends to be high when there is a large + difference in variances. + + We can test for inequality of variance among the groups by comparing the + observed value of the statistic against the null distribution: the + distribution of statistic values derived under the null hypothesis that + the population variances of the three groups are equal. + + For this test, the null distribution follows the chi-square distribution + as shown below. + + >>> import matplotlib.pyplot as plt + >>> k = 3 # number of samples + >>> dist = stats.chi2(df=k-1) + >>> val = np.linspace(0, 5, 100) + >>> pdf = dist.pdf(val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(val, pdf, color='C0') + ... ax.set_title("Bartlett Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + ... ax.set_xlim(0, 5) + ... ax.set_ylim(0, 1) + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution greater than or equal to the observed value of the + statistic. 
+ + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props) + >>> i = val >= res.statistic + >>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0') + >>> plt.show() + + >>> res.pvalue + 0.71696121509966 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from distributions with identical variances that produces + such an extreme value of the statistic - this may be taken as evidence + against the null hypothesis in favor of the alternative: the variances of + the groups are not equal. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [6]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + - Small p-values are not evidence for a *large* effect; rather, they can + only provide evidence for a "significant" effect, meaning that they are + unlikely to have occurred under the null hypothesis. + + Note that the chi-square distribution provides the null distribution + when the observations are normally distributed. For small samples + drawn from non-normal populations, it may be more appropriate to + perform a + permutation test: Under the null hypothesis that all three samples were + drawn from the same population, each of the measurements is equally likely + to have been observed in any of the three samples. 
Therefore, we can form + a randomized null distribution by calculating the statistic under many + randomly-generated partitionings of the observations into the three + samples. + + >>> def statistic(*samples): + ... return stats.bartlett(*samples).statistic + >>> ref = stats.permutation_test( + ... (small_dose, medium_dose, large_dose), statistic, + ... permutation_type='independent', alternative='greater' + ... ) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> bins = np.linspace(0, 5, 25) + >>> ax.hist( + ... ref.null_distribution, bins=bins, density=True, facecolor="C1" + ... ) + >>> ax.legend(['aymptotic approximation\n(many observations)', + ... 'randomized null distribution']) + >>> plot(ax) + >>> plt.show() + + >>> ref.pvalue # randomized test p-value + 0.5387 # may vary + + Note that there is significant disagreement between the p-value calculated + here and the asymptotic approximation returned by `bartlett` above. + The statistical inferences that can be drawn rigorously from a permutation + test are limited; nonetheless, they may be the preferred approach in many + circumstances [7]_. + + Following is another generic example where the null hypothesis would be + rejected. + + Test whether the lists `a`, `b` and `c` come from populations + with equal variances. + + >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] + >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] + >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] + >>> stat, p = stats.bartlett(a, b, c) + >>> p + 1.1254782518834628e-05 + + The very small p-value suggests that the populations do not have equal + variances. 
+ + This is not surprising, given that the sample variance of `b` is much + larger than that of `a` and `c`: + + >>> [np.var(x, ddof=1) for x in [a, b, c]] + [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] + + """ + xp = array_namespace(*samples) + + k = len(samples) + if k < 2: + raise ValueError("Must enter at least two input sample vectors.") + + samples = _broadcast_arrays(samples, axis=axis, xp=xp) + samples = [xp_moveaxis_to_end(sample, axis, xp=xp) for sample in samples] + + Ni = [xp.asarray(sample.shape[-1], dtype=sample.dtype) for sample in samples] + Ni = [xp.broadcast_to(N, samples[0].shape[:-1]) for N in Ni] + ssq = [xp.var(sample, correction=1, axis=-1) for sample in samples] + Ni = [arr[xp.newaxis, ...] for arr in Ni] + ssq = [arr[xp.newaxis, ...] for arr in ssq] + Ni = xp.concat(Ni, axis=0) + ssq = xp.concat(ssq, axis=0) + Ntot = xp.sum(Ni, axis=0) + spsq = xp.sum((Ni - 1)*ssq, axis=0) / (Ntot - k) + numer = (Ntot - k) * xp.log(spsq) - xp.sum((Ni - 1)*xp.log(ssq), axis=0) + denom = 1 + 1/(3*(k - 1)) * ((xp.sum(1/(Ni - 1), axis=0)) - 1/(Ntot - k)) + T = numer / denom + + chi2 = _SimpleChi2(xp.asarray(k-1)) + pvalue = _get_pvalue(T, chi2, alternative='greater', symmetric=False, xp=xp) + + T = T[()] if T.ndim == 0 else T + pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue + + return BartlettResult(T, pvalue) + + +LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(LeveneResult, n_samples=None) +def levene(*samples, center='median', proportiontocut=0.05): + r"""Perform Levene test for equal variances. + + The Levene test tests the null hypothesis that all input samples + are from populations with equal variances. Levene's test is an + alternative to Bartlett's test `bartlett` in the case where + there are significant deviations from normality. + + Parameters + ---------- + sample1, sample2, ... : array_like + The sample data, possibly with different lengths. 
Only one-dimensional + samples are accepted. + center : {'mean', 'median', 'trimmed'}, optional + Which function of the data to use in the test. The default + is 'median'. + proportiontocut : float, optional + When `center` is 'trimmed', this gives the proportion of data points + to cut from each end. (See `scipy.stats.trim_mean`.) + Default is 0.05. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The p-value for the test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + bartlett : A parametric test for equality of k variances in normal samples + + Notes + ----- + Three variations of Levene's test are possible. The possibilities + and their recommended usages are: + + * 'median' : Recommended for skewed (non-normal) distributions> + * 'mean' : Recommended for symmetric, moderate-tailed distributions. + * 'trimmed' : Recommended for heavy-tailed distributions. + + The test version using the mean was proposed in the original article + of Levene ([2]_) while the median and trimmed mean have been studied by + Brown and Forsythe ([3]_), sometimes also referred to as Brown-Forsythe + test. + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm + .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: + Essays in Honor of Harold Hotelling, I. Olkin et al. eds., + Stanford University Press, pp. 278-292. + .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American + Statistical Association, 69, 364-367 + .. [4] C.I. BLISS (1952), The Statistics of Bioassay: With Special + Reference to the Vitamins, pp 499-503, + :doi:`10.1016/C2013-0-12584-6`. + .. [5] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [6] Ludbrook, J., & Dudley, H. (1998). 
Why permutation tests are + superior to t and F tests in biomedical research. The American + Statistician, 52(2), 127-132. + + Examples + -------- + In [4]_, the influence of vitamin C on the tooth growth of guinea pigs + was investigated. In a control study, 60 subjects were divided into + small dose, medium dose, and large dose groups that received + daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively. + After 42 days, the tooth growth was measured. + + The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record + tooth growth measurements of the three groups in microns. + + >>> import numpy as np + >>> small_dose = np.array([ + ... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7, + ... 15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7 + ... ]) + >>> medium_dose = np.array([ + ... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5, + ... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3 + ... ]) + >>> large_dose = np.array([ + ... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5, + ... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23 + ... ]) + + The `levene` statistic is sensitive to differences in variances + between the samples. + + >>> from scipy import stats + >>> res = stats.levene(small_dose, medium_dose, large_dose) + >>> res.statistic + 0.6457341109631506 + + The value of the statistic tends to be high when there is a large + difference in variances. + + We can test for inequality of variance among the groups by comparing the + observed value of the statistic against the null distribution: the + distribution of statistic values derived under the null hypothesis that + the population variances of the three groups are equal. + + For this test, the null distribution follows the F distribution as shown + below. 
+ + >>> import matplotlib.pyplot as plt + >>> k, n = 3, 60 # number of samples, total number of observations + >>> dist = stats.f(dfn=k-1, dfd=n-k) + >>> val = np.linspace(0, 5, 100) + >>> pdf = dist.pdf(val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(val, pdf, color='C0') + ... ax.set_title("Levene Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + ... ax.set_xlim(0, 5) + ... ax.set_ylim(0, 1) + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution greater than or equal to the observed value of the + statistic. + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props) + >>> i = val >= res.statistic + >>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0') + >>> plt.show() + + >>> res.pvalue + 0.5280694573759905 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from distributions with identical variances that produces + such an extreme value of the statistic - this may be taken as evidence + against the null hypothesis in favor of the alternative: the variances of + the groups are not equal. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [5]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). 
+ - Small p-values are not evidence for a *large* effect; rather, they can + only provide evidence for a "significant" effect, meaning that they are + unlikely to have occurred under the null hypothesis. + + Note that the F distribution provides an asymptotic approximation of the + null distribution. + For small samples, it may be more appropriate to perform a permutation + test: Under the null hypothesis that all three samples were drawn from + the same population, each of the measurements is equally likely to have + been observed in any of the three samples. Therefore, we can form a + randomized null distribution by calculating the statistic under many + randomly-generated partitionings of the observations into the three + samples. + + >>> def statistic(*samples): + ... return stats.levene(*samples).statistic + >>> ref = stats.permutation_test( + ... (small_dose, medium_dose, large_dose), statistic, + ... permutation_type='independent', alternative='greater' + ... ) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> bins = np.linspace(0, 5, 25) + >>> ax.hist( + ... ref.null_distribution, bins=bins, density=True, facecolor="C1" + ... ) + >>> ax.legend(['aymptotic approximation\n(many observations)', + ... 'randomized null distribution']) + >>> plot(ax) + >>> plt.show() + + >>> ref.pvalue # randomized test p-value + 0.4559 # may vary + + Note that there is significant disagreement between the p-value calculated + here and the asymptotic approximation returned by `levene` above. + The statistical inferences that can be drawn rigorously from a permutation + test are limited; nonetheless, they may be the preferred approach in many + circumstances [6]_. + + Following is another generic example where the null hypothesis would be + rejected. + + Test whether the lists `a`, `b` and `c` come from populations + with equal variances. 
+ + >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] + >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] + >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] + >>> stat, p = stats.levene(a, b, c) + >>> p + 0.002431505967249681 + + The small p-value suggests that the populations do not have equal + variances. + + This is not surprising, given that the sample variance of `b` is much + larger than that of `a` and `c`: + + >>> [np.var(x, ddof=1) for x in [a, b, c]] + [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] + + """ + if center not in ['mean', 'median', 'trimmed']: + raise ValueError("center must be 'mean', 'median' or 'trimmed'.") + + k = len(samples) + if k < 2: + raise ValueError("Must enter at least two input sample vectors.") + + Ni = np.empty(k) + Yci = np.empty(k, 'd') + + if center == 'median': + + def func(x): + return np.median(x, axis=0) + + elif center == 'mean': + + def func(x): + return np.mean(x, axis=0) + + else: # center == 'trimmed' + samples = tuple(_stats_py.trimboth(np.sort(sample), proportiontocut) + for sample in samples) + + def func(x): + return np.mean(x, axis=0) + + for j in range(k): + Ni[j] = len(samples[j]) + Yci[j] = func(samples[j]) + Ntot = np.sum(Ni, axis=0) + + # compute Zij's + Zij = [None] * k + for i in range(k): + Zij[i] = abs(asarray(samples[i]) - Yci[i]) + + # compute Zbari + Zbari = np.empty(k, 'd') + Zbar = 0.0 + for i in range(k): + Zbari[i] = np.mean(Zij[i], axis=0) + Zbar += Zbari[i] * Ni[i] + + Zbar /= Ntot + numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0) + + # compute denom_variance + dvar = 0.0 + for i in range(k): + dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0) + + denom = (k - 1.0) * dvar + + W = numer / denom + pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf + return LeveneResult(W, pval) + + +def _apply_func(x, g, func): + # g is list of indices into x + # separating x into different groups + # func should be applied 
over the groups + g = unique(r_[0, g, len(x)]) + output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)] + + return asarray(output) + + +FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(FlignerResult, n_samples=None) +def fligner(*samples, center='median', proportiontocut=0.05): + r"""Perform Fligner-Killeen test for equality of variance. + + Fligner's test tests the null hypothesis that all input samples + are from populations with equal variances. Fligner-Killeen's test is + distribution free when populations are identical [2]_. + + Parameters + ---------- + sample1, sample2, ... : array_like + Arrays of sample data. Need not be the same length. + center : {'mean', 'median', 'trimmed'}, optional + Keyword argument controlling which function of the data is used in + computing the test statistic. The default is 'median'. + proportiontocut : float, optional + When `center` is 'trimmed', this gives the proportion of data points + to cut from each end. (See `scipy.stats.trim_mean`.) + Default is 0.05. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The p-value for the hypothesis test. + + See Also + -------- + bartlett : A parametric test for equality of k variances in normal samples + levene : A robust parametric test for equality of k variances + + Notes + ----- + As with Levene's test there are three variants of Fligner's test that + differ by the measure of central tendency used in the test. See `levene` + for more information. + + Conover et al. (1981) examine many of the existing parametric and + nonparametric tests by extensive simulations and they conclude that the + tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be + superior in terms of robustness of departures from normality and power + [3]_. + + References + ---------- + .. [1] Park, C. and Lindsay, B. G. (1999). 
Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf + .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample + tests for scale. 'Journal of the American Statistical Association.' + 71(353), 210-213. + .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A + comparative study of tests for homogeneity of variances, with + applications to the outer continental shelf bidding data. + Technometrics, 23(4), 351-361. + .. [5] C.I. BLISS (1952), The Statistics of Bioassay: With Special + Reference to the Vitamins, pp 499-503, + :doi:`10.1016/C2013-0-12584-6`. + .. [6] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [7] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are + superior to t and F tests in biomedical research. The American + Statistician, 52(2), 127-132. + + Examples + -------- + In [5]_, the influence of vitamin C on the tooth growth of guinea pigs + was investigated. In a control study, 60 subjects were divided into + small dose, medium dose, and large dose groups that received + daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively. + After 42 days, the tooth growth was measured. + + The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record + tooth growth measurements of the three groups in microns. + + >>> import numpy as np + >>> small_dose = np.array([ + ... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7, + ... 
15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7 + ... ]) + >>> medium_dose = np.array([ + ... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5, + ... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3 + ... ]) + >>> large_dose = np.array([ + ... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5, + ... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23 + ... ]) + + The `fligner` statistic is sensitive to differences in variances + between the samples. + + >>> from scipy import stats + >>> res = stats.fligner(small_dose, medium_dose, large_dose) + >>> res.statistic + 1.3878943408857916 + + The value of the statistic tends to be high when there is a large + difference in variances. + + We can test for inequality of variance among the groups by comparing the + observed value of the statistic against the null distribution: the + distribution of statistic values derived under the null hypothesis that + the population variances of the three groups are equal. + + For this test, the null distribution follows the chi-square distribution + as shown below. + + >>> import matplotlib.pyplot as plt + >>> k = 3 # number of samples + >>> dist = stats.chi2(df=k-1) + >>> val = np.linspace(0, 8, 100) + >>> pdf = dist.pdf(val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(val, pdf, color='C0') + ... ax.set_title("Fligner Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + ... ax.set_xlim(0, 8) + ... ax.set_ylim(0, 0.5) + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution greater than or equal to the observed value of the + statistic. 
+ + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.4f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props) + >>> i = val >= res.statistic + >>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0') + >>> plt.show() + + >>> res.pvalue + 0.49960016501182125 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from distributions with identical variances that produces + such an extreme value of the statistic - this may be taken as evidence + against the null hypothesis in favor of the alternative: the variances of + the groups are not equal. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [6]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + - Small p-values are not evidence for a *large* effect; rather, they can + only provide evidence for a "significant" effect, meaning that they are + unlikely to have occurred under the null hypothesis. + + Note that the chi-square distribution provides an asymptotic approximation + of the null distribution. + For small samples, it may be more appropriate to perform a + permutation test: Under the null hypothesis that all three samples were + drawn from the same population, each of the measurements is equally likely + to have been observed in any of the three samples. Therefore, we can form + a randomized null distribution by calculating the statistic under many + randomly-generated partitionings of the observations into the three + samples. + + >>> def statistic(*samples): + ... 
return stats.fligner(*samples).statistic + >>> ref = stats.permutation_test( + ... (small_dose, medium_dose, large_dose), statistic, + ... permutation_type='independent', alternative='greater' + ... ) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> bins = np.linspace(0, 8, 25) + >>> ax.hist( + ... ref.null_distribution, bins=bins, density=True, facecolor="C1" + ... ) + >>> ax.legend(['aymptotic approximation\n(many observations)', + ... 'randomized null distribution']) + >>> plot(ax) + >>> plt.show() + + >>> ref.pvalue # randomized test p-value + 0.4332 # may vary + + Note that there is significant disagreement between the p-value calculated + here and the asymptotic approximation returned by `fligner` above. + The statistical inferences that can be drawn rigorously from a permutation + test are limited; nonetheless, they may be the preferred approach in many + circumstances [7]_. + + Following is another generic example where the null hypothesis would be + rejected. + + Test whether the lists `a`, `b` and `c` come from populations + with equal variances. + + >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] + >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] + >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] + >>> stat, p = stats.fligner(a, b, c) + >>> p + 0.00450826080004775 + + The small p-value suggests that the populations do not have equal + variances. 
+ + This is not surprising, given that the sample variance of `b` is much + larger than that of `a` and `c`: + + >>> [np.var(x, ddof=1) for x in [a, b, c]] + [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] + + """ + if center not in ['mean', 'median', 'trimmed']: + raise ValueError("center must be 'mean', 'median' or 'trimmed'.") + + k = len(samples) + if k < 2: + raise ValueError("Must enter at least two input sample vectors.") + + # Handle empty input + for sample in samples: + if sample.size == 0: + NaN = _get_nan(*samples) + return FlignerResult(NaN, NaN) + + if center == 'median': + + def func(x): + return np.median(x, axis=0) + + elif center == 'mean': + + def func(x): + return np.mean(x, axis=0) + + else: # center == 'trimmed' + samples = tuple(_stats_py.trimboth(sample, proportiontocut) + for sample in samples) + + def func(x): + return np.mean(x, axis=0) + + Ni = asarray([len(samples[j]) for j in range(k)]) + Yci = asarray([func(samples[j]) for j in range(k)]) + Ntot = np.sum(Ni, axis=0) + # compute Zij's + Zij = [abs(asarray(samples[i]) - Yci[i]) for i in range(k)] + allZij = [] + g = [0] + for i in range(k): + allZij.extend(list(Zij[i])) + g.append(len(allZij)) + + ranks = _stats_py.rankdata(allZij) + sample = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) + + # compute Aibar + Aibar = _apply_func(sample, g, np.sum) / Ni + anbar = np.mean(sample, axis=0) + varsq = np.var(sample, axis=0, ddof=1) + statistic = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq + chi2 = _SimpleChi2(k-1) + pval = _get_pvalue(statistic, chi2, alternative='greater', symmetric=False, xp=np) + return FlignerResult(statistic, pval) + + +@_axis_nan_policy_factory(lambda x1: (x1,), n_samples=4, n_outputs=1) +def _mood_inner_lc(xy, x, diffs, sorted_xy, n, m, N) -> float: + # Obtain the unique values and their frequencies from the pooled samples. + # "a_j, + b_j, = t_j, for j = 1, ... 
k" where `k` is the number of unique + # classes, and "[t]he number of values associated with the x's and y's in + # the jth class will be denoted by a_j, and b_j respectively." + # (Mielke, 312) + # Reuse previously computed sorted array and `diff` arrays to obtain the + # unique values and counts. Prepend `diffs` with a non-zero to indicate + # that the first element should be marked as not matching what preceded it. + diffs_prep = np.concatenate(([1], diffs)) + # Unique elements are where the was a difference between elements in the + # sorted array + uniques = sorted_xy[diffs_prep != 0] + # The count of each element is the bin size for each set of consecutive + # differences where the difference is zero. Replace nonzero differences + # with 1 and then use the cumulative sum to count the indices. + t = np.bincount(np.cumsum(np.asarray(diffs_prep != 0, dtype=int)))[1:] + k = len(uniques) + js = np.arange(1, k + 1, dtype=int) + # the `b` array mentioned in the paper is not used, outside of the + # calculation of `t`, so we do not need to calculate it separately. Here + # we calculate `a`. In plain language, `a[j]` is the number of values in + # `x` that equal `uniques[j]`. + sorted_xyx = np.sort(np.concatenate((xy, x))) + diffs = np.diff(sorted_xyx) + diffs_prep = np.concatenate(([1], diffs)) + diff_is_zero = np.asarray(diffs_prep != 0, dtype=int) + xyx_counts = np.bincount(np.cumsum(diff_is_zero))[1:] + a = xyx_counts - t + # "Define .. a_0 = b_0 = t_0 = S_0 = 0" (Mielke 312) so we shift `a` + # and `t` arrays over 1 to allow a first element of 0 to accommodate this + # indexing. + t = np.concatenate(([0], t)) + a = np.concatenate(([0], a)) + # S is built from `t`, so it does not need a preceding zero added on. + S = np.cumsum(t) + # define a copy of `S` with a prepending zero for later use to avoid + # the need for indexing. + S_i_m1 = np.concatenate(([0], S[:-1])) + + # Psi, as defined by the 6th unnumbered equation on page 313 (Mielke). 
+ # Note that in the paper there is an error where the denominator `2` is + # squared when it should be the entire equation. + def psi(indicator): + return (indicator - (N + 1)/2)**2 + + # define summation range for use in calculation of phi, as seen in sum + # in the unnumbered equation on the bottom of page 312 (Mielke). + s_lower = S[js - 1] + 1 + s_upper = S[js] + 1 + phi_J = [np.arange(s_lower[idx], s_upper[idx]) for idx in range(k)] + + # for every range in the above array, determine the sum of psi(I) for + # every element in the range. Divide all the sums by `t`. Following the + # last unnumbered equation on page 312. + phis = [np.sum(psi(I_j)) for I_j in phi_J] / t[js] + + # `T` is equal to a[j] * phi[j], per the first unnumbered equation on + # page 312. `phis` is already in the order based on `js`, so we index + # into `a` with `js` as well. + T = sum(phis * a[js]) + + # The approximate statistic + E_0_T = n * (N * N - 1) / 12 + + varM = (m * n * (N + 1.0) * (N ** 2 - 4) / 180 - + m * n / (180 * N * (N - 1)) * np.sum( + t * (t**2 - 1) * (t**2 - 4 + (15 * (N - S - S_i_m1) ** 2)) + )) + + return ((T - E_0_T) / np.sqrt(varM),) + + +def _mood_too_small(samples, kwargs, axis=-1): + x, y = samples + n = x.shape[axis] + m = y.shape[axis] + N = m + n + return N < 3 + + +@_axis_nan_policy_factory(SignificanceResult, n_samples=2, too_small=_mood_too_small) +def mood(x, y, axis=0, alternative="two-sided"): + """Perform Mood's test for equal scale parameters. + + Mood's two-sample test for scale parameters is a non-parametric + test for the null hypothesis that two samples are drawn from the + same distribution with the same scale parameter. + + Parameters + ---------- + x, y : array_like + Arrays of sample data. There must be at least three observations + total. + axis : int, optional + The axis along which the samples are tested. `x` and `y` can be of + different length along `axis`. 
+ If `axis` is None, `x` and `y` are flattened and the test is done on + all values in the flattened arrays. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the scales of the distributions underlying `x` and `y` + are different. + * 'less': the scale of the distribution underlying `x` is less than + the scale of the distribution underlying `y`. + * 'greater': the scale of the distribution underlying `x` is greater + than the scale of the distribution underlying `y`. + + .. versionadded:: 1.7.0 + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : scalar or ndarray + The z-score for the hypothesis test. For 1-D inputs a scalar is + returned. + pvalue : scalar ndarray + The p-value for the hypothesis test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + ansari : A non-parametric test for the equality of 2 variances + bartlett : A parametric test for equality of k variances in normal samples + levene : A parametric test for equality of k variances + + Notes + ----- + The data are assumed to be drawn from probability distributions ``f(x)`` + and ``f(x/s) / s`` respectively, for some probability density function f. + The null hypothesis is that ``s == 1``. + + For multi-dimensional arrays, if the inputs are of shapes + ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the + resulting z and p values will have shape ``(n0, n2, n3)``. Note that + ``n1`` and ``m1`` don't have to be equal, but the other dimensions do. + + References + ---------- + [1] Mielke, Paul W. "Note on Some Squared Rank Tests with Existing Ties." + Technometrics, vol. 9, no. 2, 1967, pp. 312-14. JSTOR, + https://doi.org/10.2307/1266427. Accessed 18 May 2022. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> x2 = rng.standard_normal((2, 45, 6, 7)) + >>> x1 = rng.standard_normal((2, 30, 6, 7)) + >>> res = stats.mood(x1, x2, axis=1) + >>> res.pvalue.shape + (2, 6, 7) + + Find the number of points where the difference in scale is not significant: + + >>> (res.pvalue > 0.1).sum() + 78 + + Perform the test with different scales: + + >>> x1 = rng.standard_normal((2, 30)) + >>> x2 = rng.standard_normal((2, 35)) * 10.0 + >>> stats.mood(x1, x2, axis=1) + SignificanceResult(statistic=array([-5.76174136, -6.12650783]), + pvalue=array([8.32505043e-09, 8.98287869e-10])) + + """ + x = np.asarray(x, dtype=float) + y = np.asarray(y, dtype=float) + + if axis < 0: + axis = x.ndim + axis + + # Determine shape of the result arrays + res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) + if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if + ax != axis])): + raise ValueError("Dimensions of x and y on all axes except `axis` " + "should match") + + n = x.shape[axis] + m = y.shape[axis] + N = m + n + if N < 3: + raise ValueError("Not enough observations.") + + xy = np.concatenate((x, y), axis=axis) + # determine if any of the samples contain ties + sorted_xy = np.sort(xy, axis=axis) + diffs = np.diff(sorted_xy, axis=axis) + if 0 in diffs: + z = np.asarray(_mood_inner_lc(xy, x, diffs, sorted_xy, n, m, N, + axis=axis)) + else: + if axis != 0: + xy = np.moveaxis(xy, axis, 0) + + xy = xy.reshape(xy.shape[0], -1) + # Generalized to the n-dimensional case by adding the axis argument, + # and using for loops, since rankdata is not vectorized. For improving + # performance consider vectorizing rankdata function. + all_ranks = np.empty_like(xy) + for j in range(xy.shape[1]): + all_ranks[:, j] = _stats_py.rankdata(xy[:, j]) + + Ri = all_ranks[:n] + M = np.sum((Ri - (N + 1.0) / 2) ** 2, axis=0) + # Approx stat. 
+ mnM = n * (N * N - 1.0) / 12 + varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 + z = (M - mnM) / sqrt(varM) + pval = _get_pvalue(z, _SimpleNormal(), alternative, xp=np) + + if res_shape == (): + # Return scalars, not 0-D arrays + z = z[0] + pval = pval[0] + else: + z.shape = res_shape + pval.shape = res_shape + return SignificanceResult(z[()], pval[()]) + + +WilcoxonResult = _make_tuple_bunch('WilcoxonResult', ['statistic', 'pvalue']) + + +def wilcoxon_result_unpacker(res): + if hasattr(res, 'zstatistic'): + return res.statistic, res.pvalue, res.zstatistic + else: + return res.statistic, res.pvalue + + +def wilcoxon_result_object(statistic, pvalue, zstatistic=None): + res = WilcoxonResult(statistic, pvalue) + if zstatistic is not None: + res.zstatistic = zstatistic + return res + + +def wilcoxon_outputs(kwds): + method = kwds.get('method', 'auto') + if method == 'approx': + return 3 + return 2 + + +@_rename_parameter("mode", "method") +@_axis_nan_policy_factory( + wilcoxon_result_object, paired=True, + n_samples=lambda kwds: 2 if kwds.get('y', None) is not None else 1, + result_to_tuple=wilcoxon_result_unpacker, n_outputs=wilcoxon_outputs, +) +def wilcoxon(x, y=None, zero_method="wilcox", correction=False, + alternative="two-sided", method='auto', *, axis=0): + """Calculate the Wilcoxon signed-rank test. + + The Wilcoxon signed-rank test tests the null hypothesis that two + related paired samples come from the same distribution. In particular, + it tests whether the distribution of the differences ``x - y`` is symmetric + about zero. It is a non-parametric version of the paired T-test. + + Parameters + ---------- + x : array_like + Either the first set of measurements (in which case ``y`` is the second + set of measurements), or the differences between two sets of + measurements (in which case ``y`` is not to be specified.) Must be + one-dimensional. 
+ y : array_like, optional + Either the second set of measurements (if ``x`` is the first set of + measurements), or not specified (if ``x`` is the differences between + two sets of measurements.) Must be one-dimensional. + + .. warning:: + When `y` is provided, `wilcoxon` calculates the test statistic + based on the ranks of the absolute values of ``d = x - y``. + Roundoff error in the subtraction can result in elements of ``d`` + being assigned different ranks even when they would be tied with + exact arithmetic. Rather than passing `x` and `y` separately, + consider computing the difference ``x - y``, rounding as needed to + ensure that only truly unique elements are numerically distinct, + and passing the result as `x`, leaving `y` at the default (None). + + zero_method : {"wilcox", "pratt", "zsplit"}, optional + There are different conventions for handling pairs of observations + with equal values ("zero-differences", or "zeros"). + + * "wilcox": Discards all zero-differences (default); see [4]_. + * "pratt": Includes zero-differences in the ranking process, + but drops the ranks of the zeros (more conservative); see [3]_. + In this case, the normal approximation is adjusted as in [5]_. + * "zsplit": Includes zero-differences in the ranking process and + splits the zero rank between positive and negative ones. + + correction : bool, optional + If True, apply continuity correction by adjusting the Wilcoxon rank + statistic by 0.5 towards the mean value when computing the + z-statistic if a normal approximation is used. Default is False. + alternative : {"two-sided", "greater", "less"}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + In the following, let ``d`` represent the difference between the paired + samples: ``d = x - y`` if both ``x`` and ``y`` are provided, or + ``d = x`` otherwise. + + * 'two-sided': the distribution underlying ``d`` is not symmetric + about zero. 
+ * 'less': the distribution underlying ``d`` is stochastically less + than a distribution symmetric about zero. + * 'greater': the distribution underlying ``d`` is stochastically + greater than a distribution symmetric about zero. + + method : {"auto", "exact", "approx"} or `PermutationMethod` instance, optional + Method to calculate the p-value, see Notes. Default is "auto". + + axis : int or None, default: 0 + If an int, the axis of the input along which to compute the statistic. + The statistic of each axis-slice (e.g. row) of the input will appear + in a corresponding element of the output. If ``None``, the input will + be raveled before computing the statistic. + + Returns + ------- + An object with the following attributes. + + statistic : array_like + If `alternative` is "two-sided", the sum of the ranks of the + differences above or below zero, whichever is smaller. + Otherwise the sum of the ranks of the differences above zero. + pvalue : array_like + The p-value for the test depending on `alternative` and `method`. + zstatistic : array_like + When ``method = 'approx'``, this is the normalized z-statistic:: + + z = (T - mn - d) / se + + where ``T`` is `statistic` as defined above, ``mn`` is the mean of the + distribution under the null hypothesis, ``d`` is a continuity + correction, and ``se`` is the standard error. + When ``method != 'approx'``, this attribute is not available. + + See Also + -------- + kruskal, mannwhitneyu + + Notes + ----- + In the following, let ``d`` represent the difference between the paired + samples: ``d = x - y`` if both ``x`` and ``y`` are provided, or ``d = x`` + otherwise. Assume that all elements of ``d`` are independent and + identically distributed observations, and all are distinct and nonzero. + + - When ``len(d)`` is sufficiently large, the null distribution of the + normalized test statistic (`zstatistic` above) is approximately normal, + and ``method = 'approx'`` can be used to compute the p-value. 
+ + - When ``len(d)`` is small, the normal approximation may not be accurate, + and ``method='exact'`` is preferred (at the cost of additional + execution time). + + - The default, ``method='auto'``, selects between the two: when + ``len(d) <= 50`` and there are no zeros, the exact method is used; + otherwise, the approximate method is used. + + The presence of "ties" (i.e. not all elements of ``d`` are unique) or + "zeros" (i.e. elements of ``d`` are zero) changes the null distribution + of the test statistic, and ``method='exact'`` no longer calculates + the exact p-value. If ``method='approx'``, the z-statistic is adjusted + for more accurate comparison against the standard normal, but still, + for finite sample sizes, the standard normal is only an approximation of + the true null distribution of the z-statistic. For such situations, the + `method` parameter also accepts instances `PermutationMethod`. In this + case, the p-value is computed using `permutation_test` with the provided + configuration options and other appropriate settings. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test + .. [2] Conover, W.J., Practical Nonparametric Statistics, 1971. + .. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed + Rank Procedures, Journal of the American Statistical Association, + Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526` + .. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods, + Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968` + .. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank + Sampling Distribution When Zero Differences are Present, + Journal of the American Statistical Association, Vol. 62, 1967, + pp. 1068-1069. 
:doi:`10.1080/01621459.1967.10500917` + + Examples + -------- + In [4]_, the differences in height between cross- and self-fertilized + corn plants is given as follows: + + >>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75] + + Cross-fertilized plants appear to be higher. To test the null + hypothesis that there is no height difference, we can apply the + two-sided test: + + >>> from scipy.stats import wilcoxon + >>> res = wilcoxon(d) + >>> res.statistic, res.pvalue + (24.0, 0.041259765625) + + Hence, we would reject the null hypothesis at a confidence level of 5%, + concluding that there is a difference in height between the groups. + To confirm that the median of the differences can be assumed to be + positive, we use: + + >>> res = wilcoxon(d, alternative='greater') + >>> res.statistic, res.pvalue + (96.0, 0.0206298828125) + + This shows that the null hypothesis that the median is negative can be + rejected at a confidence level of 5% in favor of the alternative that + the median is greater than zero. The p-values above are exact. Using the + normal approximation gives very similar values: + + >>> res = wilcoxon(d, method='approx') + >>> res.statistic, res.pvalue + (24.0, 0.04088813291185591) + + Note that the statistic changed to 96 in the one-sided case (the sum + of ranks of positive differences) whereas it is 24 in the two-sided + case (the minimum of sum of ranks above and below zero). + + In the example above, the differences in height between paired plants are + provided to `wilcoxon` directly. Alternatively, `wilcoxon` accepts two + samples of equal length, calculates the differences between paired + elements, then performs the test. 
Consider the samples ``x`` and ``y``: + + >>> import numpy as np + >>> x = np.array([0.5, 0.825, 0.375, 0.5]) + >>> y = np.array([0.525, 0.775, 0.325, 0.55]) + >>> res = wilcoxon(x, y, alternative='greater') + >>> res + WilcoxonResult(statistic=5.0, pvalue=0.5625) + + Note that had we calculated the differences by hand, the test would have + produced different results: + + >>> d = [-0.025, 0.05, 0.05, -0.05] + >>> ref = wilcoxon(d, alternative='greater') + >>> ref + WilcoxonResult(statistic=6.0, pvalue=0.4375) + + The substantial difference is due to roundoff error in the results of + ``x-y``: + + >>> d - (x-y) + array([2.08166817e-17, 6.93889390e-17, 1.38777878e-17, 4.16333634e-17]) + + Even though we expected all the elements of ``(x-y)[1:]`` to have the same + magnitude ``0.05``, they have slightly different magnitudes in practice, + and therefore are assigned different ranks in the test. Before performing + the test, consider calculating ``d`` and adjusting it as necessary to + ensure that theoretically identically values are not numerically distinct. + For example: + + >>> d2 = np.around(x - y, decimals=3) + >>> wilcoxon(d2, alternative='greater') + WilcoxonResult(statistic=6.0, pvalue=0.4375) + + """ + return _wilcoxon._wilcoxon_nd(x, y, zero_method, correction, alternative, + method, axis) + + +MedianTestResult = _make_tuple_bunch( + 'MedianTestResult', + ['statistic', 'pvalue', 'median', 'table'], [] +) + + +def median_test(*samples, ties='below', correction=True, lambda_=1, + nan_policy='propagate'): + """Perform a Mood's median test. + + Test that two or more samples come from populations with the same median. + + Let ``n = len(samples)`` be the number of samples. The "grand median" of + all the data is computed, and a contingency table is formed by + classifying the values in each sample as being above or below the grand + median. 
The contingency table, along with `correction` and `lambda_`, + are passed to `scipy.stats.chi2_contingency` to compute the test statistic + and p-value. + + Parameters + ---------- + sample1, sample2, ... : array_like + The set of samples. There must be at least two samples. + Each sample must be a one-dimensional sequence containing at least + one value. The samples are not required to have the same length. + ties : str, optional + Determines how values equal to the grand median are classified in + the contingency table. The string must be one of:: + + "below": + Values equal to the grand median are counted as "below". + "above": + Values equal to the grand median are counted as "above". + "ignore": + Values equal to the grand median are not counted. + + The default is "below". + correction : bool, optional + If True, *and* there are just two samples, apply Yates' correction + for continuity when computing the test statistic associated with + the contingency table. Default is True. + lambda_ : float or str, optional + By default, the statistic computed in this test is Pearson's + chi-squared statistic. `lambda_` allows a statistic from the + Cressie-Read power divergence family to be used instead. See + `power_divergence` for details. + Default is 1 (Pearson's chi-squared statistic). + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + res : MedianTestResult + An object containing attributes: + + statistic : float + The test statistic. The statistic that is returned is determined + by `lambda_`. The default is Pearson's chi-squared statistic. + pvalue : float + The p-value of the test. + median : float + The grand median. + table : ndarray + The contingency table. The shape of the table is (2, n), where + n is the number of samples. 
The first row holds the counts of the + values above the grand median, and the second row holds the counts + of the values below the grand median. The table allows further + analysis with, for example, `scipy.stats.chi2_contingency`, or with + `scipy.stats.fisher_exact` if there are two samples, without having + to recompute the table. If ``nan_policy`` is "propagate" and there + are nans in the input, the return value for ``table`` is ``None``. + + See Also + -------- + kruskal : Compute the Kruskal-Wallis H-test for independent samples. + mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. + + Notes + ----- + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill + (1950), pp. 394-399. + .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). + See Sections 8.12 and 10.15. + + Examples + -------- + A biologist runs an experiment in which there are three groups of plants. + Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. + Each plant produces a number of seeds. The seed counts for each group + are:: + + Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 + Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 + Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 + + The following code applies Mood's median test to these samples. 
+ + >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] + >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] + >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] + >>> from scipy.stats import median_test + >>> res = median_test(g1, g2, g3) + + The median is + + >>> res.median + 34.0 + + and the contingency table is + + >>> res.table + array([[ 5, 10, 7], + [11, 5, 10]]) + + `p` is too large to conclude that the medians are not the same: + + >>> res.pvalue + 0.12609082774093244 + + The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to + `median_test`. + + >>> res = median_test(g1, g2, g3, lambda_="log-likelihood") + >>> res.pvalue + 0.12224779737117837 + + The median occurs several times in the data, so we'll get a different + result if, for example, ``ties="above"`` is used: + + >>> res = median_test(g1, g2, g3, ties="above") + >>> res.pvalue + 0.063873276069553273 + + >>> res.table + array([[ 5, 11, 9], + [11, 4, 8]]) + + This example demonstrates that if the data set is not large and there + are values equal to the median, the p-value can be sensitive to the + choice of `ties`. + + """ + if len(samples) < 2: + raise ValueError('median_test requires two or more samples.') + + ties_options = ['below', 'above', 'ignore'] + if ties not in ties_options: + raise ValueError(f"invalid 'ties' option '{ties}'; 'ties' must be one " + f"of: {str(ties_options)[1:-1]}") + + data = [np.asarray(sample) for sample in samples] + + # Validate the sizes and shapes of the arguments. + for k, d in enumerate(data): + if d.size == 0: + raise ValueError("Sample %d is empty. All samples must " + "contain at least one value." % (k + 1)) + if d.ndim != 1: + raise ValueError("Sample %d has %d dimensions. All " + "samples must be one-dimensional sequences." 
% + (k + 1, d.ndim)) + + cdata = np.concatenate(data) + contains_nan, nan_policy = _contains_nan(cdata, nan_policy) + if contains_nan and nan_policy == 'propagate': + return MedianTestResult(np.nan, np.nan, np.nan, None) + + if contains_nan: + grand_median = np.median(cdata[~np.isnan(cdata)]) + else: + grand_median = np.median(cdata) + # When the minimum version of numpy supported by scipy is 1.9.0, + # the above if/else statement can be replaced by the single line: + # grand_median = np.nanmedian(cdata) + + # Create the contingency table. + table = np.zeros((2, len(data)), dtype=np.int64) + for k, sample in enumerate(data): + sample = sample[~np.isnan(sample)] + + nabove = count_nonzero(sample > grand_median) + nbelow = count_nonzero(sample < grand_median) + nequal = sample.size - (nabove + nbelow) + table[0, k] += nabove + table[1, k] += nbelow + if ties == "below": + table[1, k] += nequal + elif ties == "above": + table[0, k] += nequal + + # Check that no row or column of the table is all zero. + # Such a table can not be given to chi2_contingency, because it would have + # a zero in the table of expected frequencies. + rowsums = table.sum(axis=1) + if rowsums[0] == 0: + raise ValueError(f"All values are below the grand median ({grand_median}).") + if rowsums[1] == 0: + raise ValueError(f"All values are above the grand median ({grand_median}).") + if ties == "ignore": + # We already checked that each sample has at least one value, but it + # is possible that all those values equal the grand median. If `ties` + # is "ignore", that would result in a column of zeros in `table`. We + # check for that case here. + zero_cols = np.nonzero((table == 0).all(axis=0))[0] + if len(zero_cols) > 0: + msg = ("All values in sample %d are equal to the grand " + "median (%r), so they are ignored, resulting in an " + "empty sample." 
% (zero_cols[0] + 1, grand_median)) + raise ValueError(msg) + + stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, + correction=correction) + return MedianTestResult(stat, p, grand_median, table) + + +def _circfuncs_common(samples, high, low, xp=None): + xp = array_namespace(samples) if xp is None else xp + + if xp.isdtype(samples.dtype, 'integral'): + dtype = xp.asarray(1.).dtype # get default float type + samples = xp.asarray(samples, dtype=dtype) + + # Recast samples as radians that range between 0 and 2 pi and calculate + # the sine and cosine + sin_samp = xp.sin((samples - low)*2.*xp.pi / (high - low)) + cos_samp = xp.cos((samples - low)*2.*xp.pi / (high - low)) + + return samples, sin_samp, cos_samp + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, default_axis=None, + result_to_tuple=lambda x: (x,) +) +def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'): + r"""Compute the circular mean of a sample of angle observations. + + Given :math:`n` angle observations :math:`x_1, \cdots, x_n` measured in + radians, their `circular mean` is defined by ([1]_, Eq. 2.2.4) + + .. math:: + + \mathrm{Arg} \left( \frac{1}{n} \sum_{k=1}^n e^{i x_k} \right) + + where :math:`i` is the imaginary unit and :math:`\mathop{\mathrm{Arg}} z` + gives the principal value of the argument of complex number :math:`z`, + restricted to the range :math:`[0,2\pi]` by default. :math:`z` in the + above expression is known as the `mean resultant vector`. + + Parameters + ---------- + samples : array_like + Input array of angle observations. The value of a full angle is + equal to ``(high - low)``. + high : float, optional + Upper boundary of the principal value of an angle. Default is ``2*pi``. + low : float, optional + Lower boundary of the principal value of an angle. Default is ``0``. + + Returns + ------- + circmean : float + Circular mean, restricted to the range ``[low, high]``. 
+ + If the mean resultant vector is zero, an input-dependent, + implementation-defined number between ``[low, high]`` is returned. + If the input array is empty, ``np.nan`` is returned. + + See Also + -------- + circstd : Circular standard deviation. + circvar : Circular variance. + + References + ---------- + .. [1] Mardia, K. V. and Jupp, P. E. *Directional Statistics*. + John Wiley & Sons, 1999. + + Examples + -------- + For readability, all angles are printed out in degrees. + + >>> import numpy as np + >>> from scipy.stats import circmean + >>> import matplotlib.pyplot as plt + >>> angles = np.deg2rad(np.array([20, 30, 330])) + >>> circmean = circmean(angles) + >>> np.rad2deg(circmean) + 7.294976657784009 + + >>> mean = angles.mean() + >>> np.rad2deg(mean) + 126.66666666666666 + + Plot and compare the circular mean against the arithmetic mean. + + >>> plt.plot(np.cos(np.linspace(0, 2*np.pi, 500)), + ... np.sin(np.linspace(0, 2*np.pi, 500)), + ... c='k') + >>> plt.scatter(np.cos(angles), np.sin(angles), c='k') + >>> plt.scatter(np.cos(circmean), np.sin(circmean), c='b', + ... 
label='circmean') + >>> plt.scatter(np.cos(mean), np.sin(mean), c='r', label='mean') + >>> plt.legend() + >>> plt.axis('equal') + >>> plt.show() + + """ + xp = array_namespace(samples) + # Needed for non-NumPy arrays to get appropriate NaN result + # Apparently atan2(0, 0) is 0, even though it is mathematically undefined + if xp_size(samples) == 0: + return xp.mean(samples, axis=axis) + samples, sin_samp, cos_samp = _circfuncs_common(samples, high, low, xp=xp) + sin_sum = xp.sum(sin_samp, axis=axis) + cos_sum = xp.sum(cos_samp, axis=axis) + res = xp.atan2(sin_sum, cos_sum) % (2*xp.pi) + + res = res[()] if res.ndim == 0 else res + return res*(high - low)/2.0/xp.pi + low + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, default_axis=None, + result_to_tuple=lambda x: (x,) +) +def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'): + r"""Compute the circular variance of a sample of angle observations. + + Given :math:`n` angle observations :math:`x_1, \cdots, x_n` measured in + radians, their `circular variance` is defined by ([2]_, Eq. 2.3.3) + + .. math:: + + 1 - \left| \frac{1}{n} \sum_{k=1}^n e^{i x_k} \right| + + where :math:`i` is the imaginary unit and :math:`|z|` gives the length + of the complex number :math:`z`. :math:`|z|` in the above expression + is known as the `mean resultant length`. + + Parameters + ---------- + samples : array_like + Input array of angle observations. The value of a full angle is + equal to ``(high - low)``. + high : float, optional + Upper boundary of the principal value of an angle. Default is ``2*pi``. + low : float, optional + Lower boundary of the principal value of an angle. Default is ``0``. + + Returns + ------- + circvar : float + Circular variance. The returned value is in the range ``[0, 1]``, + where ``0`` indicates no variance and ``1`` indicates large variance. + + If the input array is empty, ``np.nan`` is returned. + + See Also + -------- + circmean : Circular mean. 
+ circstd : Circular standard deviation. + + Notes + ----- + In the limit of small angles, the circular variance is close to + half the 'linear' variance if measured in radians. + + References + ---------- + .. [1] Fisher, N.I. *Statistical analysis of circular data*. Cambridge + University Press, 1993. + .. [2] Mardia, K. V. and Jupp, P. E. *Directional Statistics*. + John Wiley & Sons, 1999. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import circvar + >>> import matplotlib.pyplot as plt + >>> samples_1 = np.array([0.072, -0.158, 0.077, 0.108, 0.286, + ... 0.133, -0.473, -0.001, -0.348, 0.131]) + >>> samples_2 = np.array([0.111, -0.879, 0.078, 0.733, 0.421, + ... 0.104, -0.136, -0.867, 0.012, 0.105]) + >>> circvar_1 = circvar(samples_1) + >>> circvar_2 = circvar(samples_2) + + Plot the samples. + + >>> fig, (left, right) = plt.subplots(ncols=2) + >>> for image in (left, right): + ... image.plot(np.cos(np.linspace(0, 2*np.pi, 500)), + ... np.sin(np.linspace(0, 2*np.pi, 500)), + ... c='k') + ... image.axis('equal') + ... image.axis('off') + >>> left.scatter(np.cos(samples_1), np.sin(samples_1), c='k', s=15) + >>> left.set_title(f"circular variance: {np.round(circvar_1, 2)!r}") + >>> right.scatter(np.cos(samples_2), np.sin(samples_2), c='k', s=15) + >>> right.set_title(f"circular variance: {np.round(circvar_2, 2)!r}") + >>> plt.show() + + """ + xp = array_namespace(samples) + samples, sin_samp, cos_samp = _circfuncs_common(samples, high, low, xp=xp) + sin_mean = xp.mean(sin_samp, axis=axis) + cos_mean = xp.mean(cos_samp, axis=axis) + hypotenuse = (sin_mean**2. + cos_mean**2.)**0.5 + # hypotenuse can go slightly above 1 due to rounding errors + with np.errstate(invalid='ignore'): + R = xp_minimum(xp.asarray(1.), hypotenuse) + + res = 1. 
- R + return res + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, default_axis=None, + result_to_tuple=lambda x: (x,) +) +def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *, + normalize=False): + r""" + Compute the circular standard deviation of a sample of angle observations. + + Given :math:`n` angle observations :math:`x_1, \cdots, x_n` measured in + radians, their `circular standard deviation` is defined by + ([2]_, Eq. 2.3.11) + + .. math:: + + \sqrt{ -2 \log \left| \frac{1}{n} \sum_{k=1}^n e^{i x_k} \right| } + + where :math:`i` is the imaginary unit and :math:`|z|` gives the length + of the complex number :math:`z`. :math:`|z|` in the above expression + is known as the `mean resultant length`. + + Parameters + ---------- + samples : array_like + Input array of angle observations. The value of a full angle is + equal to ``(high - low)``. + high : float, optional + Upper boundary of the principal value of an angle. Default is ``2*pi``. + low : float, optional + Lower boundary of the principal value of an angle. Default is ``0``. + normalize : boolean, optional + If ``False`` (the default), the return value is computed from the + above formula with the input scaled by ``(2*pi)/(high-low)`` and + the output scaled (back) by ``(high-low)/(2*pi)``. If ``True``, + the output is not scaled and is returned directly. + + Returns + ------- + circstd : float + Circular standard deviation, optionally normalized. + + If the input array is empty, ``np.nan`` is returned. + + See Also + -------- + circmean : Circular mean. + circvar : Circular variance. + + Notes + ----- + In the limit of small angles, the circular standard deviation is close + to the 'linear' standard deviation if ``normalize`` is ``False``. + + References + ---------- + .. [1] Mardia, K. V. (1972). 2. In *Statistics of Directional Data* + (pp. 18-24). Academic Press. :doi:`10.1016/C2013-0-07425-7`. + .. [2] Mardia, K. V. and Jupp, P. E. *Directional Statistics*. 
+ John Wiley & Sons, 1999. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import circstd + >>> import matplotlib.pyplot as plt + >>> samples_1 = np.array([0.072, -0.158, 0.077, 0.108, 0.286, + ... 0.133, -0.473, -0.001, -0.348, 0.131]) + >>> samples_2 = np.array([0.111, -0.879, 0.078, 0.733, 0.421, + ... 0.104, -0.136, -0.867, 0.012, 0.105]) + >>> circstd_1 = circstd(samples_1) + >>> circstd_2 = circstd(samples_2) + + Plot the samples. + + >>> fig, (left, right) = plt.subplots(ncols=2) + >>> for image in (left, right): + ... image.plot(np.cos(np.linspace(0, 2*np.pi, 500)), + ... np.sin(np.linspace(0, 2*np.pi, 500)), + ... c='k') + ... image.axis('equal') + ... image.axis('off') + >>> left.scatter(np.cos(samples_1), np.sin(samples_1), c='k', s=15) + >>> left.set_title(f"circular std: {np.round(circstd_1, 2)!r}") + >>> right.plot(np.cos(np.linspace(0, 2*np.pi, 500)), + ... np.sin(np.linspace(0, 2*np.pi, 500)), + ... c='k') + >>> right.scatter(np.cos(samples_2), np.sin(samples_2), c='k', s=15) + >>> right.set_title(f"circular std: {np.round(circstd_2, 2)!r}") + >>> plt.show() + + """ + xp = array_namespace(samples) + samples, sin_samp, cos_samp = _circfuncs_common(samples, high, low, xp=xp) + sin_mean = xp.mean(sin_samp, axis=axis) # [1] (2.2.3) + cos_mean = xp.mean(cos_samp, axis=axis) # [1] (2.2.3) + hypotenuse = (sin_mean**2. 
+ cos_mean**2.)**0.5 + # hypotenuse can go slightly above 1 due to rounding errors + with np.errstate(invalid='ignore'): + R = xp_minimum(xp.asarray(1.), hypotenuse) # [1] (2.2.4) + + res = xp.sqrt(-2*xp.log(R)) + if not normalize: + res *= (high-low)/(2.*xp.pi) # [1] (2.3.14) w/ (2.3.7) + return res + + +class DirectionalStats: + def __init__(self, mean_direction, mean_resultant_length): + self.mean_direction = mean_direction + self.mean_resultant_length = mean_resultant_length + + def __repr__(self): + return (f"DirectionalStats(mean_direction={self.mean_direction}," + f" mean_resultant_length={self.mean_resultant_length})") + + +def directional_stats(samples, *, axis=0, normalize=True): + """ + Computes sample statistics for directional data. + + Computes the directional mean (also called the mean direction vector) and + mean resultant length of a sample of vectors. + + The directional mean is a measure of "preferred direction" of vector data. + It is analogous to the sample mean, but it is for use when the length of + the data is irrelevant (e.g. unit vectors). + + The mean resultant length is a value between 0 and 1 used to quantify the + dispersion of directional data: the smaller the mean resultant length, the + greater the dispersion. Several definitions of directional variance + involving the mean resultant length are given in [1]_ and [2]_. + + Parameters + ---------- + samples : array_like + Input array. Must be at least two-dimensional, and the last axis of the + input must correspond with the dimensionality of the vector space. + When the input is exactly two dimensional, this means that each row + of the data is a vector observation. + axis : int, default: 0 + Axis along which the directional mean is computed. + normalize: boolean, default: True + If True, normalize the input to ensure that each observation is a + unit vector. It the observations are already unit vectors, consider + setting this to False to avoid unnecessary computation. 
+ + Returns + ------- + res : DirectionalStats + An object containing attributes: + + mean_direction : ndarray + Directional mean. + mean_resultant_length : ndarray + The mean resultant length [1]_. + + See Also + -------- + circmean: circular mean; i.e. directional mean for 2D *angles* + circvar: circular variance; i.e. directional variance for 2D *angles* + + Notes + ----- + This uses a definition of directional mean from [1]_. + Assuming the observations are unit vectors, the calculation is as follows. + + .. code-block:: python + + mean = samples.mean(axis=0) + mean_resultant_length = np.linalg.norm(mean) + mean_direction = mean / mean_resultant_length + + This definition is appropriate for *directional* data (i.e. vector data + for which the magnitude of each observation is irrelevant) but not + for *axial* data (i.e. vector data for which the magnitude and *sign* of + each observation is irrelevant). + + Several definitions of directional variance involving the mean resultant + length ``R`` have been proposed, including ``1 - R`` [1]_, ``1 - R**2`` + [2]_, and ``2 * (1 - R)`` [2]_. Rather than choosing one, this function + returns ``R`` as attribute `mean_resultant_length` so the user can compute + their preferred measure of dispersion. + + References + ---------- + .. [1] Mardia, Jupp. (2000). *Directional Statistics* + (p. 163). Wiley. + + .. [2] https://en.wikipedia.org/wiki/Directional_statistics + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import directional_stats + >>> data = np.array([[3, 4], # first observation, 2D vector space + ... [6, -8]]) # second observation + >>> dirstats = directional_stats(data) + >>> dirstats.mean_direction + array([1., 0.]) + + In contrast, the regular sample mean of the vectors would be influenced + by the magnitude of each observation. Furthermore, the result would not be + a unit vector. 
+ + >>> data.mean(axis=0) + array([4.5, -2.]) + + An exemplary use case for `directional_stats` is to find a *meaningful* + center for a set of observations on a sphere, e.g. geographical locations. + + >>> data = np.array([[0.8660254, 0.5, 0.], + ... [0.8660254, -0.5, 0.]]) + >>> dirstats = directional_stats(data) + >>> dirstats.mean_direction + array([1., 0., 0.]) + + The regular sample mean on the other hand yields a result which does not + lie on the surface of the sphere. + + >>> data.mean(axis=0) + array([0.8660254, 0., 0.]) + + The function also returns the mean resultant length, which + can be used to calculate a directional variance. For example, using the + definition ``Var(z) = 1 - R`` from [2]_ where ``R`` is the + mean resultant length, we can calculate the directional variance of the + vectors in the above example as: + + >>> 1 - dirstats.mean_resultant_length + 0.13397459716167093 + """ + samples = np.asarray(samples) + if samples.ndim < 2: + raise ValueError("samples must at least be two-dimensional. " + f"Instead samples has shape: {samples.shape!r}") + samples = np.moveaxis(samples, axis, 0) + if normalize: + vectornorms = np.linalg.norm(samples, axis=-1, keepdims=True) + samples = samples/vectornorms + mean = np.mean(samples, axis=0) + mean_resultant_length = np.linalg.norm(mean, axis=-1, keepdims=True) + mean_direction = mean / mean_resultant_length + return DirectionalStats(mean_direction, + mean_resultant_length.squeeze(-1)[()]) + + +def false_discovery_control(ps, *, axis=0, method='bh'): + """Adjust p-values to control the false discovery rate. + + The false discovery rate (FDR) is the expected proportion of rejected null + hypotheses that are actually true. + If the null hypothesis is rejected when the *adjusted* p-value falls below + a specified level, the false discovery rate is controlled at that level. + + Parameters + ---------- + ps : 1D array_like + The p-values to adjust. Elements must be real numbers between 0 and 1. 
+ axis : int + The axis along which to perform the adjustment. The adjustment is + performed independently along each axis-slice. If `axis` is None, `ps` + is raveled before performing the adjustment. + method : {'bh', 'by'} + The false discovery rate control procedure to apply: ``'bh'`` is for + Benjamini-Hochberg [1]_ (Eq. 1), ``'by'`` is for Benjaminini-Yekutieli + [2]_ (Theorem 1.3). The latter is more conservative, but it is + guaranteed to control the FDR even when the p-values are not from + independent tests. + + Returns + ------- + ps_adusted : array_like + The adjusted p-values. If the null hypothesis is rejected where these + fall below a specified level, the false discovery rate is controlled + at that level. + + See Also + -------- + combine_pvalues + statsmodels.stats.multitest.multipletests + + Notes + ----- + In multiple hypothesis testing, false discovery control procedures tend to + offer higher power than familywise error rate control procedures (e.g. + Bonferroni correction [1]_). + + If the p-values correspond with independent tests (or tests with + "positive regression dependencies" [2]_), rejecting null hypotheses + corresponding with Benjamini-Hochberg-adjusted p-values below :math:`q` + controls the false discovery rate at a level less than or equal to + :math:`q m_0 / m`, where :math:`m_0` is the number of true null hypotheses + and :math:`m` is the total number of null hypotheses tested. The same is + true even for dependent tests when the p-values are adjusted accorded to + the more conservative Benjaminini-Yekutieli procedure. + + The adjusted p-values produced by this function are comparable to those + produced by the R function ``p.adjust`` and the statsmodels function + `statsmodels.stats.multitest.multipletests`. Please consider the latter + for more advanced methods of multiple comparison correction. + + References + ---------- + .. [1] Benjamini, Yoav, and Yosef Hochberg. 
"Controlling the false + discovery rate: a practical and powerful approach to multiple + testing." Journal of the Royal statistical society: series B + (Methodological) 57.1 (1995): 289-300. + + .. [2] Benjamini, Yoav, and Daniel Yekutieli. "The control of the false + discovery rate in multiple testing under dependency." Annals of + statistics (2001): 1165-1188. + + .. [3] TileStats. FDR - Benjamini-Hochberg explained - Youtube. + https://www.youtube.com/watch?v=rZKa4tW2NKs. + + .. [4] Neuhaus, Karl-Ludwig, et al. "Improved thrombolysis in acute + myocardial infarction with front-loaded administration of alteplase: + results of the rt-PA-APSAC patency study (TAPS)." Journal of the + American College of Cardiology 19.5 (1992): 885-891. + + Examples + -------- + We follow the example from [1]_. + + Thrombolysis with recombinant tissue-type plasminogen activator (rt-PA) + and anisoylated plasminogen streptokinase activator (APSAC) in + myocardial infarction has been proved to reduce mortality. [4]_ + investigated the effects of a new front-loaded administration of rt-PA + versus those obtained with a standard regimen of APSAC, in a randomized + multicentre trial in 421 patients with acute myocardial infarction. + + There were four families of hypotheses tested in the study, the last of + which was "cardiac and other events after the start of thrombolitic + treatment". FDR control may be desired in this family of hypotheses + because it would not be appropriate to conclude that the front-loaded + treatment is better if it is merely equivalent to the previous treatment. + + The p-values corresponding with the 15 hypotheses in this family were + + >>> ps = [0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, + ... 
0.0459, 0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000] + + If the chosen significance level is 0.05, we may be tempted to reject the + null hypotheses for the tests corresponding with the first nine p-values, + as the first nine p-values fall below the chosen significance level. + However, this would ignore the problem of "multiplicity": if we fail to + correct for the fact that multiple comparisons are being performed, we + are more likely to incorrectly reject true null hypotheses. + + One approach to the multiplicity problem is to control the family-wise + error rate (FWER), that is, the rate at which the null hypothesis is + rejected when it is actually true. A common procedure of this kind is the + Bonferroni correction [1]_. We begin by multiplying the p-values by the + number of hypotheses tested. + + >>> import numpy as np + >>> np.array(ps) * len(ps) + array([1.5000e-03, 6.0000e-03, 2.8500e-02, 1.4250e-01, 3.0150e-01, + 4.1700e-01, 4.4700e-01, 5.1600e-01, 6.8850e-01, 4.8600e+00, + 6.3930e+00, 8.5785e+00, 9.7920e+00, 1.1385e+01, 1.5000e+01]) + + To control the FWER at 5%, we reject only the hypotheses corresponding + with adjusted p-values less than 0.05. In this case, only the hypotheses + corresponding with the first three p-values can be rejected. According to + [1]_, these three hypotheses concerned "allergic reaction" and "two + different aspects of bleeding." + + An alternative approach is to control the false discovery rate: the + expected fraction of rejected null hypotheses that are actually true. The + advantage of this approach is that it typically affords greater power: an + increased rate of rejecting the null hypothesis when it is indeed false. To + control the false discovery rate at 5%, we apply the Benjamini-Hochberg + p-value adjustment. 
+ + >>> from scipy import stats + >>> stats.false_discovery_control(ps) + array([0.0015 , 0.003 , 0.0095 , 0.035625 , 0.0603 , + 0.06385714, 0.06385714, 0.0645 , 0.0765 , 0.486 , + 0.58118182, 0.714875 , 0.75323077, 0.81321429, 1. ]) + + Now, the first *four* adjusted p-values fall below 0.05, so we would reject + the null hypotheses corresponding with these *four* p-values. Rejection + of the fourth null hypothesis was particularly important to the original + study as it led to the conclusion that the new treatment had a + "substantially lower in-hospital mortality rate." + + """ + # Input Validation and Special Cases + ps = np.asarray(ps) + + ps_in_range = (np.issubdtype(ps.dtype, np.number) + and np.all(ps == np.clip(ps, 0, 1))) + if not ps_in_range: + raise ValueError("`ps` must include only numbers between 0 and 1.") + + methods = {'bh', 'by'} + if method.lower() not in methods: + raise ValueError(f"Unrecognized `method` '{method}'." + f"Method must be one of {methods}.") + method = method.lower() + + if axis is None: + axis = 0 + ps = ps.ravel() + + axis = np.asarray(axis)[()] + if not np.issubdtype(axis.dtype, np.integer) or axis.size != 1: + raise ValueError("`axis` must be an integer or `None`") + + if ps.size <= 1 or ps.shape[axis] <= 1: + return ps[()] + + ps = np.moveaxis(ps, axis, -1) + m = ps.shape[-1] + + # Main Algorithm + # Equivalent to the ideas of [1] and [2], except that this adjusts the + # p-values as described in [3]. The results are similar to those produced + # by R's p.adjust. + + # "Let [ps] be the ordered observed p-values..." + order = np.argsort(ps, axis=-1) + ps = np.take_along_axis(ps, order, axis=-1) # this copies ps + + # Equation 1 of [1] rearranged to reject when p is less than specified q + i = np.arange(1, m+1) + ps *= m / i + + # Theorem 1.3 of [2] + if method == 'by': + ps *= np.sum(1 / i) + + # accounts for rejecting all null hypotheses i for i < k, where k is + # defined in Eq. 1 of either [1] or [2]. See [3]. 
Starting with the index j + # of the second to last element, we replace element j with element j+1 if + # the latter is smaller. + np.minimum.accumulate(ps[..., ::-1], out=ps[..., ::-1], axis=-1) + + # Restore original order of axes and data + np.put_along_axis(ps, order, values=ps.copy(), axis=-1) + ps = np.moveaxis(ps, -1, axis) + + return np.clip(ps, 0, 1) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_mstats_basic.py b/parrot/lib/python3.10/site-packages/scipy/stats/_mstats_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..9d07c7f2742824fecb776546c15db4220bb0592e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_mstats_basic.py @@ -0,0 +1,3663 @@ +""" +An extension of scipy.stats._stats_py to support masked arrays + +""" +# Original author (2007): Pierre GF Gerard-Marchant + + +__all__ = ['argstoarray', + 'count_tied_groups', + 'describe', + 'f_oneway', 'find_repeats','friedmanchisquare', + 'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis', + 'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest', + 'ks_1samp', 'kstest', + 'linregress', + 'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign', + 'normaltest', + 'obrientransform', + 'pearsonr','plotting_positions','pointbiserialr', + 'rankdata', + 'scoreatpercentile','sem', + 'sen_seasonal_slopes','skew','skewtest','spearmanr', + 'siegelslopes', 'theilslopes', + 'tmax','tmean','tmin','trim','trimboth', + 'trimtail','trima','trimr','trimmed_mean','trimmed_std', + 'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp', + 'ttest_ind','ttest_rel','tvar', + 'variation', + 'winsorize', + 'brunnermunzel', + ] + +import numpy as np +from numpy import ndarray +import numpy.ma as ma +from numpy.ma import masked, nomask +import math + +import itertools +import warnings +from collections import namedtuple + +from . 
import distributions +from scipy._lib._util import _rename_parameter, _contains_nan +from scipy._lib._bunch import _make_tuple_bunch +import scipy.special as special +import scipy.stats._stats_py +import scipy.stats._stats_py as _stats_py + +from ._stats_mstats_common import ( + _find_repeats, + theilslopes as stats_theilslopes, + siegelslopes as stats_siegelslopes + ) + + +def _chk_asarray(a, axis): + # Always returns a masked array, raveled for axis=None + a = ma.asanyarray(a) + if axis is None: + a = ma.ravel(a) + outaxis = 0 + else: + outaxis = axis + return a, outaxis + + +def _chk2_asarray(a, b, axis): + a = ma.asanyarray(a) + b = ma.asanyarray(b) + if axis is None: + a = ma.ravel(a) + b = ma.ravel(b) + outaxis = 0 + else: + outaxis = axis + return a, b, outaxis + + +def _chk_size(a, b): + a = ma.asanyarray(a) + b = ma.asanyarray(b) + (na, nb) = (a.size, b.size) + if na != nb: + raise ValueError("The size of the input array should match!" + f" ({na} <> {nb})") + return (a, b, na) + + +def _ttest_finish(df, t, alternative): + """Common code between all 3 t-test functions.""" + # We use ``stdtr`` directly here to preserve masked arrays + + if alternative == 'less': + pval = special.stdtr(df, t) + elif alternative == 'greater': + pval = special.stdtr(df, -t) + elif alternative == 'two-sided': + pval = special.stdtr(df, -np.abs(t))*2 + else: + raise ValueError("alternative must be " + "'less', 'greater' or 'two-sided'") + + if t.ndim == 0: + t = t[()] + if pval.ndim == 0: + pval = pval[()] + + return t, pval + + +def argstoarray(*args): + """ + Constructs a 2D array from a group of sequences. + + Sequences are filled with missing values to match the length of the longest + sequence. + + Parameters + ---------- + *args : sequences + Group of sequences. + + Returns + ------- + argstoarray : MaskedArray + A ( `m` x `n` ) masked array, where `m` is the number of arguments and + `n` the length of the longest argument. 
+ + Notes + ----- + `numpy.ma.vstack` has identical behavior, but is called with a sequence + of sequences. + + Examples + -------- + A 2D masked array constructed from a group of sequences is returned. + + >>> from scipy.stats.mstats import argstoarray + >>> argstoarray([1, 2, 3], [4, 5, 6]) + masked_array( + data=[[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]], + mask=[[False, False, False], + [False, False, False]], + fill_value=1e+20) + + The returned masked array filled with missing values when the lengths of + sequences are different. + + >>> argstoarray([1, 3], [4, 5, 6]) + masked_array( + data=[[1.0, 3.0, --], + [4.0, 5.0, 6.0]], + mask=[[False, False, True], + [False, False, False]], + fill_value=1e+20) + + """ + if len(args) == 1 and not isinstance(args[0], ndarray): + output = ma.asarray(args[0]) + if output.ndim != 2: + raise ValueError("The input should be 2D") + else: + n = len(args) + m = max([len(k) for k in args]) + output = ma.array(np.empty((n,m), dtype=float), mask=True) + for (k,v) in enumerate(args): + output[k,:len(v)] = v + + output[np.logical_not(np.isfinite(output._data))] = masked + return output + + +def find_repeats(arr): + """Find repeats in arr and return a tuple (repeats, repeat_count). + + The input is cast to float64. Masked values are discarded. + + Parameters + ---------- + arr : sequence + Input array. The array is flattened if it is not 1D. + + Returns + ------- + repeats : ndarray + Array of repeated values. + counts : ndarray + Array of counts. + + Examples + -------- + >>> from scipy.stats import mstats + >>> mstats.find_repeats([2, 1, 2, 3, 2, 2, 5]) + (array([2.]), array([4])) + + In the above example, 2 repeats 4 times. + + >>> mstats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]]) + (array([4., 5.]), array([2, 2])) + + In the above example, both 4 and 5 repeat 2 times. + + """ + # Make sure we get a copy. ma.compressed promises a "new array", but can + # actually return a reference. 
+ compr = np.asarray(ma.compressed(arr), dtype=np.float64) + try: + need_copy = np.may_share_memory(compr, arr) + except AttributeError: + # numpy < 1.8.2 bug: np.may_share_memory([], []) raises, + # while in numpy 1.8.2 and above it just (correctly) returns False. + need_copy = False + if need_copy: + compr = compr.copy() + return _find_repeats(compr) + + +def count_tied_groups(x, use_missing=False): + """ + Counts the number of tied values. + + Parameters + ---------- + x : sequence + Sequence of data on which to counts the ties + use_missing : bool, optional + Whether to consider missing values as tied. + + Returns + ------- + count_tied_groups : dict + Returns a dictionary (nb of ties: nb of groups). + + Examples + -------- + >>> from scipy.stats import mstats + >>> import numpy as np + >>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6] + >>> mstats.count_tied_groups(z) + {2: 1, 3: 2} + + In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x). + + >>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6]) + >>> mstats.count_tied_groups(z) + {2: 2, 3: 1} + >>> z[[1,-1]] = np.ma.masked + >>> mstats.count_tied_groups(z, use_missing=True) + {2: 2, 3: 1} + + """ + nmasked = ma.getmask(x).sum() + # We need the copy as find_repeats will overwrite the initial data + data = ma.compressed(x).copy() + (ties, counts) = find_repeats(data) + nties = {} + if len(ties): + nties = dict(zip(np.unique(counts), itertools.repeat(1))) + nties.update(dict(zip(*find_repeats(counts)))) + + if nmasked and use_missing: + try: + nties[nmasked] += 1 + except KeyError: + nties[nmasked] = 1 + + return nties + + +def rankdata(data, axis=None, use_missing=False): + """Returns the rank (also known as order statistics) of each data point + along the given axis. + + If some values are tied, their rank is averaged. + If some values are masked, their rank is set to 0 if use_missing is False, + or set to the average rank of the unmasked values if use_missing is True. 
+ + Parameters + ---------- + data : sequence + Input data. The data is transformed to a masked array + axis : {None,int}, optional + Axis along which to perform the ranking. + If None, the array is first flattened. An exception is raised if + the axis is specified for arrays with a dimension larger than 2 + use_missing : bool, optional + Whether the masked values have a rank of 0 (False) or equal to the + average rank of the unmasked values (True). + + """ + def _rank1d(data, use_missing=False): + n = data.count() + rk = np.empty(data.size, dtype=float) + idx = data.argsort() + rk[idx[:n]] = np.arange(1,n+1) + + if use_missing: + rk[idx[n:]] = (n+1)/2. + else: + rk[idx[n:]] = 0 + + repeats = find_repeats(data.copy()) + for r in repeats[0]: + condition = (data == r).filled(False) + rk[condition] = rk[condition].mean() + return rk + + data = ma.array(data, copy=False) + if axis is None: + if data.ndim > 1: + return _rank1d(data.ravel(), use_missing).reshape(data.shape) + else: + return _rank1d(data, use_missing) + else: + return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray) + + +ModeResult = namedtuple('ModeResult', ('mode', 'count')) + + +def mode(a, axis=0): + """ + Returns an array of the modal (most common) value in the passed array. + + Parameters + ---------- + a : array_like + n-dimensional array of which to find mode(s). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + mode : ndarray + Array of modal values. + count : ndarray + Array of counts for each mode. + + Notes + ----- + For more details, see `scipy.stats.mode`. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> from scipy.stats import mstats + >>> m_arr = np.ma.array([1, 1, 0, 0, 0, 0], mask=[0, 0, 1, 1, 1, 0]) + >>> mstats.mode(m_arr) # note that most zeros are masked + ModeResult(mode=array([1.]), count=array([2.])) + + """ + return _mode(a, axis=axis, keepdims=True) + + +def _mode(a, axis=0, keepdims=True): + # Don't want to expose `keepdims` from the public `mstats.mode` + a, axis = _chk_asarray(a, axis) + + def _mode1D(a): + (rep,cnt) = find_repeats(a) + if not cnt.ndim: + return (0, 0) + elif cnt.size: + return (rep[cnt.argmax()], cnt.max()) + else: + return (a.min(), 1) + + if axis is None: + output = _mode1D(ma.ravel(a)) + output = (ma.array(output[0]), ma.array(output[1])) + else: + output = ma.apply_along_axis(_mode1D, axis, a) + if keepdims is None or keepdims: + newshape = list(a.shape) + newshape[axis] = 1 + slices = [slice(None)] * output.ndim + slices[axis] = 0 + modes = output[tuple(slices)].reshape(newshape) + slices[axis] = 1 + counts = output[tuple(slices)].reshape(newshape) + output = (modes, counts) + else: + output = np.moveaxis(output, axis, 0) + + return ModeResult(*output) + + +def _betai(a, b, x): + x = np.asanyarray(x) + x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0 + return special.betainc(a, b, x) + + +def msign(x): + """Returns the sign of x, or 0 if x is masked.""" + return ma.filled(np.sign(x), 0) + + +def pearsonr(x, y): + r""" + Pearson correlation coefficient and p-value for testing non-correlation. + + The Pearson correlation coefficient [1]_ measures the linear relationship + between two datasets. The calculation of the p-value relies on the + assumption that each dataset is normally distributed. (See Kowalski [3]_ + for a discussion of the effects of non-normality of the input on the + distribution of the correlation coefficient.) Like other correlation + coefficients, this one varies between -1 and +1 with 0 implying no + correlation. 
Correlations of -1 or +1 imply an exact linear relationship. + + Parameters + ---------- + x : (N,) array_like + Input array. + y : (N,) array_like + Input array. + + Returns + ------- + r : float + Pearson's correlation coefficient. + p-value : float + Two-tailed p-value. + + Warns + ----- + `~scipy.stats.ConstantInputWarning` + Raised if an input is a constant array. The correlation coefficient + is not defined in this case, so ``np.nan`` is returned. + + `~scipy.stats.NearConstantInputWarning` + Raised if an input is "nearly" constant. The array ``x`` is considered + nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``. + Numerical errors in the calculation ``x - mean(x)`` in this case might + result in an inaccurate calculation of r. + + See Also + -------- + spearmanr : Spearman rank-order correlation coefficient. + kendalltau : Kendall's tau, a correlation measure for ordinal data. + + Notes + ----- + The correlation coefficient is calculated as follows: + + .. math:: + + r = \frac{\sum (x - m_x) (y - m_y)} + {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}} + + where :math:`m_x` is the mean of the vector x and :math:`m_y` is + the mean of the vector y. + + Under the assumption that x and y are drawn from + independent normal distributions (so the population correlation coefficient + is 0), the probability density function of the sample correlation + coefficient r is ([1]_, [2]_): + + .. math:: + + f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)} + + where n is the number of samples, and B is the beta function. This + is sometimes referred to as the exact distribution of r. This is + the distribution that is used in `pearsonr` to compute the p-value. + The distribution is a beta distribution on the interval [-1, 1], + with equal shape parameters a = b = n/2 - 1. 
In terms of SciPy's + implementation of the beta distribution, the distribution of r is:: + + dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2) + + The p-value returned by `pearsonr` is a two-sided p-value. The p-value + roughly indicates the probability of an uncorrelated system + producing datasets that have a Pearson correlation at least as extreme + as the one computed from these datasets. More precisely, for a + given sample with correlation coefficient r, the p-value is + the probability that abs(r') of a random sample x' and y' drawn from + the population with zero correlation would be greater than or equal + to abs(r). In terms of the object ``dist`` shown above, the p-value + for a given r and length n can be computed as:: + + p = 2*dist.cdf(-abs(r)) + + When n is 2, the above continuous distribution is not well-defined. + One can interpret the limit of the beta distribution as the shape + parameters a and b approach a = b = 0 as a discrete distribution with + equal probability masses at r = 1 and r = -1. More directly, one + can observe that, given the data x = [x1, x2] and y = [y1, y2], and + assuming x1 != x2 and y1 != y2, the only possible values for r are 1 + and -1. Because abs(r') for any sample x' and y' with length 2 will + be 1, the two-sided p-value for a sample of length 2 is always 1. + + References + ---------- + .. [1] "Pearson correlation coefficient", Wikipedia, + https://en.wikipedia.org/wiki/Pearson_correlation_coefficient + .. [2] Student, "Probable error of a correlation coefficient", + Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310. + .. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution + of the Sample Product-Moment Correlation Coefficient" + Journal of the Royal Statistical Society. Series C (Applied + Statistics), Vol. 21, No. 1 (1972), pp. 1-12. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> from scipy.stats import mstats + >>> mstats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4]) + (-0.7426106572325057, 0.1505558088534455) + + There is a linear dependence between x and y if y = a + b*x + e, where + a,b are constants and e is a random error term, assumed to be independent + of x. For simplicity, assume that x is standard normal, a=0, b=1 and let + e follow a normal distribution with mean zero and standard deviation s>0. + + >>> s = 0.5 + >>> x = stats.norm.rvs(size=500) + >>> e = stats.norm.rvs(scale=s, size=500) + >>> y = x + e + >>> mstats.pearsonr(x, y) + (0.9029601878969703, 8.428978827629898e-185) # may vary + + This should be close to the exact value given by + + >>> 1/np.sqrt(1 + s**2) + 0.8944271909999159 + + For s=0.5, we observe a high level of correlation. In general, a large + variance of the noise reduces the correlation, while the correlation + approaches one as the variance of the error goes to zero. + + It is important to keep in mind that no correlation does not imply + independence unless (x, y) is jointly normal. Correlation can even be zero + when there is a very simple dependence structure: if X follows a + standard normal distribution, let y = abs(x). Note that the correlation + between x and y is zero. Indeed, since the expectation of x is zero, + cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero + by symmetry. The following lines of code illustrate this observation: + + >>> y = np.abs(x) + >>> mstats.pearsonr(x, y) + (-0.016172891856853524, 0.7182823678751942) # may vary + + A non-zero correlation coefficient can be misleading. For example, if X has + a standard normal distribution, define y = x if x < 0 and y = 0 otherwise. 
+ A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797..., + implying a high level of correlation: + + >>> y = np.where(x < 0, x, 0) + >>> mstats.pearsonr(x, y) + (0.8537091583771509, 3.183461621422181e-143) # may vary + + This is unintuitive since there is no dependence of x and y if x is larger + than zero which happens in about half of the cases if we sample x and y. + """ + (x, y, n) = _chk_size(x, y) + (x, y) = (x.ravel(), y.ravel()) + # Get the common mask and the total nb of unmasked elements + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) + n -= m.sum() + df = n-2 + if df < 0: + return (masked, masked) + + return scipy.stats._stats_py.pearsonr( + ma.masked_array(x, mask=m).compressed(), + ma.masked_array(y, mask=m).compressed()) + + +def spearmanr(x, y=None, use_ties=True, axis=None, nan_policy='propagate', + alternative='two-sided'): + """ + Calculates a Spearman rank-order correlation coefficient and the p-value + to test for non-correlation. + + The Spearman correlation is a nonparametric measure of the linear + relationship between two datasets. Unlike the Pearson correlation, the + Spearman correlation does not assume that both datasets are normally + distributed. Like other correlation coefficients, this one varies + between -1 and +1 with 0 implying no correlation. Correlations of -1 or + +1 imply a monotonic relationship. Positive correlations imply that + as `x` increases, so does `y`. Negative correlations imply that as `x` + increases, `y` decreases. + + Missing values are discarded pair-wise: if a value is missing in `x`, the + corresponding value in `y` is masked. + + The p-value roughly indicates the probability of an uncorrelated system + producing datasets that have a Spearman correlation at least as extreme + as the one computed from these datasets. The p-values are not entirely + reliable but are probably reasonable for datasets larger than 500 or so. 
+ + Parameters + ---------- + x, y : 1D or 2D array_like, y is optional + One or two 1-D or 2-D arrays containing multiple variables and + observations. When these are 1-D, each represents a vector of + observations of a single variable. For the behavior in the 2-D case, + see under ``axis``, below. + use_ties : bool, optional + DO NOT USE. Does not do anything, keyword is only left in place for + backwards compatibility reasons. + axis : int or None, optional + If axis=0 (default), then each column represents a variable, with + observations in the rows. If axis=1, the relationship is transposed: + each row represents a variable, while the columns contain observations. + If axis=None, then both arrays will be raveled. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the correlation is nonzero + * 'less': the correlation is negative (less than zero) + * 'greater': the correlation is positive (greater than zero) + + .. versionadded:: 1.7.0 + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float or ndarray (2-D square) + Spearman correlation matrix or correlation coefficient (if only 2 + variables are given as parameters). Correlation matrix is square + with length equal to total number of variables (columns or rows) in + ``a`` and ``b`` combined. + pvalue : float + The p-value for a hypothesis test whose null hypothesis + is that two sets of data are linearly uncorrelated. See + `alternative` above for alternative hypotheses. `pvalue` has the + same shape as `statistic`. 
+ + References + ---------- + [CRCProbStat2000] section 14.7 + + """ + if not use_ties: + raise ValueError("`use_ties=False` is not supported in SciPy >= 1.2.0") + + # Always returns a masked array, raveled if axis=None + x, axisout = _chk_asarray(x, axis) + if y is not None: + # Deal only with 2-D `x` case. + y, _ = _chk_asarray(y, axis) + if axisout == 0: + x = ma.column_stack((x, y)) + else: + x = ma.vstack((x, y)) + + if axisout == 1: + # To simplify the code that follow (always use `n_obs, n_vars` shape) + x = x.T + + if nan_policy == 'omit': + x = ma.masked_invalid(x) + + def _spearmanr_2cols(x): + # Mask the same observations for all variables, and then drop those + # observations (can't leave them masked, rankdata is weird). + x = ma.mask_rowcols(x, axis=0) + x = x[~x.mask.any(axis=1), :] + + # If either column is entirely NaN or Inf + if not np.any(x.data): + res = scipy.stats._stats_py.SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + m = ma.getmask(x) + n_obs = x.shape[0] + dof = n_obs - 2 - int(m.sum(axis=0)[0]) + if dof < 0: + raise ValueError("The input must have at least 3 entries!") + + # Gets the ranks and rank differences + x_ranked = rankdata(x, axis=0) + rs = ma.corrcoef(x_ranked, rowvar=False).data + + # rs can have elements equal to 1, so avoid zero division warnings + with np.errstate(divide='ignore'): + # clip the small negative values possibly caused by rounding + # errors before taking the square root + t = rs * np.sqrt((dof / ((rs+1.0) * (1.0-rs))).clip(0)) + + t, prob = _ttest_finish(dof, t, alternative) + + # For backwards compatibility, return scalars when comparing 2 columns + if rs.shape == (2, 2): + res = scipy.stats._stats_py.SignificanceResult(rs[1, 0], + prob[1, 0]) + res.correlation = rs[1, 0] + return res + else: + res = scipy.stats._stats_py.SignificanceResult(rs, prob) + res.correlation = rs + return res + + # Need to do this per pair of variables, otherwise the dropped observations + # in a third 
column mess up the result for a pair. + n_vars = x.shape[1] + if n_vars == 2: + return _spearmanr_2cols(x) + else: + rs = np.ones((n_vars, n_vars), dtype=float) + prob = np.zeros((n_vars, n_vars), dtype=float) + for var1 in range(n_vars - 1): + for var2 in range(var1+1, n_vars): + result = _spearmanr_2cols(x[:, [var1, var2]]) + rs[var1, var2] = result.correlation + rs[var2, var1] = result.correlation + prob[var1, var2] = result.pvalue + prob[var2, var1] = result.pvalue + + res = scipy.stats._stats_py.SignificanceResult(rs, prob) + res.correlation = rs + return res + + +def _kendall_p_exact(n, c, alternative='two-sided'): + + # Use the fact that distribution is symmetric: always calculate a CDF in + # the left tail. + # This will be the one-sided p-value if `c` is on the side of + # the null distribution predicted by the alternative hypothesis. + # The two-sided p-value will be twice this value. + # If `c` is on the other side of the null distribution, we'll need to + # take the complement and add back the probability mass at `c`. + in_right_tail = (c >= (n*(n-1))//2 - c) + alternative_greater = (alternative == 'greater') + c = int(min(c, (n*(n-1))//2 - c)) + + # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods" + # (4th Edition), Charles Griffin & Co., 1970. + if n <= 0: + raise ValueError(f'n ({n}) must be positive') + elif c < 0 or 4*c > n*(n-1): + raise ValueError(f'c ({c}) must satisfy 0 <= 4c <= n(n-1) = {n*(n-1)}.') + elif n == 1: + prob = 1.0 + p_mass_at_c = 1 + elif n == 2: + prob = 1.0 + p_mass_at_c = 0.5 + elif c == 0: + prob = 2.0/math.factorial(n) if n < 171 else 0.0 + p_mass_at_c = prob/2 + elif c == 1: + prob = 2.0/math.factorial(n-1) if n < 172 else 0.0 + p_mass_at_c = (n-1)/math.factorial(n) + elif 4*c == n*(n-1) and alternative == 'two-sided': + # I'm sure there's a simple formula for p_mass_at_c in this + # case, but I don't know it. Use generic formula for one-sided p-value. 
+ prob = 1.0 + elif n < 171: + new = np.zeros(c+1) + new[0:2] = 1.0 + for j in range(3,n+1): + new = np.cumsum(new) + if j <= c: + new[j:] -= new[:c+1-j] + prob = 2.0*np.sum(new)/math.factorial(n) + p_mass_at_c = new[-1]/math.factorial(n) + else: + new = np.zeros(c+1) + new[0:2] = 1.0 + for j in range(3, n+1): + new = np.cumsum(new)/j + if j <= c: + new[j:] -= new[:c+1-j] + prob = np.sum(new) + p_mass_at_c = new[-1]/2 + + if alternative != 'two-sided': + # if the alternative hypothesis and alternative agree, + # one-sided p-value is half the two-sided p-value + if in_right_tail == alternative_greater: + prob /= 2 + else: + prob = 1 - prob/2 + p_mass_at_c + + prob = np.clip(prob, 0, 1) + + return prob + + +def kendalltau(x, y, use_ties=True, use_missing=False, method='auto', + alternative='two-sided'): + """ + Computes Kendall's rank correlation tau on two variables *x* and *y*. + + Parameters + ---------- + x : sequence + First data list (for example, time). + y : sequence + Second data list. + use_ties : {True, False}, optional + Whether ties correction should be performed. + use_missing : {False, True}, optional + Whether missing data should be allocated a rank of 0 (False) or the + average rank (True) + method : {'auto', 'asymptotic', 'exact'}, optional + Defines which method is used to calculate the p-value [1]_. + 'asymptotic' uses a normal approximation valid for large samples. + 'exact' computes the exact p-value, but can only be used if no ties + are present. As the sample size increases, the 'exact' computation + time may grow and the result may lose some precision. + 'auto' is the default and selects the appropriate + method based on a trade-off between speed and accuracy. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. 
+ The following options are available: + + * 'two-sided': the rank correlation is nonzero + * 'less': the rank correlation is negative (less than zero) + * 'greater': the rank correlation is positive (greater than zero) + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float + The tau statistic. + pvalue : float + The p-value for a hypothesis test whose null hypothesis is + an absence of association, tau = 0. + + References + ---------- + .. [1] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), + Charles Griffin & Co., 1970. + + """ + (x, y, n) = _chk_size(x, y) + (x, y) = (x.flatten(), y.flatten()) + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) + if m is not nomask: + x = ma.array(x, mask=m, copy=True) + y = ma.array(y, mask=m, copy=True) + # need int() here, otherwise numpy defaults to 32 bit + # integer on all Windows architectures, causing overflow. + # int() will keep it infinite precision. + n -= int(m.sum()) + + if n < 2: + res = scipy.stats._stats_py.SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0) + ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0) + idx = rx.argsort() + (rx, ry) = (rx[idx], ry[idx]) + C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum() + for i in range(len(ry)-1)], dtype=float) + D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum() + for i in range(len(ry)-1)], dtype=float) + xties = count_tied_groups(x) + yties = count_tied_groups(y) + if use_ties: + corr_x = np.sum([v*k*(k-1) for (k,v) in xties.items()], dtype=float) + corr_y = np.sum([v*k*(k-1) for (k,v) in yties.items()], dtype=float) + denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.) + else: + denom = n*(n-1)/2. 
+ tau = (C-D) / denom + + if method == 'exact' and (xties or yties): + raise ValueError("Ties found, exact method cannot be used.") + + if method == 'auto': + if (not xties and not yties) and (n <= 33 or min(C, n*(n-1)/2.0-C) <= 1): + method = 'exact' + else: + method = 'asymptotic' + + if not xties and not yties and method == 'exact': + prob = _kendall_p_exact(n, C, alternative) + + elif method == 'asymptotic': + var_s = n*(n-1)*(2*n+5) + if use_ties: + var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in xties.items()]) + var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in yties.items()]) + v1 = (np.sum([v*k*(k-1) for (k, v) in xties.items()], dtype=float) * + np.sum([v*k*(k-1) for (k, v) in yties.items()], dtype=float)) + v1 /= 2.*n*(n-1) + if n > 2: + v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in xties.items()], + dtype=float) * \ + np.sum([v*k*(k-1)*(k-2) for (k,v) in yties.items()], + dtype=float) + v2 /= 9.*n*(n-1)*(n-2) + else: + v2 = 0 + else: + v1 = v2 = 0 + + var_s /= 18. + var_s += (v1 + v2) + z = (C-D)/np.sqrt(var_s) + prob = scipy.stats._stats_py._get_pvalue(z, distributions.norm, alternative) + else: + raise ValueError("Unknown method "+str(method)+" specified, please " + "use auto, exact or asymptotic.") + + res = scipy.stats._stats_py.SignificanceResult(tau[()], prob[()]) + res.correlation = tau + return res + + +def kendalltau_seasonal(x): + """ + Computes a multivariate Kendall's rank correlation tau, for seasonal data. + + Parameters + ---------- + x : 2-D ndarray + Array of seasonal data, with seasons in columns. + + """ + x = ma.array(x, subok=True, copy=False, ndmin=2) + (n,m) = x.shape + n_p = x.count(0) + + S_szn = sum(msign(x[i:]-x[i]).sum(0) for i in range(n)) + S_tot = S_szn.sum() + + n_tot = x.count() + ties = count_tied_groups(x.compressed()) + corr_ties = sum(v*k*(k-1) for (k,v) in ties.items()) + denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2. 
+ + R = rankdata(x, axis=0, use_missing=True) + K = ma.empty((m,m), dtype=int) + covmat = ma.empty((m,m), dtype=float) + denom_szn = ma.empty(m, dtype=float) + for j in range(m): + ties_j = count_tied_groups(x[:,j].compressed()) + corr_j = sum(v*k*(k-1) for (k,v) in ties_j.items()) + cmb = n_p[j]*(n_p[j]-1) + for k in range(j,m,1): + K[j,k] = sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum() + for i in range(n)) + covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() - + n*(n_p[j]+1)*(n_p[k]+1))/3. + K[k,j] = K[j,k] + covmat[k,j] = covmat[j,k] + + denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2. + + var_szn = covmat.diagonal() + + z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn) + z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum()) + z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum()) + + prob_szn = special.erfc(abs(z_szn.data)/np.sqrt(2)) + prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2)) + prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2)) + + chi2_tot = (z_szn*z_szn).sum() + chi2_trd = m * z_szn.mean()**2 + output = {'seasonal tau': S_szn/denom_szn, + 'global tau': S_tot/denom_tot, + 'global tau (alt)': S_tot/denom_szn.sum(), + 'seasonal p-value': prob_szn, + 'global p-value (indep)': prob_tot_ind, + 'global p-value (dep)': prob_tot_dep, + 'chi2 total': chi2_tot, + 'chi2 trend': chi2_trd, + } + return output + + +PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation', + 'pvalue')) + + +def pointbiserialr(x, y): + """Calculates a point biserial correlation coefficient and its p-value. + + Parameters + ---------- + x : array_like of bools + Input array. + y : array_like + Input array. + + Returns + ------- + correlation : float + R value + pvalue : float + 2-tailed p-value + + Notes + ----- + Missing values are considered pair-wise: if a value is missing in x, + the corresponding value in y is masked. + + For more details on `pointbiserialr`, see `scipy.stats.pointbiserialr`. 
+ + """ + x = ma.fix_invalid(x, copy=True).astype(bool) + y = ma.fix_invalid(y, copy=True).astype(float) + # Get rid of the missing data + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) + if m is not nomask: + unmask = np.logical_not(m) + x = x[unmask] + y = y[unmask] + + n = len(x) + # phat is the fraction of x values that are True + phat = x.sum() / float(n) + y0 = y[~x] # y-values where x is False + y1 = y[x] # y-values where x is True + y0m = y0.mean() + y1m = y1.mean() + + rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std() + + df = n-2 + t = rpb*ma.sqrt(df/(1.0-rpb**2)) + prob = _betai(0.5*df, 0.5, df/(df+t*t)) + + return PointbiserialrResult(rpb, prob) + + +def linregress(x, y=None): + r""" + Calculate a linear least-squares regression for two sets of measurements. + + Parameters + ---------- + x, y : array_like + Two sets of measurements. Both arrays should have the same length N. If + only `x` is given (and ``y=None``), then it must be a two-dimensional + array where one dimension has length 2. The two sets of measurements + are then found by splitting the array along the length-2 dimension. In + the case where ``y=None`` and `x` is a 2xN array, ``linregress(x)`` is + equivalent to ``linregress(x[0], x[1])``. + + Returns + ------- + result : ``LinregressResult`` instance + The return value is an object with the following attributes: + + slope : float + Slope of the regression line. + intercept : float + Intercept of the regression line. + rvalue : float + The Pearson correlation coefficient. The square of ``rvalue`` + is equal to the coefficient of determination. + pvalue : float + The p-value for a hypothesis test whose null hypothesis is + that the slope is zero, using Wald Test with t-distribution of + the test statistic. See `alternative` above for alternative + hypotheses. + stderr : float + Standard error of the estimated slope (gradient), under the + assumption of residual normality. 
+ intercept_stderr : float + Standard error of the estimated intercept, under the assumption + of residual normality. + + See Also + -------- + scipy.optimize.curve_fit : + Use non-linear least squares to fit a function to data. + scipy.optimize.leastsq : + Minimize the sum of squares of a set of equations. + + Notes + ----- + Missing values are considered pair-wise: if a value is missing in `x`, + the corresponding value in `y` is masked. + + For compatibility with older versions of SciPy, the return value acts + like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``, + ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write:: + + slope, intercept, r, p, se = linregress(x, y) + + With that style, however, the standard error of the intercept is not + available. To have access to all the computed values, including the + standard error of the intercept, use the return value as an object + with attributes, e.g.:: + + result = linregress(x, y) + print(result.intercept, result.intercept_stderr) + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import stats + >>> rng = np.random.default_rng() + + Generate some data: + + >>> x = rng.random(10) + >>> y = 1.6*x + rng.random(10) + + Perform the linear regression: + + >>> res = stats.mstats.linregress(x, y) + + Coefficient of determination (R-squared): + + >>> print(f"R-squared: {res.rvalue**2:.6f}") + R-squared: 0.717533 + + Plot the data along with the fitted line: + + >>> plt.plot(x, y, 'o', label='original data') + >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line') + >>> plt.legend() + >>> plt.show() + + Calculate 95% confidence interval on slope and intercept: + + >>> # Two-sided inverse Students t-distribution + >>> # p - probability, df - degrees of freedom + >>> from scipy.stats import t + >>> tinv = lambda p, df: abs(t.ppf(p/2, df)) + + >>> ts = tinv(0.05, len(x)-2) + >>> print(f"slope (95%): {res.slope:.6f} +/- 
def theilslopes(y, x=None, alpha=0.95, method='separate'):
    r"""
    Computes the Theil-Sen estimator for a set of points (x, y).

    `theilslopes` implements a method for robust linear regression: the
    slope is the median of all slopes between paired values.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    alpha : float, optional
        Confidence degree between 0 and 1. Default is 95% confidence.
        `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
        interpreted as "find the 90% confidence interval".
    method : {'joint', 'separate'}, optional
        Method used for computing the intercept estimate:

        * 'joint': ``np.median(y - slope * x)``.
        * 'separate': ``np.median(y) - slope * np.median(x)``.

        The default is 'separate'.

        .. versionadded:: 1.8.0

    Returns
    -------
    result : ``TheilslopesResult`` instance
        Object with attributes ``slope``, ``intercept``, ``low_slope`` and
        ``high_slope`` (the latter two bound the confidence interval).

    See Also
    --------
    siegelslopes : a similar technique using repeated medians

    Notes
    -----
    For more details on `theilslopes`, see `scipy.stats.theilslopes`.

    """
    y = ma.asarray(y).flatten()
    if x is None:
        x = ma.arange(len(y), dtype=float)
    else:
        x = ma.asarray(x).flatten()
        if len(x) != len(y):
            raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})")

    # Propagate a joint mask so a point masked in either array is dropped
    # from both, then hand the clean ndarrays to the non-masked routine.
    joint_mask = ma.mask_or(ma.getmask(x), ma.getmask(y))
    y._mask = x._mask = joint_mask
    y_clean = y.compressed()
    x_clean = x.compressed().astype(float)
    return stats_theilslopes(y_clean, x_clean, alpha=alpha, method=method)
SenSeasonalSlopesResult = _make_tuple_bunch('SenSeasonalSlopesResult',
                                            ['intra_slope', 'inter_slope'])


def sen_seasonal_slopes(x):
    r"""
    Computes seasonal Theil-Sen and Kendall slope estimators.

    For a 2D array whose columns are "seasons", computes the slopes between
    all pairs of values within each season. Returns the median of those
    within-season slopes per season (the Theil-Sen estimator of each
    season) and the median of the within-season slopes pooled across all
    seasons (the seasonal Kendall slope estimator).

    Parameters
    ----------
    x : 2D array_like
        Each column of `x` contains measurements of the dependent variable
        within a season. The independent variable of each season is assumed
        to be ``np.arange(x.shape[0])``.

    Returns
    -------
    result : ``SenSeasonalSlopesResult`` instance
        Object with attributes ``intra_slope`` (per-season medians, ndarray)
        and ``inter_slope`` (the pooled median, float).

    See Also
    --------
    theilslopes : the analogous function for non-seasonal data
    scipy.stats.theilslopes : non-seasonal slopes for non-masked arrays

    References
    ----------
    .. [1] Hirsch, Robert M., James R. Slack, and Richard A. Smith.
           "Techniques of trend analysis for monthly water quality data."
           *Water Resources Research* 18.1 (1982): 107-121.

    """
    x = ma.array(x, subok=True, copy=False, ndmin=2)
    n_obs = x.shape[0]
    # Stack every within-season slope (x[j] - x[i]) / (j - i) for j > i;
    # rows are pairs, columns are seasons.
    pairwise = [(x[lag + 1:] - x[lag]) / np.arange(1, n_obs - lag)[:, None]
                for lag in range(n_obs)]
    all_slopes = ma.vstack(pairwise)
    per_season = ma.median(all_slopes, axis=0)
    pooled = ma.median(all_slopes, axis=None)
    return SenSeasonalSlopesResult(per_season, pooled)
+ The following options are available (default is 'two-sided'): + + * 'two-sided': the mean of the underlying distribution of the sample + is different than the given population mean (`popmean`) + * 'less': the mean of the underlying distribution of the sample is + less than the given population mean (`popmean`) + * 'greater': the mean of the underlying distribution of the sample is + greater than the given population mean (`popmean`) + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : float or array + t-statistic + pvalue : float or array + The p-value + + Notes + ----- + For more details on `ttest_1samp`, see `scipy.stats.ttest_1samp`. + + """ + a, axis = _chk_asarray(a, axis) + if a.size == 0: + return (np.nan, np.nan) + + x = a.mean(axis=axis) + v = a.var(axis=axis, ddof=1) + n = a.count(axis=axis) + # force df to be an array for masked division not to throw a warning + df = ma.asanyarray(n - 1.0) + svar = ((n - 1.0) * v) / df + with np.errstate(divide='ignore', invalid='ignore'): + t = (x - popmean) / ma.sqrt(svar / n) + + t, prob = _ttest_finish(df, t, alternative) + return Ttest_1sampResult(t, prob) + + +ttest_onesamp = ttest_1samp + + +Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue')) + + +def ttest_ind(a, b, axis=0, equal_var=True, alternative='two-sided'): + """ + Calculates the T-test for the means of TWO INDEPENDENT samples of scores. + + Parameters + ---------- + a, b : array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int or None, optional + Axis along which to compute test. If None, compute over the whole + arrays, `a`, and `b`. + equal_var : bool, optional + If True, perform a standard independent 2 sample test that assumes equal + population variances. + If False, perform Welch's t-test, which does not assume equal population + variance. + + .. 
versionadded:: 0.17.0 + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions underlying the samples + are unequal. + * 'less': the mean of the distribution underlying the first sample + is less than the mean of the distribution underlying the second + sample. + * 'greater': the mean of the distribution underlying the first + sample is greater than the mean of the distribution underlying + the second sample. + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : float or array + The calculated t-statistic. + pvalue : float or array + The p-value. + + Notes + ----- + For more details on `ttest_ind`, see `scipy.stats.ttest_ind`. + + """ + a, b, axis = _chk2_asarray(a, b, axis) + + if a.size == 0 or b.size == 0: + return Ttest_indResult(np.nan, np.nan) + + (x1, x2) = (a.mean(axis), b.mean(axis)) + (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1)) + (n1, n2) = (a.count(axis), b.count(axis)) + + if equal_var: + # force df to be an array for masked division not to throw a warning + df = ma.asanyarray(n1 + n2 - 2.0) + svar = ((n1-1)*v1+(n2-1)*v2) / df + denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here! + else: + vn1 = v1/n1 + vn2 = v2/n2 + with np.errstate(divide='ignore', invalid='ignore'): + df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1)) + + # If df is undefined, variances are zero. + # It doesn't matter what df is as long as it is not NaN. 
+ df = np.where(np.isnan(df), 1, df) + denom = ma.sqrt(vn1 + vn2) + + with np.errstate(divide='ignore', invalid='ignore'): + t = (x1-x2) / denom + + t, prob = _ttest_finish(df, t, alternative) + return Ttest_indResult(t, prob) + + +Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue')) + + +def ttest_rel(a, b, axis=0, alternative='two-sided'): + """ + Calculates the T-test on TWO RELATED samples of scores, a and b. + + Parameters + ---------- + a, b : array_like + The arrays must have the same shape. + axis : int or None, optional + Axis along which to compute test. If None, compute over the whole + arrays, `a`, and `b`. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions underlying the samples + are unequal. + * 'less': the mean of the distribution underlying the first sample + is less than the mean of the distribution underlying the second + sample. + * 'greater': the mean of the distribution underlying the first + sample is greater than the mean of the distribution underlying + the second sample. + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : float or array + t-statistic + pvalue : float or array + two-tailed p-value + + Notes + ----- + For more details on `ttest_rel`, see `scipy.stats.ttest_rel`. 
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
                                                       'pvalue'))


def mannwhitneyu(x, y, use_continuity=True):
    """
    Computes the Mann-Whitney statistic.

    Missing values in `x` and/or `y` are discarded.

    Parameters
    ----------
    x : sequence
        Input
    y : sequence
        Input
    use_continuity : {True, False}, optional
        Whether a continuity correction (1/2.) should be taken into account.

    Returns
    -------
    statistic : float
        The minimum of the Mann-Whitney statistics
    pvalue : float
        Approximate two-sided p-value assuming a normal distribution.

    """
    x = ma.asarray(x).compressed().view(ndarray)
    y = ma.asarray(y).compressed().view(ndarray)
    pooled_ranks = rankdata(np.concatenate([x, y]))
    nx, ny = len(x), len(y)
    ntot = nx + ny

    big_u = pooled_ranks[:nx].sum() - nx * (nx + 1) / 2.
    big_u = max(big_u, nx * ny - big_u)
    small_u = nx * ny - big_u

    mu = (nx * ny) / 2.
    # Normal-approximation variance, corrected for ties.
    sigsq = (ntot ** 3 - ntot) / 12.
    ties = count_tied_groups(pooled_ranks)
    sigsq -= sum(v * (k ** 3 - k) for (k, v) in ties.items()) / 12.
    sigsq *= nx * ny / float(ntot * (ntot - 1))

    shift = 1 / 2. if use_continuity else 0.
    z = (big_u - shift - mu) / ma.sqrt(sigsq)

    prob = special.erfc(abs(z) / np.sqrt(2))
    return MannwhitneyuResult(small_u, prob)
@_rename_parameter("mode", "method")
def ks_1samp(x, cdf, args=(), alternative="two-sided", method='auto'):
    """
    Computes the Kolmogorov-Smirnov test on one sample of masked values.

    Missing values in `x` are discarded.

    Parameters
    ----------
    x : array_like
        a 1-D array of observations of random variables.
    cdf : str or callable
        If a string, the name of a distribution in `scipy.stats`;
        if a callable, it is used to calculate the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used if `cdf` is a string.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Indicates the alternative hypothesis. Default is 'two-sided'.
    method : {'auto', 'exact', 'asymp'}, optional
        Method used for calculating the p-value (default is 'auto').

    Returns
    -------
    d : float
        Value of the Kolmogorov Smirnov test
    p : float
        Corresponding p-value.

    """
    # Accept single-letter abbreviations ('t', 'g', 'l') for `alternative`.
    abbrevs = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}
    alternative = abbrevs.get(alternative.lower()[0], alternative)
    return scipy.stats._stats_py.ks_1samp(x, cdf, args=args,
                                          alternative=alternative,
                                          method=method)
@_rename_parameter("mode", "method")
def kstest(data1, data2, args=(), alternative='two-sided', method='auto'):
    """
    Performs the (one-sample or two-sample) Kolmogorov-Smirnov test.

    Thin wrapper that forwards to `scipy.stats.kstest`.

    Parameters
    ----------
    data1 : array_like
        Sample observations.
    data2 : str, callable or array_like
        Reference distribution (name or cdf callable) for a one-sample
        test, or a second sample for a two-sample test.
    args : tuple, sequence, optional
        Distribution parameters, used if `data1` or `data2` are strings.
    alternative : str, as documented in stats.kstest
    method : str, as documented in stats.kstest

    Returns
    -------
    tuple of (K-S statistic, probability)

    """
    return scipy.stats._stats_py.kstest(data1, data2, args,
                                        alternative=alternative, method=method)
def trimr(a, limits=None, inclusive=(True, True), axis=None):
    """
    Trims an array by masking some proportion of the data on each end.
    Returns a masked version of the input array.

    Parameters
    ----------
    a : sequence
        Input array.
    limits : {None, tuple}, optional
        Tuple of the percentages to cut on each side of the array, with
        respect to the number of unmasked data, as floats between 0. and 1.
        With n the number of unmasked data before trimming, the
        (n*limits[0])th smallest and the (n*limits[1])th largest data are
        masked; n*(1.-sum(limits)) values remain unmasked. Either limit may
        be None to indicate an open interval.
    inclusive : {(True,True) tuple}, optional
        Tuple of flags indicating whether the number of data masked on the
        left (right) end should be truncated (True) or rounded (False) to
        integers.
    axis : {None,int}, optional
        Axis along which to trim. If None, the whole array is trimmed, but
        its shape is maintained.

    """
    def _trim_1d(arr, lo, hi, lo_truncate, hi_truncate):
        # Mask `lo`/`hi` proportions off the ends of a 1-D masked array.
        count = arr.count()
        order = arr.argsort()
        if lo:
            ncut = int(lo * count) if lo_truncate else int(np.round(lo * count))
            arr[order[:ncut]] = masked
        if hi is not None:
            keep = (count - int(count * hi) if hi_truncate
                    else count - int(np.round(count * hi)))
            arr[order[keep:]] = masked
        return arr

    a = ma.asarray(a)
    a.unshare_mask()
    if limits is None:
        return a

    # Validate the proportions.
    lolim, uplim = limits
    errmsg = "The proportion to cut from the %s should be between 0. and 1."
    if lolim is not None:
        if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
    if uplim is not None:
        if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + "(got %s)" % uplim)

    loinc, upinc = inclusive

    if axis is None:
        shp = a.shape
        return _trim_1d(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp)
    return ma.apply_along_axis(_trim_1d, axis, a, lolim, uplim, loinc, upinc)
def trim(a, limits=None, inclusive=(True, True), relative=False, axis=None):
    """
    Trims an array by masking the data outside some given limits.

    Returns a masked version of the input array.

    %s

    Examples
    --------
    >>> from scipy.stats.mstats import trim
    >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
    >>> print(trim(z,(3,8)))
    [-- -- 3 4 5 6 7 8 -- --]
    >>> print(trim(z,(0.1,0.2),relative=True))
    [-- 2 3 4 5 6 7 8 -- --]

    """
    # Dispatch on the meaning of `limits`: proportions vs absolute bounds.
    if relative:
        return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    return trima(a, limits=limits, inclusive=inclusive)


if trim.__doc__:
    trim.__doc__ = trim.__doc__ % trimdoc
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True, True),
             axis=None):
    """
    Trims the data by masking values from one tail.

    Parameters
    ----------
    data : array_like
        Data to trim.
    proportiontocut : float, optional
        Percentage of trimming. If n is the number of unmasked values
        before trimming, ``(1 - proportiontocut) * n`` values remain
        afterwards. Default is 0.2.
    tail : {'left','right'}, optional
        Which tail to mask: the `proportiontocut` lowest values ('left')
        or highest values ('right'). Default is 'left'.
    inclusive : {(bool, bool) tuple}, optional
        Tuple indicating whether the number of data being masked on each
        side should be rounded (True) or truncated (False). Default is
        (True, True).
    axis : int, optional
        Axis along which to perform the trimming.
        If None, the input array is first flattened. Default is None.

    Returns
    -------
    trimtail : ndarray
        Returned array of same shape as `data` with masked tail values.

    """
    side = str(tail).lower()[0]
    if side == 'l':
        limits = (proportiontocut, None)
    elif side == 'r':
        limits = (None, proportiontocut)
    else:
        raise TypeError("The tail argument should be in ('left','right')")

    return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
def trimmed_var(a, limits=(0.1, 0.1), inclusive=(1, 1), relative=True,
                axis=None, ddof=0):
    """Returns the trimmed variance of the data along the given axis.

    %s
    ddof : {0,integer}, optional
        Means Delta Degrees of Freedom. The denominator used during
        computations is (n-ddof). DDOF=0 corresponds to a biased estimate,
        DDOF=1 to an unbiased estimate of the variance.

    """
    # A scalar limit applies symmetrically to both ends.
    if isinstance(limits, float):
        limits = (limits, limits)
    if relative:
        trimmed = trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    else:
        trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.var(axis=axis, ddof=ddof)


if trimmed_var.__doc__:
    trimmed_var.__doc__ = trimmed_var.__doc__ % trimdoc
+ + """ + if (not isinstance(limits,tuple)) and isinstance(limits,float): + limits = (limits, limits) + if relative: + out = trimr(a,limits=limits,inclusive=inclusive,axis=axis) + else: + out = trima(a,limits=limits,inclusive=inclusive) + return out.std(axis=axis,ddof=ddof) + + +if trimmed_std.__doc__: + trimmed_std.__doc__ = trimmed_std.__doc__ % trimdoc + + +def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None): + """ + Returns the standard error of the trimmed mean along the given axis. + + Parameters + ---------- + a : sequence + Input array + limits : {(0.1,0.1), tuple of float}, optional + tuple (lower percentage, upper percentage) to cut on each side of the + array, with respect to the number of unmasked data. + + If n is the number of unmasked data before trimming, the values + smaller than ``n * limits[0]`` and the values larger than + ``n * `limits[1]`` are masked, and the total number of unmasked + data after trimming is ``n * (1.-sum(limits))``. In each case, + the value of one limit can be set to None to indicate an open interval. + If `limits` is None, no trimming is performed. + inclusive : {(bool, bool) tuple} optional + Tuple indicating whether the number of data being masked on each side + should be rounded (True) or truncated (False). + axis : int, optional + Axis along which to trim. + + Returns + ------- + trimmed_stde : scalar or ndarray + + """ + def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive): + "Returns the standard error of the trimmed mean for a 1D input data." 
+ n = a.count() + idx = a.argsort() + if low_limit: + if low_inclusive: + lowidx = int(low_limit*n) + else: + lowidx = np.round(low_limit*n) + a[idx[:lowidx]] = masked + if up_limit is not None: + if up_inclusive: + upidx = n - int(n*up_limit) + else: + upidx = n - np.round(n*up_limit) + a[idx[upidx:]] = masked + a[idx[:lowidx]] = a[idx[lowidx]] + a[idx[upidx:]] = a[idx[upidx-1]] + winstd = a.std(ddof=1) + return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a))) + + a = ma.array(a, copy=True, subok=True) + a.unshare_mask() + if limits is None: + return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis)) + if (not isinstance(limits,tuple)) and isinstance(limits,float): + limits = (limits, limits) + + # Check the limits + (lolim, uplim) = limits + errmsg = "The proportion to cut from the %s should be between 0. and 1." + if lolim is not None: + if lolim > 1. or lolim < 0: + raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) + if uplim is not None: + if uplim > 1. or uplim < 0: + raise ValueError(errmsg % 'end' + "(got %s)" % uplim) + + (loinc, upinc) = inclusive + if (axis is None): + return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc) + else: + if a.ndim > 2: + raise ValueError("Array 'a' must be at most two dimensional, " + "but got a.ndim = %d" % a.ndim) + return ma.apply_along_axis(_trimmed_stde_1D, axis, a, + lolim,uplim,loinc,upinc) + + +def _mask_to_limits(a, limits, inclusive): + """Mask an array for values outside of given limits. + + This is primarily a utility function. + + Parameters + ---------- + a : array + limits : (float or None, float or None) + A tuple consisting of the (lower limit, upper limit). Values in the + input array less than the lower limit or greater than the upper limit + will be masked out. None implies no limit. + inclusive : (bool, bool) + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to lower or upper are allowed. + + Returns + ------- + A MaskedArray. 
+ + Raises + ------ + A ValueError if there are no values within the given limits. + """ + lower_limit, upper_limit = limits + lower_include, upper_include = inclusive + am = ma.MaskedArray(a) + if lower_limit is not None: + if lower_include: + am = ma.masked_less(am, lower_limit) + else: + am = ma.masked_less_equal(am, lower_limit) + + if upper_limit is not None: + if upper_include: + am = ma.masked_greater(am, upper_limit) + else: + am = ma.masked_greater_equal(am, upper_limit) + + if am.count() == 0: + raise ValueError("No array values within given limits") + + return am + + +def tmean(a, limits=None, inclusive=(True, True), axis=None): + """ + Compute the trimmed mean. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None (default), then all + values are used. Either of the limit values in the tuple can also be + None representing a half-open interval. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. If None, compute over the + whole array. Default is None. + + Returns + ------- + tmean : float + + Notes + ----- + For more details on `tmean`, see `scipy.stats.tmean`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import mstats + >>> a = np.array([[6, 8, 3, 0], + ... [3, 9, 1, 2], + ... [8, 7, 8, 2], + ... [5, 6, 0, 2], + ... [4, 5, 5, 2]]) + ... + ... 
+ >>> mstats.tmean(a, (2,5)) + 3.3 + >>> mstats.tmean(a, (2,5), axis=0) + masked_array(data=[4.0, 5.0, 4.0, 2.0], + mask=[False, False, False, False], + fill_value=1e+20) + + """ + return trima(a, limits=limits, inclusive=inclusive).mean(axis=axis) + + +def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """ + Compute the trimmed variance + + This function computes the sample variance of an array of values, + while ignoring values which are outside of given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. If None, compute over the + whole array. Default is zero. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tvar : float + Trimmed variance. + + Notes + ----- + For more details on `tvar`, see `scipy.stats.tvar`. + + """ + a = a.astype(float).ravel() + if limits is None: + n = (~a.mask).sum() # todo: better way to do that? + return np.ma.var(a) * n/(n-1.) + am = _mask_to_limits(a, limits=limits, inclusive=inclusive) + + return np.ma.var(am, axis=axis, ddof=ddof) + + +def tmin(a, lowerlimit=None, axis=0, inclusive=True): + """ + Compute the trimmed minimum + + Parameters + ---------- + a : array_like + array of values + lowerlimit : None or float, optional + Values in the input array less than the given limit will be ignored. + When lowerlimit is None, then all values are used. 
The default value + is None. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + inclusive : {True, False}, optional + This flag determines whether values exactly equal to the lower limit + are included. The default value is True. + + Returns + ------- + tmin : float, int or ndarray + + Notes + ----- + For more details on `tmin`, see `scipy.stats.tmin`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import mstats + >>> a = np.array([[6, 8, 3, 0], + ... [3, 2, 1, 2], + ... [8, 1, 8, 2], + ... [5, 3, 0, 2], + ... [4, 7, 5, 2]]) + ... + >>> mstats.tmin(a, 5) + masked_array(data=[5, 7, 5, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + a, axis = _chk_asarray(a, axis) + am = trima(a, (lowerlimit, None), (inclusive, False)) + return ma.minimum.reduce(am, axis) + + +def tmax(a, upperlimit=None, axis=0, inclusive=True): + """ + Compute the trimmed maximum + + This function computes the maximum value of an array along a given axis, + while ignoring values larger than a specified upper limit. + + Parameters + ---------- + a : array_like + array of values + upperlimit : None or float, optional + Values in the input array greater than the given limit will be ignored. + When upperlimit is None, then all values are used. The default value + is None. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + inclusive : {True, False}, optional + This flag determines whether values exactly equal to the upper limit + are included. The default value is True. + + Returns + ------- + tmax : float, int or ndarray + + Notes + ----- + For more details on `tmax`, see `scipy.stats.tmax`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import mstats + >>> a = np.array([[6, 8, 3, 0], + ... [3, 9, 1, 2], + ... [8, 7, 8, 2], + ... [5, 6, 0, 2], + ... [4, 5, 5, 2]]) + ... + ... 
+ >>> mstats.tmax(a, 4) + masked_array(data=[4, --, 3, 2], + mask=[False, True, False, False], + fill_value=999999) + + """ + a, axis = _chk_asarray(a, axis) + am = trima(a, (None, upperlimit), (False, inclusive)) + return ma.maximum.reduce(am, axis) + + +def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """ + Compute the trimmed standard error of the mean. + + This function finds the standard error of the mean for given + values, ignoring values outside the given `limits`. + + Parameters + ---------- + a : array_like + array of values + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. If None, compute over the + whole array. Default is zero. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tsem : float + + Notes + ----- + For more details on `tsem`, see `scipy.stats.tsem`. + + """ + a = ma.asarray(a).ravel() + if limits is None: + n = float(a.count()) + return a.std(axis=axis, ddof=ddof)/ma.sqrt(n) + + am = trima(a.ravel(), limits, inclusive) + sd = np.sqrt(am.var(axis=axis, ddof=ddof)) + return sd / np.sqrt(am.count()) + + +def winsorize(a, limits=None, inclusive=(True, True), inplace=False, + axis=None, nan_policy='propagate'): + """Returns a Winsorized version of the input array. + + The (limits[0])th lowest values are set to the (limits[0])th percentile, + and the (limits[1])th highest values are set to the (1 - limits[1])th + percentile. 
+ Masked values are skipped. + + + Parameters + ---------- + a : sequence + Input array. + limits : {None, tuple of float}, optional + Tuple of the percentages to cut on each side of the array, with respect + to the number of unmasked data, as floats between 0. and 1. + Noting n the number of unmasked data before trimming, the + (n*limits[0])th smallest data and the (n*limits[1])th largest data are + masked, and the total number of unmasked data after trimming + is n*(1.-sum(limits)) The value of one limit can be set to None to + indicate an open interval. + inclusive : {(True, True) tuple}, optional + Tuple indicating whether the number of data being masked on each side + should be truncated (True) or rounded (False). + inplace : {False, True}, optional + Whether to winsorize in place (True) or to use a copy (False) + axis : {None, int}, optional + Axis along which to trim. If None, the whole array is trimmed, but its + shape is maintained. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': allows nan values and may overwrite or propagate them + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Notes + ----- + This function is applied to reduce the effect of possibly spurious outliers + by limiting the extreme values. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats.mstats import winsorize + + A shuffled array contains integers from 1 to 10. + + >>> a = np.array([10, 4, 9, 8, 5, 3, 7, 2, 1, 6]) + + The 10% of the lowest value (i.e., `1`) and the 20% of the highest + values (i.e., `9` and `10`) are replaced. 
+ + >>> winsorize(a, limits=[0.1, 0.2]) + masked_array(data=[8, 4, 8, 8, 5, 3, 7, 2, 2, 6], + mask=False, + fill_value=999999) + + """ + def _winsorize1D(a, low_limit, up_limit, low_include, up_include, + contains_nan, nan_policy): + n = a.count() + idx = a.argsort() + if contains_nan: + nan_count = np.count_nonzero(np.isnan(a)) + if low_limit: + if low_include: + lowidx = int(low_limit * n) + else: + lowidx = np.round(low_limit * n).astype(int) + if contains_nan and nan_policy == 'omit': + lowidx = min(lowidx, n-nan_count-1) + a[idx[:lowidx]] = a[idx[lowidx]] + if up_limit is not None: + if up_include: + upidx = n - int(n * up_limit) + else: + upidx = n - np.round(n * up_limit).astype(int) + if contains_nan and nan_policy == 'omit': + a[idx[upidx:-nan_count]] = a[idx[upidx - 1]] + else: + a[idx[upidx:]] = a[idx[upidx - 1]] + return a + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + # We are going to modify a: better make a copy + a = ma.array(a, copy=np.logical_not(inplace)) + + if limits is None: + return a + if (not isinstance(limits, tuple)) and isinstance(limits, float): + limits = (limits, limits) + + # Check the limits + (lolim, uplim) = limits + errmsg = "The proportion to cut from the %s should be between 0. and 1." + if lolim is not None: + if lolim > 1. or lolim < 0: + raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) + if uplim is not None: + if uplim > 1. or uplim < 0: + raise ValueError(errmsg % 'end' + "(got %s)" % uplim) + + (loinc, upinc) = inclusive + + if axis is None: + shp = a.shape + return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc, + contains_nan, nan_policy).reshape(shp) + else: + return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc, + upinc, contains_nan, nan_policy) + + +def moment(a, moment=1, axis=0): + """ + Calculates the nth moment about the mean for a sample. 
+ + Parameters + ---------- + a : array_like + data + moment : int, optional + order of central moment that is returned + axis : int or None, optional + Axis along which the central moment is computed. Default is 0. + If None, compute over the whole array `a`. + + Returns + ------- + n-th central moment : ndarray or float + The appropriate moment along the given axis or over all values if axis + is None. The denominator for the moment calculation is the number of + observations, no degrees of freedom correction is done. + + Notes + ----- + For more details about `moment`, see `scipy.stats.moment`. + + """ + a, axis = _chk_asarray(a, axis) + if a.size == 0: + moment_shape = list(a.shape) + del moment_shape[axis] + dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64 + # empty array, return nan(s) with shape matching `moment` + out_shape = (moment_shape if np.isscalar(moment) + else [len(moment)] + moment_shape) + if len(out_shape) == 0: + return dtype(np.nan) + else: + return ma.array(np.full(out_shape, np.nan, dtype=dtype)) + + # for array_like moment input, return a value for each. + if not np.isscalar(moment): + mean = a.mean(axis, keepdims=True) + mmnt = [_moment(a, i, axis, mean=mean) for i in moment] + return ma.array(mmnt) + else: + return _moment(a, moment, axis) + + +# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True) +def _moment(a, moment, axis, *, mean=None): + if np.abs(moment - np.round(moment)) > 0: + raise ValueError("All moment parameters must be integers") + + if moment == 0 or moment == 1: + # By definition the zeroth moment about the mean is 1, and the first + # moment is 0. 
+ shape = list(a.shape) + del shape[axis] + dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64 + + if len(shape) == 0: + return dtype(1.0 if moment == 0 else 0.0) + else: + return (ma.ones(shape, dtype=dtype) if moment == 0 + else ma.zeros(shape, dtype=dtype)) + else: + # Exponentiation by squares: form exponent sequence + n_list = [moment] + current_n = moment + while current_n > 2: + if current_n % 2: + current_n = (current_n-1)/2 + else: + current_n /= 2 + n_list.append(current_n) + + # Starting point for exponentiation by squares + mean = a.mean(axis, keepdims=True) if mean is None else mean + a_zero_mean = a - mean + if n_list[-1] == 1: + s = a_zero_mean.copy() + else: + s = a_zero_mean**2 + + # Perform multiplications + for n in n_list[-2::-1]: + s = s**2 + if n % 2: + s *= a_zero_mean + return s.mean(axis) + + +def variation(a, axis=0, ddof=0): + """ + Compute the coefficient of variation. + + The coefficient of variation is the standard deviation divided by the + mean. This function is equivalent to:: + + np.std(x, axis=axis, ddof=ddof) / np.mean(x) + + The default for ``ddof`` is 0, but many definitions of the coefficient + of variation use the square root of the unbiased sample variance + for the sample standard deviation, which corresponds to ``ddof=1``. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate the coefficient of variation. Default + is 0. If None, compute over the whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 0. + + Returns + ------- + variation : ndarray + The calculated variation along the requested axis. + + Notes + ----- + For more details about `variation`, see `scipy.stats.variation`. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats.mstats import variation + >>> a = np.array([2,8,4]) + >>> variation(a) + 0.5345224838248487 + >>> b = np.array([2,8,3,4]) + >>> c = np.ma.masked_array(b, mask=[0,0,1,0]) + >>> variation(c) + 0.5345224838248487 + + In the example above, it can be seen that this works the same as + `scipy.stats.variation` except 'stats.mstats.variation' ignores masked + array elements. + + """ + a, axis = _chk_asarray(a, axis) + return a.std(axis, ddof=ddof)/a.mean(axis) + + +def skew(a, axis=0, bias=True): + """ + Computes the skewness of a data set. + + Parameters + ---------- + a : ndarray + data + axis : int or None, optional + Axis along which skewness is calculated. Default is 0. + If None, compute over the whole array `a`. + bias : bool, optional + If False, then the calculations are corrected for statistical bias. + + Returns + ------- + skewness : ndarray + The skewness of values along an axis, returning 0 where all values are + equal. + + Notes + ----- + For more details about `skew`, see `scipy.stats.skew`. + + """ + a, axis = _chk_asarray(a,axis) + mean = a.mean(axis, keepdims=True) + m2 = _moment(a, 2, axis, mean=mean) + m3 = _moment(a, 3, axis, mean=mean) + zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2) + with np.errstate(all='ignore'): + vals = ma.where(zero, 0, m3 / m2**1.5) + + if not bias and zero is not ma.masked and m2 is not ma.masked: + n = a.count(axis) + can_correct = ~zero & (n > 2) + if can_correct.any(): + n = np.extract(can_correct, n) + m2 = np.extract(can_correct, m2) + m3 = np.extract(can_correct, m3) + nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5 + np.place(vals, can_correct, nval) + return vals + + +def kurtosis(a, axis=0, fisher=True, bias=True): + """ + Computes the kurtosis (Fisher or Pearson) of a dataset. + + Kurtosis is the fourth central moment divided by the square of the + variance. 
If Fisher's definition is used, then 3.0 is subtracted from + the result to give 0.0 for a normal distribution. + + If bias is False then the kurtosis is calculated using k statistics to + eliminate bias coming from biased moment estimators + + Use `kurtosistest` to see if result is close enough to normal. + + Parameters + ---------- + a : array + data for which the kurtosis is calculated + axis : int or None, optional + Axis along which the kurtosis is calculated. Default is 0. + If None, compute over the whole array `a`. + fisher : bool, optional + If True, Fisher's definition is used (normal ==> 0.0). If False, + Pearson's definition is used (normal ==> 3.0). + bias : bool, optional + If False, then the calculations are corrected for statistical bias. + + Returns + ------- + kurtosis : array + The kurtosis of values along an axis. If all values are equal, + return -3 for Fisher's definition and 0 for Pearson's definition. + + Notes + ----- + For more details about `kurtosis`, see `scipy.stats.kurtosis`. + + """ + a, axis = _chk_asarray(a, axis) + mean = a.mean(axis, keepdims=True) + m2 = _moment(a, 2, axis, mean=mean) + m4 = _moment(a, 4, axis, mean=mean) + zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2) + with np.errstate(all='ignore'): + vals = ma.where(zero, 0, m4 / m2**2.0) + + if not bias and zero is not ma.masked and m2 is not ma.masked: + n = a.count(axis) + can_correct = ~zero & (n > 3) + if can_correct.any(): + n = np.extract(can_correct, n) + m2 = np.extract(can_correct, m2) + m4 = np.extract(can_correct, m4) + nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0) + np.place(vals, can_correct, nval+3.0) + if fisher: + return vals - 3 + else: + return vals + + +DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean', + 'variance', 'skewness', + 'kurtosis')) + + +def describe(a, axis=0, ddof=0, bias=True): + """ + Computes several descriptive statistics of the passed array. 
+ + Parameters + ---------- + a : array_like + Data array + axis : int or None, optional + Axis along which to calculate statistics. Default 0. If None, + compute over the whole array `a`. + ddof : int, optional + degree of freedom (default 0); note that default ddof is different + from the same routine in stats.describe + bias : bool, optional + If False, then the skewness and kurtosis calculations are corrected for + statistical bias. + + Returns + ------- + nobs : int + (size of the data (discarding missing values) + + minmax : (int, int) + min, max + + mean : float + arithmetic mean + + variance : float + unbiased variance + + skewness : float + biased skewness + + kurtosis : float + biased kurtosis + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats.mstats import describe + >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1]) + >>> describe(ma) + DescribeResult(nobs=np.int64(3), minmax=(masked_array(data=0, + mask=False, + fill_value=999999), masked_array(data=2, + mask=False, + fill_value=999999)), mean=np.float64(1.0), + variance=np.float64(0.6666666666666666), + skewness=masked_array(data=0., mask=False, fill_value=1e+20), + kurtosis=np.float64(-1.5)) + + """ + a, axis = _chk_asarray(a, axis) + n = a.count(axis) + mm = (ma.minimum.reduce(a, axis=axis), ma.maximum.reduce(a, axis=axis)) + m = a.mean(axis) + v = a.var(axis, ddof=ddof) + sk = skew(a, axis, bias=bias) + kurt = kurtosis(a, axis, bias=bias) + + return DescribeResult(n, mm, m, v, sk, kurt) + + +def stde_median(data, axis=None): + """Returns the McKean-Schrader estimate of the standard error of the sample + median along the given axis. masked values are discarded. + + Parameters + ---------- + data : ndarray + Data to trim. + axis : {None,int}, optional + Axis along which to perform the trimming. + If None, the input array is first flattened. 
+ + """ + def _stdemed_1D(data): + data = np.sort(data.compressed()) + n = len(data) + z = 2.5758293035489004 + k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0)) + return ((data[n-k] - data[k-1])/(2.*z)) + + data = ma.array(data, copy=False, subok=True) + if (axis is None): + return _stdemed_1D(data) + else: + if data.ndim > 2: + raise ValueError("Array 'data' must be at most two dimensional, " + "but got data.ndim = %d" % data.ndim) + return ma.apply_along_axis(_stdemed_1D, axis, data) + + +SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue')) + + +def skewtest(a, axis=0, alternative='two-sided'): + """ + Tests whether the skew is different from the normal distribution. + + Parameters + ---------- + a : array_like + The data to be tested + axis : int or None, optional + Axis along which statistics are calculated. Default is 0. + If None, compute over the whole array `a`. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the skewness of the distribution underlying the sample + is different from that of the normal distribution (i.e. 0) + * 'less': the skewness of the distribution underlying the sample + is less than that of the normal distribution + * 'greater': the skewness of the distribution underlying the sample + is greater than that of the normal distribution + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : array_like + The computed z-score for this test. + pvalue : array_like + A p-value for the hypothesis test + + Notes + ----- + For more details about `skewtest`, see `scipy.stats.skewtest`. + + """ + a, axis = _chk_asarray(a, axis) + if axis is None: + a = a.ravel() + axis = 0 + b2 = skew(a,axis) + n = a.count(axis) + if np.min(n) < 8: + raise ValueError( + "skewtest is not valid with less than 8 samples; %i samples" + " were given." 
% np.min(n)) + + y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2))) + beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9)) + W2 = -1 + ma.sqrt(2*(beta2-1)) + delta = 1/ma.sqrt(0.5*ma.log(W2)) + alpha = ma.sqrt(2.0/(W2-1)) + y = ma.where(y == 0, 1, y) + Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1)) + pvalue = scipy.stats._stats_py._get_pvalue(Z, distributions.norm, alternative) + + return SkewtestResult(Z[()], pvalue[()]) + + +KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue')) + + +def kurtosistest(a, axis=0, alternative='two-sided'): + """ + Tests whether a dataset has normal kurtosis + + Parameters + ---------- + a : array_like + array of the sample data + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the kurtosis of the distribution underlying the sample + is different from that of the normal distribution + * 'less': the kurtosis of the distribution underlying the sample + is less than that of the normal distribution + * 'greater': the kurtosis of the distribution underlying the sample + is greater than that of the normal distribution + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : array_like + The computed z-score for this test. + pvalue : array_like + The p-value for the hypothesis test + + Notes + ----- + For more details about `kurtosistest`, see `scipy.stats.kurtosistest`. + + """ + a, axis = _chk_asarray(a, axis) + n = a.count(axis=axis) + if np.min(n) < 5: + raise ValueError( + "kurtosistest requires at least 5 observations; %i observations" + " were given." % np.min(n)) + if np.min(n) < 20: + warnings.warn( + "kurtosistest only valid for n>=20 ... 
continuing anyway, n=%i" % np.min(n), + stacklevel=2, + ) + + b2 = kurtosis(a, axis, fisher=False) + E = 3.0*(n-1) / (n+1) + varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) + x = (b2-E)/ma.sqrt(varb2) + sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) / + (n*(n-2)*(n-3))) + A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) + term1 = 1 - 2./(9.0*A) + denom = 1 + x*ma.sqrt(2/(A-4.0)) + if np.ma.isMaskedArray(denom): + # For multi-dimensional array input + denom[denom == 0.0] = masked + elif denom == 0.0: + denom = masked + + term2 = np.ma.where(denom > 0, ma.power((1-2.0/A)/denom, 1/3.0), + -ma.power(-(1-2.0/A)/denom, 1/3.0)) + Z = (term1 - term2) / np.sqrt(2/(9.0*A)) + pvalue = scipy.stats._stats_py._get_pvalue(Z, distributions.norm, alternative) + + return KurtosistestResult(Z[()], pvalue[()]) + + +NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue')) + + +def normaltest(a, axis=0): + """ + Tests whether a sample differs from a normal distribution. + + Parameters + ---------- + a : array_like + The array containing the data to be tested. + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + + Returns + ------- + statistic : float or array + ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and + ``k`` is the z-score returned by `kurtosistest`. + pvalue : float or array + A 2-sided chi squared probability for the hypothesis test. + + Notes + ----- + For more details about `normaltest`, see `scipy.stats.normaltest`. + + """ + a, axis = _chk_asarray(a, axis) + s, _ = skewtest(a, axis) + k, _ = kurtosistest(a, axis) + k2 = s*s + k*k + + return NormaltestResult(k2, distributions.chi2.sf(k2, 2)) + + +def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, + limit=()): + """ + Computes empirical quantiles for a data array. 
+ + Samples quantile are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``, + where ``x[j]`` is the j-th order statistic, and gamma is a function of + ``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and + ``g = n*p + m - j``. + + Reinterpreting the above equations to compare to **R** lead to the + equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)`` + + Typical values of (alphap,betap) are: + - (0,1) : ``p(k) = k/n`` : linear interpolation of cdf + (**R** type 4) + - (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function + (**R** type 5) + - (0,0) : ``p(k) = k/(n+1)`` : + (**R** type 6) + - (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])]. + (**R** type 7, **R** default) + - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])]. + The resulting quantile estimates are approximately median-unbiased + regardless of the distribution of x. + (**R** type 8) + - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom. + The resulting quantile estimates are approximately unbiased + if x is normally distributed + (**R** type 9) + - (.4,.4) : approximately quantile unbiased (Cunnane) + - (.35,.35): APL, used with PWM + + Parameters + ---------- + a : array_like + Input data, as a sequence or array of dimension at most 2. + prob : array_like, optional + List of quantiles to compute. + alphap : float, optional + Plotting positions parameter, default is 0.4. + betap : float, optional + Plotting positions parameter, default is 0.4. + axis : int, optional + Axis along which to perform the trimming. + If None (default), the input array is first flattened. + limit : tuple, optional + Tuple of (lower, upper) values. + Values of `a` outside this open interval are ignored. + + Returns + ------- + mquantiles : MaskedArray + An array containing the calculated quantiles. + + Notes + ----- + This formulation is very similar to **R** except the calculation of + ``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined + with each type. 
+ + References + ---------- + .. [1] *R* statistical software: https://www.r-project.org/ + .. [2] *R* ``quantile`` function: + http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats.mstats import mquantiles + >>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.]) + >>> mquantiles(a) + array([ 19.2, 40. , 42.8]) + + Using a 2D array, specifying axis and limit. + + >>> data = np.array([[ 6., 7., 1.], + ... [ 47., 15., 2.], + ... [ 49., 36., 3.], + ... [ 15., 39., 4.], + ... [ 42., 40., -999.], + ... [ 41., 41., -999.], + ... [ 7., -999., -999.], + ... [ 39., -999., -999.], + ... [ 43., -999., -999.], + ... [ 40., -999., -999.], + ... [ 36., -999., -999.]]) + >>> print(mquantiles(data, axis=0, limit=(0, 50))) + [[19.2 14.6 1.45] + [40. 37.5 2.5 ] + [42.8 40.05 3.55]] + + >>> data[:, 2] = -999. + >>> print(mquantiles(data, axis=0, limit=(0, 50))) + [[19.200000000000003 14.6 --] + [40.0 37.5 --] + [42.800000000000004 40.05 --]] + + """ + def _quantiles1D(data,m,p): + x = np.sort(data.compressed()) + n = len(x) + if n == 0: + return ma.array(np.empty(len(p), dtype=float), mask=True) + elif n == 1: + return ma.array(np.resize(x, p.shape), mask=nomask) + aleph = (n*p + m) + k = np.floor(aleph.clip(1, n-1)).astype(int) + gamma = (aleph-k).clip(0,1) + return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] + + data = ma.array(a, copy=False) + if data.ndim > 2: + raise TypeError("Array should be 2D at most !") + + if limit: + condition = (limit[0] < data) & (data < limit[1]) + data[~condition.filled(True)] = masked + + p = np.atleast_1d(np.asarray(prob)) + m = alphap + p*(1.-alphap-betap) + # Computes quantiles along axis (or globally) + if (axis is None): + return _quantiles1D(data, m, p) + + return ma.apply_along_axis(_quantiles1D, axis, data, m, p) + + +def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4): + """Calculate the score at the given 'per' 
percentile of the + sequence a. For example, the score at per=50 is the median. + + This function is a shortcut to mquantile + + """ + if (per < 0) or (per > 100.): + raise ValueError("The percentile should be between 0. and 100. !" + " (got %s)" % per) + + return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap, + limit=limit, axis=0).squeeze() + + +def plotting_positions(data, alpha=0.4, beta=0.4): + """ + Returns plotting positions (or empirical percentile points) for the data. + + Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where: + - i is the rank order statistics + - n is the number of unmasked values along the given axis + - `alpha` and `beta` are two parameters. + + Typical values for `alpha` and `beta` are: + - (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4) + - (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function + (R, type 5) + - (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6) + - (1,1) : ``p(k) = (k-1)/(n-1)``, in this case, + ``p(k) = mode[F(x[k])]``. That's R default (R type 7) + - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then + ``p(k) ~ median[F(x[k])]``. + The resulting quantile estimates are approximately median-unbiased + regardless of the distribution of x. (R type 8) + - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom. + The resulting quantile estimates are approximately unbiased + if x is normally distributed (R type 9) + - (.4,.4) : approximately quantile unbiased (Cunnane) + - (.35,.35): APL, used with PWM + - (.3175, .3175): used in scipy.stats.probplot + + Parameters + ---------- + data : array_like + Input data, as a sequence or array of dimension at most 2. + alpha : float, optional + Plotting positions parameter. Default is 0.4. + beta : float, optional + Plotting positions parameter. Default is 0.4. + + Returns + ------- + positions : MaskedArray + The calculated plotting positions. 
+ + """ + data = ma.array(data, copy=False).reshape(1,-1) + n = data.count() + plpos = np.empty(data.size, dtype=float) + plpos[n:] = 0 + plpos[data.argsort(axis=None)[:n]] = ((np.arange(1, n+1) - alpha) / + (n + 1.0 - alpha - beta)) + return ma.array(plpos, mask=data._mask) + + +meppf = plotting_positions + + +def obrientransform(*args): + """ + Computes a transform on input data (any number of columns). Used to + test for homogeneity of variance prior to running one-way stats. Each + array in ``*args`` is one level of a factor. If an `f_oneway()` run on + the transformed data and found significant, variances are unequal. From + Maxwell and Delaney, p.112. + + Returns: transformed data for use in an ANOVA + """ + data = argstoarray(*args).T + v = data.var(axis=0,ddof=1) + m = data.mean(0) + n = data.count(0).astype(float) + # result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2)) + data -= m + data **= 2 + data *= (n-1.5)*n + data -= 0.5*v*(n-1) + data /= (n-1.)*(n-2.) + if not ma.allclose(v,data.mean(0)): + raise ValueError("Lack of convergence in obrientransform.") + + return data + + +def sem(a, axis=0, ddof=1): + """ + Calculates the standard error of the mean of the input array. + + Also sometimes called standard error of measurement. + + Parameters + ---------- + a : array_like + An array containing the values for which the standard error is + returned. + axis : int or None, optional + If axis is None, ravel `a` first. If axis is an integer, this will be + the axis over which to operate. Defaults to 0. + ddof : int, optional + Delta degrees-of-freedom. How many degrees of freedom to adjust + for bias in limited samples relative to the population estimate + of variance. Defaults to 1. + + Returns + ------- + s : ndarray or float + The standard error of the mean in the sample(s), along the input axis. 
+ + Notes + ----- + The default value for `ddof` changed in scipy 0.15.0 to be consistent with + `scipy.stats.sem` as well as with the most common definition used (like in + the R documentation). + + Examples + -------- + Find standard error along the first axis: + + >>> import numpy as np + >>> from scipy import stats + >>> a = np.arange(20).reshape(5,4) + >>> print(stats.mstats.sem(a)) + [2.8284271247461903 2.8284271247461903 2.8284271247461903 + 2.8284271247461903] + + Find standard error across the whole array, using n degrees of freedom: + + >>> print(stats.mstats.sem(a, axis=None, ddof=0)) + 1.2893796958227628 + + """ + a, axis = _chk_asarray(a, axis) + n = a.count(axis=axis) + s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n) + return s + + +F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue')) + + +def f_oneway(*args): + """ + Performs a 1-way ANOVA, returning an F-value and probability given + any number of groups. From Heiman, pp.394-7. + + Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays, + one per treatment group. + + Returns + ------- + statistic : float + The computed F-value of the test. + pvalue : float + The associated p-value from the F-distribution. + + """ + # Construct a single array of arguments: each row is a group + data = argstoarray(*args) + ngroups = len(data) + ntot = data.count() + sstot = (data**2).sum() - (data.sum())**2/float(ntot) + ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum() + sswg = sstot-ssbg + dfbg = ngroups-1 + dfwg = ntot - ngroups + msb = ssbg/float(dfbg) + msw = sswg/float(dfwg) + f = msb/msw + prob = special.fdtrc(dfbg, dfwg, f) # equivalent to stats.f.sf + + return F_onewayResult(f, prob) + + +FriedmanchisquareResult = namedtuple('FriedmanchisquareResult', + ('statistic', 'pvalue')) + + +def friedmanchisquare(*args): + """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA. 
+ This function calculates the Friedman Chi-square test for repeated measures + and returns the result, along with the associated probability value. + + Each input is considered a given group. Ideally, the number of treatments + among each group should be equal. If this is not the case, only the first + n treatments are taken into account, where n is the number of treatments + of the smallest group. + If a group has some missing values, the corresponding treatments are masked + in the other groups. + The test statistic is corrected for ties. + + Masked values in one group are propagated to the other groups. + + Returns + ------- + statistic : float + the test statistic. + pvalue : float + the associated p-value. + + """ + data = argstoarray(*args).astype(float) + k = len(data) + if k < 3: + raise ValueError("Less than 3 groups (%i): " % k + + "the Friedman test is NOT appropriate.") + + ranked = ma.masked_values(rankdata(data, axis=0), 0) + if ranked._mask is not nomask: + ranked = ma.mask_cols(ranked) + ranked = ranked.compressed().reshape(k,-1).view(ndarray) + else: + ranked = ranked._data + (k,n) = ranked.shape + # Ties correction + repeats = [find_repeats(row) for row in ranked.T] + ties = np.array([y for x, y in repeats if x.size > 0]) + tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k)) + + ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2) + chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction + + return FriedmanchisquareResult(chisq, + distributions.chi2.sf(chisq, k-1)) + + +BrunnerMunzelResult = namedtuple('BrunnerMunzelResult', ('statistic', 'pvalue')) + + +def brunnermunzel(x, y, alternative="two-sided", distribution="t"): + """ + Compute the Brunner-Munzel test on samples x and y. + + Any missing values in `x` and/or `y` are discarded. + + The Brunner-Munzel test is a nonparametric test of the null hypothesis that + when values are taken one by one from each group, the probabilities of + getting large values in both groups are equal. 
+ Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the + assumption of equivariance of two groups. Note that this does not assume + the distributions are same. This test works on two independent samples, + which may have different sizes. + + Parameters + ---------- + x, y : array_like + Array of samples, should be one-dimensional. + alternative : 'less', 'two-sided', or 'greater', optional + Whether to get the p-value for the one-sided hypothesis ('less' + or 'greater') or for the two-sided hypothesis ('two-sided'). + Defaults value is 'two-sided' . + distribution : 't' or 'normal', optional + Whether to get the p-value by t-distribution or by standard normal + distribution. + Defaults value is 't' . + + Returns + ------- + statistic : float + The Brunner-Munzer W statistic. + pvalue : float + p-value assuming an t distribution. One-sided or + two-sided, depending on the choice of `alternative` and `distribution`. + + See Also + -------- + mannwhitneyu : Mann-Whitney rank test on two samples. + + Notes + ----- + For more details on `brunnermunzel`, see `scipy.stats.brunnermunzel`. 
+ + Examples + -------- + >>> from scipy.stats.mstats import brunnermunzel + >>> import numpy as np + >>> x1 = [1, 2, np.nan, np.nan, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1] + >>> x2 = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + >>> brunnermunzel(x1, x2) + BrunnerMunzelResult(statistic=1.4723186918922935, pvalue=0.15479415300426624) # may vary + + """ # noqa: E501 + x = ma.asarray(x).compressed().view(ndarray) + y = ma.asarray(y).compressed().view(ndarray) + nx = len(x) + ny = len(y) + if nx == 0 or ny == 0: + return BrunnerMunzelResult(np.nan, np.nan) + rankc = rankdata(np.concatenate((x,y))) + rankcx = rankc[0:nx] + rankcy = rankc[nx:nx+ny] + rankcx_mean = np.mean(rankcx) + rankcy_mean = np.mean(rankcy) + rankx = rankdata(x) + ranky = rankdata(y) + rankx_mean = np.mean(rankx) + ranky_mean = np.mean(ranky) + + Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0)) + Sx /= nx - 1 + Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0)) + Sy /= ny - 1 + + wbfn = nx * ny * (rankcy_mean - rankcx_mean) + wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy) + + if distribution == "t": + df_numer = np.power(nx * Sx + ny * Sy, 2.0) + df_denom = np.power(nx * Sx, 2.0) / (nx - 1) + df_denom += np.power(ny * Sy, 2.0) / (ny - 1) + df = df_numer / df_denom + p = distributions.t.cdf(wbfn, df) + elif distribution == "normal": + p = distributions.norm.cdf(wbfn) + else: + raise ValueError( + "distribution should be 't' or 'normal'") + + if alternative == "greater": + pass + elif alternative == "less": + p = 1 - p + elif alternative == "two-sided": + p = 2 * np.min([p, 1-p]) + else: + raise ValueError( + "alternative should be 'less', 'greater' or 'two-sided'") + + return BrunnerMunzelResult(wbfn, p) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_multicomp.py b/parrot/lib/python3.10/site-packages/scipy/stats/_multicomp.py new file mode 100644 index 0000000000000000000000000000000000000000..c12ce65a91dbb0a6fed48e06127f8902ca71b9bf --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/scipy/stats/_multicomp.py @@ -0,0 +1,459 @@ +from __future__ import annotations + +import warnings +from dataclasses import dataclass, field +from typing import TYPE_CHECKING + +import numpy as np + +from scipy import stats +from scipy.optimize import minimize_scalar +from scipy.stats._common import ConfidenceInterval +from scipy.stats._qmc import check_random_state +from scipy.stats._stats_py import _var + +if TYPE_CHECKING: + import numpy.typing as npt + from scipy._lib._util import DecimalNumber, SeedType + from typing import Literal, Sequence # noqa: UP035 + + +__all__ = [ + 'dunnett' +] + + +@dataclass +class DunnettResult: + """Result object returned by `scipy.stats.dunnett`. + + Attributes + ---------- + statistic : float ndarray + The computed statistic of the test for each comparison. The element + at index ``i`` is the statistic for the comparison between + groups ``i`` and the control. + pvalue : float ndarray + The computed p-value of the test for each comparison. The element + at index ``i`` is the p-value for the comparison between + group ``i`` and the control. + """ + statistic: np.ndarray + pvalue: np.ndarray + _alternative: Literal['two-sided', 'less', 'greater'] = field(repr=False) + _rho: np.ndarray = field(repr=False) + _df: int = field(repr=False) + _std: float = field(repr=False) + _mean_samples: np.ndarray = field(repr=False) + _mean_control: np.ndarray = field(repr=False) + _n_samples: np.ndarray = field(repr=False) + _n_control: int = field(repr=False) + _rng: SeedType = field(repr=False) + _ci: ConfidenceInterval | None = field(default=None, repr=False) + _ci_cl: DecimalNumber | None = field(default=None, repr=False) + + def __str__(self): + # Note: `__str__` prints the confidence intervals from the most + # recent call to `confidence_interval`. If it has not been called, + # it will be called with the default CL of .95. 
+ if self._ci is None: + self.confidence_interval(confidence_level=.95) + s = ( + "Dunnett's test" + f" ({self._ci_cl*100:.1f}% Confidence Interval)\n" + "Comparison Statistic p-value Lower CI Upper CI\n" + ) + for i in range(self.pvalue.size): + s += (f" (Sample {i} - Control) {self.statistic[i]:>10.3f}" + f"{self.pvalue[i]:>10.3f}" + f"{self._ci.low[i]:>10.3f}" + f"{self._ci.high[i]:>10.3f}\n") + + return s + + def _allowance( + self, confidence_level: DecimalNumber = 0.95, tol: DecimalNumber = 1e-3 + ) -> float: + """Allowance. + + It is the quantity to add/subtract from the observed difference + between the means of observed groups and the mean of the control + group. The result gives confidence limits. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval. + Default is .95. + tol : float, optional + A tolerance for numerical optimization: the allowance will produce + a confidence within ``10*tol*(1 - confidence_level)`` of the + specified level, or a warning will be emitted. Tight tolerances + may be impractical due to noisy evaluation of the objective. + Default is 1e-3. + + Returns + ------- + allowance : float + Allowance around the mean. + """ + alpha = 1 - confidence_level + + def pvalue_from_stat(statistic): + statistic = np.array(statistic) + sf = _pvalue_dunnett( + rho=self._rho, df=self._df, + statistic=statistic, alternative=self._alternative, + rng=self._rng + ) + return abs(sf - alpha)/alpha + + # Evaluation of `pvalue_from_stat` is noisy due to the use of RQMC to + # evaluate `multivariate_t.cdf`. `minimize_scalar` is not designed + # to tolerate a noisy objective function and may fail to find the + # minimum accurately. We mitigate this possibility with the validation + # step below, but implementation of a noise-tolerant root finder or + # minimizer would be a welcome enhancement. See gh-18150. 
+ res = minimize_scalar(pvalue_from_stat, method='brent', tol=tol) + critical_value = res.x + + # validation + # tol*10 because tol=1e-3 means we tolerate a 1% change at most + if res.success is False or res.fun >= tol*10: + warnings.warn( + "Computation of the confidence interval did not converge to " + "the desired level. The confidence level corresponding with " + f"the returned interval is approximately {alpha*(1+res.fun)}.", + stacklevel=3 + ) + + # From [1] p. 1101 between (1) and (3) + allowance = critical_value*self._std*np.sqrt( + 1/self._n_samples + 1/self._n_control + ) + return abs(allowance) + + def confidence_interval( + self, confidence_level: DecimalNumber = 0.95 + ) -> ConfidenceInterval: + """Compute the confidence interval for the specified confidence level. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval. + Default is .95. + + Returns + ------- + ci : ``ConfidenceInterval`` object + The object has attributes ``low`` and ``high`` that hold the + lower and upper bounds of the confidence intervals for each + comparison. The high and low values are accessible for each + comparison at index ``i`` for each group ``i``. + + """ + # check to see if the supplied confidence level matches that of the + # previously computed CI. 
+ if (self._ci is not None) and (confidence_level == self._ci_cl): + return self._ci + + if not (0 < confidence_level < 1): + raise ValueError("Confidence level must be between 0 and 1.") + + allowance = self._allowance(confidence_level=confidence_level) + diff_means = self._mean_samples - self._mean_control + + low = diff_means-allowance + high = diff_means+allowance + + if self._alternative == 'greater': + high = [np.inf] * len(diff_means) + elif self._alternative == 'less': + low = [-np.inf] * len(diff_means) + + self._ci_cl = confidence_level + self._ci = ConfidenceInterval( + low=low, + high=high + ) + return self._ci + + +def dunnett( + *samples: npt.ArrayLike, # noqa: D417 + control: npt.ArrayLike, + alternative: Literal['two-sided', 'less', 'greater'] = "two-sided", + random_state: SeedType = None +) -> DunnettResult: + """Dunnett's test: multiple comparisons of means against a control group. + + This is an implementation of Dunnett's original, single-step test as + described in [1]_. + + Parameters + ---------- + sample1, sample2, ... : 1D array_like + The sample measurements for each experimental group. + control : 1D array_like + The sample measurements for the control group. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + + The null hypothesis is that the means of the distributions underlying + the samples and control are equal. The following alternative + hypotheses are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions underlying the samples + and control are unequal. + * 'less': the means of the distributions underlying the samples + are less than the mean of the distribution underlying the control. + * 'greater': the means of the distributions underlying the + samples are greater than the mean of the distribution underlying + the control. 
+ random_state : {None, int, `numpy.random.Generator`}, optional + If `random_state` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(random_state)``. + If `random_state` is already a ``Generator`` instance, then the + provided instance is used. + + The random number generator is used to control the randomized + Quasi-Monte Carlo integration of the multivariate-t distribution. + + Returns + ------- + res : `~scipy.stats._result_classes.DunnettResult` + An object containing attributes: + + statistic : float ndarray + The computed statistic of the test for each comparison. The element + at index ``i`` is the statistic for the comparison between + groups ``i`` and the control. + pvalue : float ndarray + The computed p-value of the test for each comparison. The element + at index ``i`` is the p-value for the comparison between + group ``i`` and the control. + + And the following method: + + confidence_interval(confidence_level=0.95) : + Compute the difference in means of the groups + with the control +- the allowance. + + See Also + -------- + tukey_hsd : performs pairwise comparison of means. + + Notes + ----- + Like the independent-sample t-test, Dunnett's test [1]_ is used to make + inferences about the means of distributions from which samples were drawn. + However, when multiple t-tests are performed at a fixed significance level, + the "family-wise error rate" - the probability of incorrectly rejecting the + null hypothesis in at least one test - will exceed the significance level. + Dunnett's test is designed to perform multiple comparisons while + controlling the family-wise error rate. + + Dunnett's test compares the means of multiple experimental groups + against a single control group. Tukey's Honestly Significant Difference Test + is another multiple-comparison test that controls the family-wise error + rate, but `tukey_hsd` performs *all* pairwise comparisons between groups. 
+ When pairwise comparisons between experimental groups are not needed, + Dunnett's test is preferable due to its higher power. + + + The use of this test relies on several assumptions. + + 1. The observations are independent within and among groups. + 2. The observations within each group are normally distributed. + 3. The distributions from which the samples are drawn have the same finite + variance. + + References + ---------- + .. [1] Charles W. Dunnett. "A Multiple Comparison Procedure for Comparing + Several Treatments with a Control." + Journal of the American Statistical Association, 50:272, 1096-1121, + :doi:`10.1080/01621459.1955.10501294`, 1955. + + Examples + -------- + In [1]_, the influence of drugs on blood count measurements on three groups + of animal is investigated. + + The following table summarizes the results of the experiment in which + two groups received different drugs, and one group acted as a control. + Blood counts (in millions of cells per cubic millimeter) were recorded:: + + >>> import numpy as np + >>> control = np.array([7.40, 8.50, 7.20, 8.24, 9.84, 8.32]) + >>> drug_a = np.array([9.76, 8.80, 7.68, 9.36]) + >>> drug_b = np.array([12.80, 9.68, 12.16, 9.20, 10.55]) + + We would like to see if the means between any of the groups are + significantly different. First, visually examine a box and whisker plot. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + >>> ax.boxplot([control, drug_a, drug_b]) + >>> ax.set_xticklabels(["Control", "Drug A", "Drug B"]) # doctest: +SKIP + >>> ax.set_ylabel("mean") # doctest: +SKIP + >>> plt.show() + + Note the overlapping interquartile ranges of the drug A group and control + group and the apparent separation between the drug B group and control + group. + + Next, we will use Dunnett's test to assess whether the difference + between group means is significant while controlling the family-wise error + rate: the probability of making any false discoveries. 
+ Let the null hypothesis be that the experimental groups have the same + mean as the control and the alternative be that an experimental group does + not have the same mean as the control. We will consider a 5% family-wise + error rate to be acceptable, and therefore we choose 0.05 as the threshold + for significance. + + >>> from scipy.stats import dunnett + >>> res = dunnett(drug_a, drug_b, control=control) + >>> res.pvalue + array([0.62004941, 0.0059035 ]) # may vary + + The p-value corresponding with the comparison between group A and control + exceeds 0.05, so we do not reject the null hypothesis for that comparison. + However, the p-value corresponding with the comparison between group B + and control is less than 0.05, so we consider the experimental results + to be evidence against the null hypothesis in favor of the alternative: + group B has a different mean than the control group. + + """ + samples_, control_, rng = _iv_dunnett( + samples=samples, control=control, + alternative=alternative, random_state=random_state + ) + + rho, df, n_group, n_samples, n_control = _params_dunnett( + samples=samples_, control=control_ + ) + + statistic, std, mean_control, mean_samples = _statistic_dunnett( + samples_, control_, df, n_samples, n_control + ) + + pvalue = _pvalue_dunnett( + rho=rho, df=df, statistic=statistic, alternative=alternative, rng=rng + ) + + return DunnettResult( + statistic=statistic, pvalue=pvalue, + _alternative=alternative, + _rho=rho, _df=df, _std=std, + _mean_samples=mean_samples, + _mean_control=mean_control, + _n_samples=n_samples, + _n_control=n_control, + _rng=rng + ) + + +def _iv_dunnett( + samples: Sequence[npt.ArrayLike], + control: npt.ArrayLike, + alternative: Literal['two-sided', 'less', 'greater'], + random_state: SeedType +) -> tuple[list[np.ndarray], np.ndarray, SeedType]: + """Input validation for Dunnett's test.""" + rng = check_random_state(random_state) + + if alternative not in {'two-sided', 'less', 'greater'}: + raise 
ValueError( + "alternative must be 'less', 'greater' or 'two-sided'" + ) + + ndim_msg = "Control and samples groups must be 1D arrays" + n_obs_msg = "Control and samples groups must have at least 1 observation" + + control = np.asarray(control) + samples_ = [np.asarray(sample) for sample in samples] + + # samples checks + samples_control: list[np.ndarray] = samples_ + [control] + for sample in samples_control: + if sample.ndim > 1: + raise ValueError(ndim_msg) + + if sample.size < 1: + raise ValueError(n_obs_msg) + + return samples_, control, rng + + +def _params_dunnett( + samples: list[np.ndarray], control: np.ndarray +) -> tuple[np.ndarray, int, int, np.ndarray, int]: + """Specific parameters for Dunnett's test. + + Degree of freedom is the number of observations minus the number of groups + including the control. + """ + n_samples = np.array([sample.size for sample in samples]) + + # From [1] p. 1100 d.f. = (sum N)-(p+1) + n_sample = n_samples.sum() + n_control = control.size + n = n_sample + n_control + n_groups = len(samples) + df = n - n_groups - 1 + + # From [1] p. 1103 rho_ij = 1/sqrt((N0/Ni+1)(N0/Nj+1)) + rho = n_control/n_samples + 1 + rho = 1/np.sqrt(rho[:, None] * rho[None, :]) + np.fill_diagonal(rho, 1) + + return rho, df, n_groups, n_samples, n_control + + +def _statistic_dunnett( + samples: list[np.ndarray], control: np.ndarray, df: int, + n_samples: np.ndarray, n_control: int +) -> tuple[np.ndarray, float, np.ndarray, np.ndarray]: + """Statistic of Dunnett's test. + + Computation based on the original single-step test from [1]. + """ + mean_control = np.mean(control) + mean_samples = np.array([np.mean(sample) for sample in samples]) + all_samples = [control] + samples + all_means = np.concatenate([[mean_control], mean_samples]) + + # Variance estimate s^2 from [1] Eq. 
1 + s2 = np.sum([_var(sample, mean=mean)*sample.size + for sample, mean in zip(all_samples, all_means)]) / df + std = np.sqrt(s2) + + # z score inferred from [1] unlabeled equation after Eq. 1 + z = (mean_samples - mean_control) / np.sqrt(1/n_samples + 1/n_control) + + return z / std, std, mean_control, mean_samples + + +def _pvalue_dunnett( + rho: np.ndarray, df: int, statistic: np.ndarray, + alternative: Literal['two-sided', 'less', 'greater'], + rng: SeedType = None +) -> np.ndarray: + """pvalue from the multivariate t-distribution. + + Critical values come from the multivariate student-t distribution. + """ + statistic = statistic.reshape(-1, 1) + + mvt = stats.multivariate_t(shape=rho, df=df, seed=rng) + if alternative == "two-sided": + statistic = abs(statistic) + pvalue = 1 - mvt.cdf(statistic, lower_limit=-statistic) + elif alternative == "greater": + pvalue = 1 - mvt.cdf(statistic, lower_limit=-np.inf) + else: + pvalue = 1 - mvt.cdf(np.inf, lower_limit=statistic) + + return np.atleast_1d(pvalue) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_multivariate.py b/parrot/lib/python3.10/site-packages/scipy/stats/_multivariate.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb59ae8c68ae0f0018f8cccfd79c7fadee742ec --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_multivariate.py @@ -0,0 +1,6981 @@ +# +# Author: Joris Vankerschaver 2013 +# +import math +import numpy as np +import scipy.linalg +from scipy._lib import doccer +from scipy.special import (gammaln, psi, multigammaln, xlogy, entr, betaln, + ive, loggamma) +from scipy._lib._util import check_random_state, _lazywhere +from scipy.linalg.blas import drot, get_blas_funcs +from ._continuous_distns import norm +from ._discrete_distns import binom +from . 
import _mvn, _covariance, _rcont +from ._qmvnt import _qmvt +from ._morestats import directional_stats +from scipy.optimize import root_scalar + +__all__ = ['multivariate_normal', + 'matrix_normal', + 'dirichlet', + 'dirichlet_multinomial', + 'wishart', + 'invwishart', + 'multinomial', + 'special_ortho_group', + 'ortho_group', + 'random_correlation', + 'unitary_group', + 'multivariate_t', + 'multivariate_hypergeom', + 'random_table', + 'uniform_direction', + 'vonmises_fisher'] + +_LOG_2PI = np.log(2 * np.pi) +_LOG_2 = np.log(2) +_LOG_PI = np.log(np.pi) + + +_doc_random_state = """\ +seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. +""" + + +def _squeeze_output(out): + """ + Remove single-dimensional entries from array and convert to scalar, + if necessary. + """ + out = out.squeeze() + if out.ndim == 0: + out = out[()] + return out + + +def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): + """Determine which eigenvalues are "small" given the spectrum. + + This is for compatibility across various linear algebra functions + that should agree about whether or not a Hermitian matrix is numerically + singular and what is its numerical matrix rank. + This is designed to be compatible with scipy.linalg.pinvh. + + Parameters + ---------- + spectrum : 1d ndarray + Array of eigenvalues of a Hermitian matrix. + cond, rcond : float, optional + Cutoff for small eigenvalues. + Singular values smaller than rcond * largest_eigenvalue are + considered zero. + If None or -1, suitable machine precision is used. + + Returns + ------- + eps : float + Magnitude cutoff for numerical negligibility. 
+ + """ + if rcond is not None: + cond = rcond + if cond in [None, -1]: + t = spectrum.dtype.char.lower() + factor = {'f': 1E3, 'd': 1E6} + cond = factor[t] * np.finfo(t).eps + eps = cond * np.max(abs(spectrum)) + return eps + + +def _pinv_1d(v, eps=1e-5): + """A helper function for computing the pseudoinverse. + + Parameters + ---------- + v : iterable of numbers + This may be thought of as a vector of eigenvalues or singular values. + eps : float + Values with magnitude no greater than eps are considered negligible. + + Returns + ------- + v_pinv : 1d float ndarray + A vector of pseudo-inverted numbers. + + """ + return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) + + +class _PSD: + """ + Compute coordinated functions of a symmetric positive semidefinite matrix. + + This class addresses two issues. Firstly it allows the pseudoinverse, + the logarithm of the pseudo-determinant, and the rank of the matrix + to be computed using one call to eigh instead of three. + Secondly it allows these functions to be computed in a way + that gives mutually compatible results. + All of the functions are computed with a common understanding as to + which of the eigenvalues are to be considered negligibly small. + The functions are designed to coordinate with scipy.linalg.pinvh() + but not necessarily with np.linalg.det() or with np.linalg.matrix_rank(). + + Parameters + ---------- + M : array_like + Symmetric positive semidefinite matrix (2-D). + cond, rcond : float, optional + Cutoff for small eigenvalues. + Singular values smaller than rcond * largest_eigenvalue are + considered zero. + If None or -1, suitable machine precision is used. + lower : bool, optional + Whether the pertinent array data is taken from the lower + or upper triangle of M. (Default: lower) + check_finite : bool, optional + Whether to check that the input matrices contain only finite + numbers. 
Disabling may give a performance gain, but may result + in problems (crashes, non-termination) if the inputs do contain + infinities or NaNs. + allow_singular : bool, optional + Whether to allow a singular matrix. (Default: True) + + Notes + ----- + The arguments are similar to those of scipy.linalg.pinvh(). + + """ + + def __init__(self, M, cond=None, rcond=None, lower=True, + check_finite=True, allow_singular=True): + self._M = np.asarray(M) + + # Compute the symmetric eigendecomposition. + # Note that eigh takes care of array conversion, chkfinite, + # and assertion that the matrix is square. + s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite) + + eps = _eigvalsh_to_eps(s, cond, rcond) + if np.min(s) < -eps: + msg = "The input matrix must be symmetric positive semidefinite." + raise ValueError(msg) + d = s[s > eps] + if len(d) < len(s) and not allow_singular: + msg = ("When `allow_singular is False`, the input matrix must be " + "symmetric positive definite.") + raise np.linalg.LinAlgError(msg) + s_pinv = _pinv_1d(s, eps) + U = np.multiply(u, np.sqrt(s_pinv)) + + # Save the eigenvector basis, and tolerance for testing support + self.eps = 1e3*eps + self.V = u[:, s <= eps] + + # Initialize the eagerly precomputed attributes. + self.rank = len(d) + self.U = U + self.log_pdet = np.sum(np.log(d)) + + # Initialize attributes to be lazily computed. + self._pinv = None + + def _support_mask(self, x): + """ + Check whether x lies in the support of the distribution. + """ + residual = np.linalg.norm(x @ self.V, axis=-1) + in_support = residual < self.eps + return in_support + + @property + def pinv(self): + if self._pinv is None: + self._pinv = np.dot(self.U, self.U.T) + return self._pinv + + +class multi_rv_generic: + """ + Class which encapsulates common functionality between all multivariate + distributions. 
+ """ + def __init__(self, seed=None): + super().__init__() + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """ Get or set the Generator object for generating random variates. + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def _get_random_state(self, random_state): + if random_state is not None: + return check_random_state(random_state) + else: + return self._random_state + + +class multi_rv_frozen: + """ + Class which encapsulates common functionality between all frozen + multivariate distributions. + """ + @property + def random_state(self): + return self._dist._random_state + + @random_state.setter + def random_state(self, seed): + self._dist._random_state = check_random_state(seed) + + +_mvn_doc_default_callparams = """\ +mean : array_like, default: ``[0]`` + Mean of the distribution. +cov : array_like or `Covariance`, default: ``[1]`` + Symmetric positive (semi)definite covariance matrix of the distribution. +allow_singular : bool, default: ``False`` + Whether to allow a singular covariance matrix. This is ignored if `cov` is + a `Covariance` object. +""" + +_mvn_doc_callparams_note = """\ +Setting the parameter `mean` to `None` is equivalent to having `mean` +be the zero-vector. The parameter `cov` can be a scalar, in which case +the covariance matrix is the identity times that value, a vector of +diagonal entries for the covariance matrix, a two-dimensional array_like, +or a `Covariance` object. 
+""" + +_mvn_doc_frozen_callparams = "" + +_mvn_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mvn_docdict_params = { + '_mvn_doc_default_callparams': _mvn_doc_default_callparams, + '_mvn_doc_callparams_note': _mvn_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mvn_docdict_noparams = { + '_mvn_doc_default_callparams': _mvn_doc_frozen_callparams, + '_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_normal_gen(multi_rv_generic): + r"""A multivariate normal random variable. + + The `mean` keyword specifies the mean. The `cov` keyword specifies the + covariance matrix. + + Methods + ------- + pdf(x, mean=None, cov=1, allow_singular=False) + Probability density function. + logpdf(x, mean=None, cov=1, allow_singular=False) + Log of the probability density function. + cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5, lower_limit=None) + Cumulative distribution function. + logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5) + Log of the cumulative distribution function. + rvs(mean=None, cov=1, size=1, random_state=None) + Draw random samples from a multivariate normal distribution. + entropy(mean=None, cov=1) + Compute the differential entropy of the multivariate normal. + fit(x, fix_mean=None, fix_cov=None) + Fit a multivariate normal distribution to data. + + Parameters + ---------- + %(_mvn_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_mvn_doc_callparams_note)s + + The covariance matrix `cov` may be an instance of a subclass of + `Covariance`, e.g. `scipy.stats.CovViaPrecision`. If so, `allow_singular` + is ignored. + + Otherwise, `cov` must be a symmetric positive semidefinite + matrix when `allow_singular` is True; it must be (strictly) positive + definite when `allow_singular` is False. 
+ Symmetry is not checked; only the lower triangular portion is used. + The determinant and inverse of `cov` are computed + as the pseudo-determinant and pseudo-inverse, respectively, so + that `cov` does not need to have full rank. + + The probability density function for `multivariate_normal` is + + .. math:: + + f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}} + \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right), + + where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix, + :math:`k` the rank of :math:`\Sigma`. In case of singular :math:`\Sigma`, + SciPy extends this definition according to [1]_. + + .. versionadded:: 0.14.0 + + References + ---------- + .. [1] Multivariate Normal Distribution - Degenerate Case, Wikipedia, + https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Degenerate_case + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import multivariate_normal + + >>> x = np.linspace(0, 5, 10, endpoint=False) + >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y + array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129, + 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349]) + >>> fig1 = plt.figure() + >>> ax = fig1.add_subplot(111) + >>> ax.plot(x, y) + >>> plt.show() + + Alternatively, the object may be called (as a function) to fix the mean + and covariance parameters, returning a "frozen" multivariate normal + random variable: + + >>> rv = multivariate_normal(mean=None, cov=1, allow_singular=False) + >>> # Frozen object with the same methods but holding the given + >>> # mean and covariance fixed. + + The input quantiles can be any shape of array, as long as the last + axis labels the components. 
This allows us for instance to + display the frozen pdf for a non-isotropic random variable in 2D as + follows: + + >>> x, y = np.mgrid[-1:1:.01, -1:1:.01] + >>> pos = np.dstack((x, y)) + >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]) + >>> fig2 = plt.figure() + >>> ax2 = fig2.add_subplot(111) + >>> ax2.contourf(x, y, rv.pdf(pos)) + + """ # noqa: E501 + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params) + + def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): + """Create a frozen multivariate normal distribution. + + See `multivariate_normal_frozen` for more information. + """ + return multivariate_normal_frozen(mean, cov, + allow_singular=allow_singular, + seed=seed) + + def _process_parameters(self, mean, cov, allow_singular=True): + """ + Infer dimensionality from mean or covariance matrix, ensure that + mean and covariance are full vector resp. matrix. + """ + if isinstance(cov, _covariance.Covariance): + return self._process_parameters_Covariance(mean, cov) + else: + # Before `Covariance` classes were introduced, + # `multivariate_normal` accepted plain arrays as `cov` and used the + # following input validation. To avoid disturbing the behavior of + # `multivariate_normal` when plain arrays are used, we use the + # original input validation here. + dim, mean, cov = self._process_parameters_psd(None, mean, cov) + # After input validation, some methods then processed the arrays + # with a `_PSD` object and used that to perform computation. + # To avoid branching statements in each method depending on whether + # `cov` is an array or `Covariance` object, we always process the + # array with `_PSD`, and then use wrapper that satisfies the + # `Covariance` interface, `CovViaPSD`. 
+ psd = _PSD(cov, allow_singular=allow_singular) + cov_object = _covariance.CovViaPSD(psd) + return dim, mean, cov_object + + def _process_parameters_Covariance(self, mean, cov): + dim = cov.shape[-1] + mean = np.array([0.]) if mean is None else mean + message = (f"`cov` represents a covariance matrix in {dim} dimensions," + f"and so `mean` must be broadcastable to shape {(dim,)}") + try: + mean = np.broadcast_to(mean, dim) + except ValueError as e: + raise ValueError(message) from e + return dim, mean, cov + + def _process_parameters_psd(self, dim, mean, cov): + # Try to infer dimensionality + if dim is None: + if mean is None: + if cov is None: + dim = 1 + else: + cov = np.asarray(cov, dtype=float) + if cov.ndim < 2: + dim = 1 + else: + dim = cov.shape[0] + else: + mean = np.asarray(mean, dtype=float) + dim = mean.size + else: + if not np.isscalar(dim): + raise ValueError("Dimension of random variable must be " + "a scalar.") + + # Check input sizes and return full arrays for mean and cov if + # necessary + if mean is None: + mean = np.zeros(dim) + mean = np.asarray(mean, dtype=float) + + if cov is None: + cov = 1.0 + cov = np.asarray(cov, dtype=float) + + if dim == 1: + mean = mean.reshape(1) + cov = cov.reshape(1, 1) + + if mean.ndim != 1 or mean.shape[0] != dim: + raise ValueError("Array 'mean' must be a vector of length %d." % + dim) + if cov.ndim == 0: + cov = cov * np.eye(dim) + elif cov.ndim == 1: + cov = np.diag(cov) + elif cov.ndim == 2 and cov.shape != (dim, dim): + rows, cols = cov.shape + if rows != cols: + msg = ("Array 'cov' must be square if it is two dimensional," + " but cov.shape = %s." 
% str(cov.shape)) + else: + msg = ("Dimension mismatch: array 'cov' is of shape %s," + " but 'mean' is a vector of length %d.") + msg = msg % (str(cov.shape), len(mean)) + raise ValueError(msg) + elif cov.ndim > 2: + raise ValueError("Array 'cov' must be at most two-dimensional," + " but cov.ndim = %d" % cov.ndim) + + return dim, mean, cov + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. + """ + x = np.asarray(x, dtype=float) + + if x.ndim == 0: + x = x[np.newaxis] + elif x.ndim == 1: + if dim == 1: + x = x[:, np.newaxis] + else: + x = x[np.newaxis, :] + + return x + + def _logpdf(self, x, mean, cov_object): + """Log of the multivariate normal probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + mean : ndarray + Mean of the distribution + cov_object : Covariance + An object representing the Covariance matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + log_det_cov, rank = cov_object.log_pdet, cov_object.rank + dev = x - mean + if dev.ndim > 1: + log_det_cov = log_det_cov[..., np.newaxis] + rank = rank[..., np.newaxis] + maha = np.sum(np.square(cov_object.whiten(dev)), axis=-1) + return -0.5 * (rank * _LOG_2PI + log_det_cov + maha) + + def logpdf(self, x, mean=None, cov=1, allow_singular=False): + """Log of the multivariate normal probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. 
+ %(_mvn_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + x = self._process_quantiles(x, dim) + out = self._logpdf(x, mean, cov_object) + if np.any(cov_object.rank < dim): + out_of_bounds = ~cov_object._support_mask(x-mean) + out[out_of_bounds] = -np.inf + return _squeeze_output(out) + + def pdf(self, x, mean=None, cov=1, allow_singular=False): + """Multivariate normal probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + x = self._process_quantiles(x, dim) + out = np.exp(self._logpdf(x, mean, cov_object)) + if np.any(cov_object.rank < dim): + out_of_bounds = ~cov_object._support_mask(x-mean) + out[out_of_bounds] = 0.0 + return _squeeze_output(out) + + def _cdf(self, x, mean, cov, maxpts, abseps, releps, lower_limit): + """Multivariate normal cumulative distribution function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the cumulative distribution function. + mean : ndarray + Mean of the distribution + cov : array_like + Covariance matrix of the distribution + maxpts : integer + The maximum number of points to use for integration + abseps : float + Absolute error tolerance + releps : float + Relative error tolerance + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. 
+ + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'cdf' instead. + + + .. versionadded:: 1.0.0 + + """ + lower = (np.full(mean.shape, -np.inf) + if lower_limit is None else lower_limit) + # In 2d, _mvn.mvnun accepts input in which `lower` bound elements + # are greater than `x`. Not so in other dimensions. Fix this by + # ensuring that lower bounds are indeed lower when passed, then + # set signs of resulting CDF manually. + b, a = np.broadcast_arrays(x, lower) + i_swap = b < a + signs = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative + a, b = a.copy(), b.copy() + a[i_swap], b[i_swap] = b[i_swap], a[i_swap] + n = x.shape[-1] + limits = np.concatenate((a, b), axis=-1) + + # mvnun expects 1-d arguments, so process points sequentially + def func1d(limits): + return _mvn.mvnun(limits[:n], limits[n:], mean, cov, + maxpts, abseps, releps)[0] + + out = np.apply_along_axis(func1d, -1, limits) * signs + return _squeeze_output(out) + + def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, + abseps=1e-5, releps=1e-5, *, lower_limit=None): + """Log of the multivariate normal cumulative distribution function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + maxpts : integer, optional + The maximum number of points to use for integration + (default `1000000*dim`) + abseps : float, optional + Absolute error tolerance (default 1e-5) + releps : float, optional + Relative error tolerance (default 1e-5) + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + + Returns + ------- + cdf : ndarray or scalar + Log of the cumulative distribution function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + .. 
versionadded:: 1.0.0 + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + cov = cov_object.covariance + x = self._process_quantiles(x, dim) + if not maxpts: + maxpts = 1000000 * dim + cdf = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit) + # the log of a negative real is complex, and cdf can be negative + # if lower limit is greater than upper limit + cdf = cdf + 0j if np.any(cdf < 0) else cdf + out = np.log(cdf) + return out + + def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, + abseps=1e-5, releps=1e-5, *, lower_limit=None): + """Multivariate normal cumulative distribution function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + maxpts : integer, optional + The maximum number of points to use for integration + (default `1000000*dim`) + abseps : float, optional + Absolute error tolerance (default 1e-5) + releps : float, optional + Relative error tolerance (default 1e-5) + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + + Returns + ------- + cdf : ndarray or scalar + Cumulative distribution function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + .. versionadded:: 1.0.0 + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + cov = cov_object.covariance + x = self._process_quantiles(x, dim) + if not maxpts: + maxpts = 1000000 * dim + out = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit) + return out + + def rvs(self, mean=None, cov=1, size=1, random_state=None): + """Draw random samples from a multivariate normal distribution. + + Parameters + ---------- + %(_mvn_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). 
+ %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the random variable. + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + dim, mean, cov_object = self._process_parameters(mean, cov) + random_state = self._get_random_state(random_state) + + if isinstance(cov_object, _covariance.CovViaPSD): + cov = cov_object.covariance + out = random_state.multivariate_normal(mean, cov, size) + out = _squeeze_output(out) + else: + size = size or tuple() + if not np.iterable(size): + size = (size,) + shape = tuple(size) + (cov_object.shape[-1],) + x = random_state.normal(size=shape) + out = mean + cov_object.colorize(x) + return out + + def entropy(self, mean=None, cov=1): + """Compute the differential entropy of the multivariate normal. + + Parameters + ---------- + %(_mvn_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the multivariate normal distribution + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + dim, mean, cov_object = self._process_parameters(mean, cov) + return 0.5 * (cov_object.rank * (_LOG_2PI + 1) + cov_object.log_pdet) + + def fit(self, x, fix_mean=None, fix_cov=None): + """Fit a multivariate normal distribution to data. + + Parameters + ---------- + x : ndarray (m, n) + Data the distribution is fitted to. Must have two axes. + The first axis of length `m` represents the number of vectors + the distribution is fitted to. The second axis of length `n` + determines the dimensionality of the fitted distribution. + fix_mean : ndarray(n, ) + Fixed mean vector. Must have length `n`. + fix_cov: ndarray (n, n) + Fixed covariance matrix. Must have shape `(n, n)`. 
+ + Returns + ------- + mean : ndarray (n, ) + Maximum likelihood estimate of the mean vector + cov : ndarray (n, n) + Maximum likelihood estimate of the covariance matrix + + """ + # input validation for data to be fitted + x = np.asarray(x) + if x.ndim != 2: + raise ValueError("`x` must be two-dimensional.") + + n_vectors, dim = x.shape + + # parameter estimation + # reference: https://home.ttic.edu/~shubhendu/Slides/Estimation.pdf + if fix_mean is not None: + # input validation for `fix_mean` + fix_mean = np.atleast_1d(fix_mean) + if fix_mean.shape != (dim, ): + msg = ("`fix_mean` must be a one-dimensional array the same " + "length as the dimensionality of the vectors `x`.") + raise ValueError(msg) + mean = fix_mean + else: + mean = x.mean(axis=0) + + if fix_cov is not None: + # input validation for `fix_cov` + fix_cov = np.atleast_2d(fix_cov) + # validate shape + if fix_cov.shape != (dim, dim): + msg = ("`fix_cov` must be a two-dimensional square array " + "of same side length as the dimensionality of the " + "vectors `x`.") + raise ValueError(msg) + # validate positive semidefiniteness + # a trimmed down copy from _PSD + s, u = scipy.linalg.eigh(fix_cov, lower=True, check_finite=True) + eps = _eigvalsh_to_eps(s) + if np.min(s) < -eps: + msg = "`fix_cov` must be symmetric positive semidefinite." + raise ValueError(msg) + cov = fix_cov + else: + centered_data = x - mean + cov = centered_data.T @ centered_data / n_vectors + return mean, cov + + +multivariate_normal = multivariate_normal_gen() + + +class multivariate_normal_frozen(multi_rv_frozen): + def __init__(self, mean=None, cov=1, allow_singular=False, seed=None, + maxpts=None, abseps=1e-5, releps=1e-5): + """Create a frozen multivariate normal distribution. + + Parameters + ---------- + mean : array_like, default: ``[0]`` + Mean of the distribution. + cov : array_like, default: ``[1]`` + Symmetric positive (semi)definite covariance matrix of the + distribution. 
+ allow_singular : bool, default: ``False`` + Whether to allow a singular covariance matrix. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + maxpts : integer, optional + The maximum number of points to use for integration of the + cumulative distribution function (default `1000000*dim`) + abseps : float, optional + Absolute error tolerance for the cumulative distribution function + (default 1e-5) + releps : float, optional + Relative error tolerance for the cumulative distribution function + (default 1e-5) + + Examples + -------- + When called with the default parameters, this will create a 1D random + variable with mean 0 and covariance 1: + + >>> from scipy.stats import multivariate_normal + >>> r = multivariate_normal() + >>> r.mean + array([ 0.]) + >>> r.cov + array([[1.]]) + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = multivariate_normal_gen(seed) + self.dim, self.mean, self.cov_object = ( + self._dist._process_parameters(mean, cov, allow_singular)) + self.allow_singular = allow_singular or self.cov_object._allow_singular + if not maxpts: + maxpts = 1000000 * self.dim + self.maxpts = maxpts + self.abseps = abseps + self.releps = releps + + @property + def cov(self): + return self.cov_object.covariance + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._logpdf(x, self.mean, self.cov_object) + if np.any(self.cov_object.rank < self.dim): + out_of_bounds = ~self.cov_object._support_mask(x-self.mean) + out[out_of_bounds] = -np.inf + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def logcdf(self, x, *, lower_limit=None): + cdf = self.cdf(x, lower_limit=lower_limit) + # 
the log of a negative real is complex, and cdf can be negative + # if lower limit is greater than upper limit + cdf = cdf + 0j if np.any(cdf < 0) else cdf + out = np.log(cdf) + return out + + def cdf(self, x, *, lower_limit=None): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._cdf(x, self.mean, self.cov_object.covariance, + self.maxpts, self.abseps, self.releps, + lower_limit) + return _squeeze_output(out) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.mean, self.cov_object, size, random_state) + + def entropy(self): + """Computes the differential entropy of the multivariate normal. + + Returns + ------- + h : scalar + Entropy of the multivariate normal distribution + + """ + log_pdet = self.cov_object.log_pdet + rank = self.cov_object.rank + return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']: + method = multivariate_normal_gen.__dict__[name] + method_frozen = multivariate_normal_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + mvn_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params) + +_matnorm_doc_default_callparams = """\ +mean : array_like, optional + Mean of the distribution (default: `None`) +rowcov : array_like, optional + Among-row covariance matrix of the distribution (default: `1`) +colcov : array_like, optional + Among-column covariance matrix of the distribution (default: `1`) +""" + +_matnorm_doc_callparams_note = """\ +If `mean` is set to `None` then a matrix of zeros is used for the mean. +The dimensions of this matrix are inferred from the shape of `rowcov` and +`colcov`, if these are provided, or set to `1` if ambiguous. + +`rowcov` and `colcov` can be two-dimensional array_likes specifying the +covariance matrices directly. 
Alternatively, a one-dimensional array will +be be interpreted as the entries of a diagonal matrix, and a scalar or +zero-dimensional array will be interpreted as this value times the +identity matrix. +""" + +_matnorm_doc_frozen_callparams = "" + +_matnorm_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +matnorm_docdict_params = { + '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams, + '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +matnorm_docdict_noparams = { + '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams, + '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class matrix_normal_gen(multi_rv_generic): + r"""A matrix normal random variable. + + The `mean` keyword specifies the mean. The `rowcov` keyword specifies the + among-row covariance matrix. The 'colcov' keyword specifies the + among-column covariance matrix. + + Methods + ------- + pdf(X, mean=None, rowcov=1, colcov=1) + Probability density function. + logpdf(X, mean=None, rowcov=1, colcov=1) + Log of the probability density function. + rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None) + Draw random samples. + entropy(rowcol=1, colcov=1) + Differential entropy. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + The covariance matrices specified by `rowcov` and `colcov` must be + (symmetric) positive definite. If the samples in `X` are + :math:`m \times n`, then `rowcov` must be :math:`m \times m` and + `colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`. + + The probability density function for `matrix_normal` is + + .. 
math:: + + f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}} + \exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1} + (X-M)^T \right] \right), + + where :math:`M` is the mean, :math:`U` the among-row covariance matrix, + :math:`V` the among-column covariance matrix. + + The `allow_singular` behaviour of the `multivariate_normal` + distribution is not currently supported. Covariance matrices must be + full rank. + + The `matrix_normal` distribution is closely related to the + `multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)` + (the vector formed by concatenating the columns of :math:`X`) has a + multivariate normal distribution with mean :math:`\mathrm{Vec}(M)` + and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker + product). Sampling and pdf evaluation are + :math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but + :math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal, + making this equivalent form algorithmically inefficient. + + .. versionadded:: 0.17.0 + + Examples + -------- + + >>> import numpy as np + >>> from scipy.stats import matrix_normal + + >>> M = np.arange(6).reshape(3,2); M + array([[0, 1], + [2, 3], + [4, 5]]) + >>> U = np.diag([1,2,3]); U + array([[1, 0, 0], + [0, 2, 0], + [0, 0, 3]]) + >>> V = 0.3*np.identity(2); V + array([[ 0.3, 0. ], + [ 0. 
, 0.3]]) + >>> X = M + 0.1; X + array([[ 0.1, 1.1], + [ 2.1, 3.1], + [ 4.1, 5.1]]) + >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V) + 0.023410202050005054 + + >>> # Equivalent multivariate normal + >>> from scipy.stats import multivariate_normal + >>> vectorised_X = X.T.flatten() + >>> equiv_mean = M.T.flatten() + >>> equiv_cov = np.kron(V,U) + >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov) + 0.023410202050005054 + + Alternatively, the object may be called (as a function) to fix the mean + and covariance parameters, returning a "frozen" matrix normal + random variable: + + >>> rv = matrix_normal(mean=None, rowcov=1, colcov=1) + >>> # Frozen object with the same methods but holding the given + >>> # mean and covariance fixed. + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params) + + def __call__(self, mean=None, rowcov=1, colcov=1, seed=None): + """Create a frozen matrix normal distribution. + + See `matrix_normal_frozen` for more information. + + """ + return matrix_normal_frozen(mean, rowcov, colcov, seed=seed) + + def _process_parameters(self, mean, rowcov, colcov): + """ + Infer dimensionality from mean or covariance matrices. Handle + defaults. Ensure compatible dimensions. 
+ """ + + # Process mean + if mean is not None: + mean = np.asarray(mean, dtype=float) + meanshape = mean.shape + if len(meanshape) != 2: + raise ValueError("Array `mean` must be two dimensional.") + if np.any(meanshape == 0): + raise ValueError("Array `mean` has invalid shape.") + + # Process among-row covariance + rowcov = np.asarray(rowcov, dtype=float) + if rowcov.ndim == 0: + if mean is not None: + rowcov = rowcov * np.identity(meanshape[0]) + else: + rowcov = rowcov * np.identity(1) + elif rowcov.ndim == 1: + rowcov = np.diag(rowcov) + rowshape = rowcov.shape + if len(rowshape) != 2: + raise ValueError("`rowcov` must be a scalar or a 2D array.") + if rowshape[0] != rowshape[1]: + raise ValueError("Array `rowcov` must be square.") + if rowshape[0] == 0: + raise ValueError("Array `rowcov` has invalid shape.") + numrows = rowshape[0] + + # Process among-column covariance + colcov = np.asarray(colcov, dtype=float) + if colcov.ndim == 0: + if mean is not None: + colcov = colcov * np.identity(meanshape[1]) + else: + colcov = colcov * np.identity(1) + elif colcov.ndim == 1: + colcov = np.diag(colcov) + colshape = colcov.shape + if len(colshape) != 2: + raise ValueError("`colcov` must be a scalar or a 2D array.") + if colshape[0] != colshape[1]: + raise ValueError("Array `colcov` must be square.") + if colshape[0] == 0: + raise ValueError("Array `colcov` has invalid shape.") + numcols = colshape[0] + + # Ensure mean and covariances compatible + if mean is not None: + if meanshape[0] != numrows: + raise ValueError("Arrays `mean` and `rowcov` must have the " + "same number of rows.") + if meanshape[1] != numcols: + raise ValueError("Arrays `mean` and `colcov` must have the " + "same number of columns.") + else: + mean = np.zeros((numrows, numcols)) + + dims = (numrows, numcols) + + return dims, mean, rowcov, colcov + + def _process_quantiles(self, X, dims): + """ + Adjust quantiles array so that last two axes labels the components of + each data point. 
+ """ + X = np.asarray(X, dtype=float) + if X.ndim == 2: + X = X[np.newaxis, :] + if X.shape[-2:] != dims: + raise ValueError("The shape of array `X` is not compatible " + "with the distribution parameters.") + return X + + def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov, + col_prec_rt, log_det_colcov): + """Log of the matrix normal probability density function. + + Parameters + ---------- + dims : tuple + Dimensions of the matrix variates + X : ndarray + Points at which to evaluate the log of the probability + density function + mean : ndarray + Mean of the distribution + row_prec_rt : ndarray + A decomposition such that np.dot(row_prec_rt, row_prec_rt.T) + is the inverse of the among-row covariance matrix + log_det_rowcov : float + Logarithm of the determinant of the among-row covariance matrix + col_prec_rt : ndarray + A decomposition such that np.dot(col_prec_rt, col_prec_rt.T) + is the inverse of the among-column covariance matrix + log_det_colcov : float + Logarithm of the determinant of the among-column covariance matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + numrows, numcols = dims + roll_dev = np.moveaxis(X-mean, -1, 0) + scale_dev = np.tensordot(col_prec_rt.T, + np.dot(roll_dev, row_prec_rt), 1) + maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0) + return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov + + numrows*log_det_colcov + maha) + + def logpdf(self, X, mean=None, rowcov=1, colcov=1): + """Log of the matrix normal probability density function. + + Parameters + ---------- + X : array_like + Quantiles, with the last two axes of `X` denoting the components. 
+ %(_matnorm_doc_default_callparams)s + + Returns + ------- + logpdf : ndarray + Log of the probability density function evaluated at `X` + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, + colcov) + X = self._process_quantiles(X, dims) + rowpsd = _PSD(rowcov, allow_singular=False) + colpsd = _PSD(colcov, allow_singular=False) + out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U, + colpsd.log_pdet) + return _squeeze_output(out) + + def pdf(self, X, mean=None, rowcov=1, colcov=1): + """Matrix normal probability density function. + + Parameters + ---------- + X : array_like + Quantiles, with the last two axes of `X` denoting the components. + %(_matnorm_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `X` + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + return np.exp(self.logpdf(X, mean, rowcov, colcov)) + + def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None): + """Draw random samples from a matrix normal distribution. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `dims`), where `dims` is the + dimension of the random matrices. + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + size = int(size) + dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, + colcov) + rowchol = scipy.linalg.cholesky(rowcov, lower=True) + colchol = scipy.linalg.cholesky(colcov, lower=True) + random_state = self._get_random_state(random_state) + # We aren't generating standard normal variates with size=(size, + # dims[0], dims[1]) directly to ensure random variates remain backwards + # compatible. See https://github.com/scipy/scipy/pull/12312 for more + # details. 
+ std_norm = random_state.standard_normal( + size=(dims[1], size, dims[0]) + ).transpose(1, 2, 0) + out = mean + np.einsum('jp,ipq,kq->ijk', + rowchol, std_norm, colchol, + optimize=True) + if size == 1: + out = out.reshape(mean.shape) + return out + + def entropy(self, rowcov=1, colcov=1): + """Log of the matrix normal probability density function. + + Parameters + ---------- + rowcov : array_like, optional + Among-row covariance matrix of the distribution (default: `1`) + colcov : array_like, optional + Among-column covariance matrix of the distribution (default: `1`) + + Returns + ------- + entropy : float + Entropy of the distribution + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + dummy_mean = np.zeros((rowcov.shape[0], colcov.shape[0])) + dims, _, rowcov, colcov = self._process_parameters(dummy_mean, + rowcov, + colcov) + rowpsd = _PSD(rowcov, allow_singular=False) + colpsd = _PSD(colcov, allow_singular=False) + + return self._entropy(dims, rowpsd.log_pdet, colpsd.log_pdet) + + def _entropy(self, dims, row_cov_logdet, col_cov_logdet): + n, p = dims + return (0.5 * n * p * (1 + _LOG_2PI) + 0.5 * p * row_cov_logdet + + 0.5 * n * col_cov_logdet) + + +matrix_normal = matrix_normal_gen() + + +class matrix_normal_frozen(multi_rv_frozen): + """ + Create a frozen matrix normal distribution. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is `None` the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import matrix_normal + + >>> distn = matrix_normal(mean=np.zeros((3,3))) + >>> X = distn.rvs(); X + array([[-0.02976962, 0.93339138, -0.09663178], + [ 0.67405524, 0.28250467, -0.93308929], + [-0.31144782, 0.74535536, 1.30412916]]) + >>> distn.pdf(X) + 2.5160642368346784e-05 + >>> distn.logpdf(X) + -10.590229595124615 + """ + + def __init__(self, mean=None, rowcov=1, colcov=1, seed=None): + self._dist = matrix_normal_gen(seed) + self.dims, self.mean, self.rowcov, self.colcov = \ + self._dist._process_parameters(mean, rowcov, colcov) + self.rowpsd = _PSD(self.rowcov, allow_singular=False) + self.colpsd = _PSD(self.colcov, allow_singular=False) + + def logpdf(self, X): + X = self._dist._process_quantiles(X, self.dims) + out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U, + self.rowpsd.log_pdet, self.colpsd.U, + self.colpsd.log_pdet) + return _squeeze_output(out) + + def pdf(self, X): + return np.exp(self.logpdf(X)) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.mean, self.rowcov, self.colcov, size, + random_state) + + def entropy(self): + return self._dist._entropy(self.dims, self.rowpsd.log_pdet, + self.colpsd.log_pdet) + + +# Set frozen generator docstrings from corresponding docstrings in +# matrix_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs', 'entropy']: + method = matrix_normal_gen.__dict__[name] + method_frozen = matrix_normal_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + matnorm_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params) + +_dirichlet_doc_default_callparams = """\ +alpha : array_like + The concentration parameters. The number of entries determines the + dimensionality of the distribution. 
+""" +_dirichlet_doc_frozen_callparams = "" + +_dirichlet_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +dirichlet_docdict_params = { + '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams, + '_doc_random_state': _doc_random_state +} + +dirichlet_docdict_noparams = { + '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams, + '_doc_random_state': _doc_random_state +} + + +def _dirichlet_check_parameters(alpha): + alpha = np.asarray(alpha) + if np.min(alpha) <= 0: + raise ValueError("All parameters must be greater than 0") + elif alpha.ndim != 1: + raise ValueError("Parameter vector 'a' must be one dimensional, " + f"but a.shape = {alpha.shape}.") + return alpha + + +def _dirichlet_check_input(alpha, x): + x = np.asarray(x) + + if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]: + raise ValueError("Vector 'x' must have either the same number " + "of entries as, or one entry fewer than, " + f"parameter vector 'a', but alpha.shape = {alpha.shape} " + f"and x.shape = {x.shape}.") + + if x.shape[0] != alpha.shape[0]: + xk = np.array([1 - np.sum(x, 0)]) + if xk.ndim == 1: + x = np.append(x, xk) + elif xk.ndim == 2: + x = np.vstack((x, xk)) + else: + raise ValueError("The input must be one dimensional or a two " + "dimensional matrix containing the entries.") + + if np.min(x) < 0: + raise ValueError("Each entry in 'x' must be greater than or equal " + "to zero.") + + if np.max(x) > 1: + raise ValueError("Each entry in 'x' must be smaller or equal one.") + + # Check x_i > 0 or alpha_i > 1 + xeq0 = (x == 0) + alphalt1 = (alpha < 1) + if x.shape != alpha.shape: + alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape) + chk = np.logical_and(xeq0, alphalt1) + + if np.sum(chk): + raise ValueError("Each entry in 'x' must be greater than zero if its " + "alpha is less than one.") + + if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any(): + raise ValueError("The 
input vector 'x' must lie within the normal " + "simplex. but np.sum(x, 0) = %s." % np.sum(x, 0)) + + return x + + +def _lnB(alpha): + r"""Internal helper function to compute the log of the useful quotient. + + .. math:: + + B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)} + {\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)} + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + B : scalar + Helper quotient, internal use only + + """ + return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha)) + + +class dirichlet_gen(multi_rv_generic): + r"""A Dirichlet random variable. + + The ``alpha`` keyword specifies the concentration parameters of the + distribution. + + .. versionadded:: 0.15.0 + + Methods + ------- + pdf(x, alpha) + Probability density function. + logpdf(x, alpha) + Log of the probability density function. + rvs(alpha, size=1, random_state=None) + Draw random samples from a Dirichlet distribution. + mean(alpha) + The mean of the Dirichlet distribution + var(alpha) + The variance of the Dirichlet distribution + cov(alpha) + The covariance of the Dirichlet distribution + entropy(alpha) + Compute the differential entropy of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + Each :math:`\alpha` entry must be positive. The distribution has only + support on the simplex defined by + + .. math:: + \sum_{i=1}^{K} x_i = 1 + + where :math:`0 < x_i < 1`. + + If the quantiles don't lie within the simplex, a ValueError is raised. + + The probability density function for `dirichlet` is + + .. math:: + + f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1} + + where + + .. 
math:: + + \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)} + {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)} + + and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the + concentration parameters and :math:`K` is the dimension of the space + where :math:`x` takes values. + + Note that the `dirichlet` interface is somewhat inconsistent. + The array returned by the rvs function is transposed + with respect to the format expected by the pdf and logpdf. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import dirichlet + + Generate a dirichlet random variable + + >>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles + >>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters + >>> dirichlet.pdf(quantiles, alpha) + 0.2843831684937255 + + The same PDF but following a log scale + + >>> dirichlet.logpdf(quantiles, alpha) + -1.2574327653159187 + + Once we specify the dirichlet distribution + we can then calculate quantities of interest + + >>> dirichlet.mean(alpha) # get the mean of the distribution + array([0.01960784, 0.24509804, 0.73529412]) + >>> dirichlet.var(alpha) # get variance + array([0.00089829, 0.00864603, 0.00909517]) + >>> dirichlet.entropy(alpha) # calculate the differential entropy + -4.3280162474082715 + + We can also return random samples from the distribution + + >>> dirichlet.rvs(alpha, size=1, random_state=1) + array([[0.00766178, 0.24670518, 0.74563305]]) + >>> dirichlet.rvs(alpha, size=2, random_state=2) + array([[0.01639427, 0.1292273 , 0.85437844], + [0.00156917, 0.19033695, 0.80809388]]) + + Alternatively, the object may be called (as a function) to fix + concentration parameters, returning a "frozen" Dirichlet + random variable: + + >>> rv = dirichlet(alpha) + >>> # Frozen object with the same methods but holding the given + >>> # concentration parameters fixed. 
+ + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params) + + def __call__(self, alpha, seed=None): + return dirichlet_frozen(alpha, seed=seed) + + def _logpdf(self, x, alpha): + """Log of the Dirichlet probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + %(_dirichlet_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + lnB = _lnB(alpha) + return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0) + + def logpdf(self, x, alpha): + """Log of the Dirichlet probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Log of the probability density function evaluated at `x`. + + """ + alpha = _dirichlet_check_parameters(alpha) + x = _dirichlet_check_input(alpha, x) + + out = self._logpdf(x, alpha) + return _squeeze_output(out) + + def pdf(self, x, alpha): + """The Dirichlet probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + The probability density function evaluated at `x`. + + """ + alpha = _dirichlet_check_parameters(alpha) + x = _dirichlet_check_input(alpha, x) + + out = np.exp(self._logpdf(x, alpha)) + return _squeeze_output(out) + + def mean(self, alpha): + """Mean of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + mu : ndarray or scalar + Mean of the Dirichlet distribution. 
+ + """ + alpha = _dirichlet_check_parameters(alpha) + + out = alpha / (np.sum(alpha)) + return _squeeze_output(out) + + def var(self, alpha): + """Variance of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + v : ndarray or scalar + Variance of the Dirichlet distribution. + + """ + + alpha = _dirichlet_check_parameters(alpha) + + alpha0 = np.sum(alpha) + out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1)) + return _squeeze_output(out) + + def cov(self, alpha): + """Covariance matrix of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + cov : ndarray + The covariance matrix of the distribution. + """ + + alpha = _dirichlet_check_parameters(alpha) + alpha0 = np.sum(alpha) + a = alpha / alpha0 + + cov = (np.diag(a) - np.outer(a, a)) / (alpha0 + 1) + return _squeeze_output(cov) + + def entropy(self, alpha): + """ + Differential entropy of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Dirichlet distribution + + """ + + alpha = _dirichlet_check_parameters(alpha) + + alpha0 = np.sum(alpha) + lnB = _lnB(alpha) + K = alpha.shape[0] + + out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum( + (alpha - 1) * scipy.special.psi(alpha)) + return _squeeze_output(out) + + def rvs(self, alpha, size=1, random_state=None): + """ + Draw random samples from a Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + size : int, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the random variable. 
+ + """ + alpha = _dirichlet_check_parameters(alpha) + random_state = self._get_random_state(random_state) + return random_state.dirichlet(alpha, size=size) + + +dirichlet = dirichlet_gen() + + +class dirichlet_frozen(multi_rv_frozen): + def __init__(self, alpha, seed=None): + self.alpha = _dirichlet_check_parameters(alpha) + self._dist = dirichlet_gen(seed) + + def logpdf(self, x): + return self._dist.logpdf(x, self.alpha) + + def pdf(self, x): + return self._dist.pdf(x, self.alpha) + + def mean(self): + return self._dist.mean(self.alpha) + + def var(self): + return self._dist.var(self.alpha) + + def cov(self): + return self._dist.cov(self.alpha) + + def entropy(self): + return self._dist.entropy(self.alpha) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.alpha, size, random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'cov', 'entropy']: + method = dirichlet_gen.__dict__[name] + method_frozen = dirichlet_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, dirichlet_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params) + + +_wishart_doc_default_callparams = """\ +df : int + Degrees of freedom, must be greater than or equal to dimension of the + scale matrix +scale : array_like + Symmetric positive definite scale matrix of the distribution +""" + +_wishart_doc_callparams_note = "" + +_wishart_doc_frozen_callparams = "" + +_wishart_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +wishart_docdict_params = { + '_doc_default_callparams': _wishart_doc_default_callparams, + '_doc_callparams_note': _wishart_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +wishart_docdict_noparams = { + '_doc_default_callparams': 
_wishart_doc_frozen_callparams, + '_doc_callparams_note': _wishart_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class wishart_gen(multi_rv_generic): + r"""A Wishart random variable. + + The `df` keyword specifies the degrees of freedom. The `scale` keyword + specifies the scale matrix, which must be symmetric and positive definite. + In this context, the scale matrix is often interpreted in terms of a + multivariate normal precision matrix (the inverse of the covariance + matrix). These arguments must satisfy the relationship + ``df > scale.ndim - 1``, but see notes on using the `rvs` method with + ``df < scale.ndim``. + + Methods + ------- + pdf(x, df, scale) + Probability density function. + logpdf(x, df, scale) + Log of the probability density function. + rvs(df, scale, size=1, random_state=None) + Draw random samples from a Wishart distribution. + entropy() + Compute the differential entropy of the Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Raises + ------ + scipy.linalg.LinAlgError + If the scale matrix `scale` is not positive definite. + + See Also + -------- + invwishart, chi2 + + Notes + ----- + %(_doc_callparams_note)s + + The scale matrix `scale` must be a symmetric positive definite + matrix. Singular matrices, including the symmetric positive semi-definite + case, are not supported. Symmetry is not checked; only the lower triangular + portion is used. + + The Wishart distribution is often denoted + + .. math:: + + W_p(\nu, \Sigma) + + where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the + :math:`p \times p` scale matrix. + + The probability density function for `wishart` has support over positive + definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then + its PDF is given by: + + .. 
math:: + + f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} } + |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )} + \exp\left( -tr(\Sigma^{-1} S) / 2 \right) + + If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then + :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart). + + If the scale matrix is 1-dimensional and equal to one, then the Wishart + distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)` + distribution. + + The algorithm [2]_ implemented by the `rvs` method may + produce numerically singular matrices with :math:`p - 1 < \nu < p`; the + user may wish to check for this condition and generate replacement samples + as necessary. + + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", + Wiley, 1983. + .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate + Generator", Applied Statistics, vol. 21, pp. 341-345, 1972. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import wishart, chi2 + >>> x = np.linspace(1e-5, 8, 100) + >>> w = wishart.pdf(x, df=3, scale=1); w[:5] + array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) + >>> c = chi2.pdf(x, 3); c[:5] + array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) + >>> plt.plot(x, w) + >>> plt.show() + + The input quantiles can be any shape of array, as long as the last + axis labels the components. + + Alternatively, the object may be called (as a function) to fix the degrees + of freedom and scale parameters, returning a "frozen" Wishart random + variable: + + >>> rv = wishart(df=1, scale=1) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. 
+ + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) + + def __call__(self, df=None, scale=None, seed=None): + """Create a frozen Wishart distribution. + + See `wishart_frozen` for more information. + """ + return wishart_frozen(df, scale, seed) + + def _process_parameters(self, df, scale): + if scale is None: + scale = 1.0 + scale = np.asarray(scale, dtype=float) + + if scale.ndim == 0: + scale = scale[np.newaxis, np.newaxis] + elif scale.ndim == 1: + scale = np.diag(scale) + elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]: + raise ValueError("Array 'scale' must be square if it is two" + " dimensional, but scale.scale = %s." + % str(scale.shape)) + elif scale.ndim > 2: + raise ValueError("Array 'scale' must be at most two-dimensional," + " but scale.ndim = %d" % scale.ndim) + + dim = scale.shape[0] + + if df is None: + df = dim + elif not np.isscalar(df): + raise ValueError("Degrees of freedom must be a scalar.") + elif df <= dim - 1: + raise ValueError("Degrees of freedom must be greater than the " + "dimension of scale matrix minus 1.") + + return dim, df, scale + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. + """ + x = np.asarray(x, dtype=float) + + if x.ndim == 0: + x = x * np.eye(dim)[:, :, np.newaxis] + if x.ndim == 1: + if dim == 1: + x = x[np.newaxis, np.newaxis, :] + else: + x = np.diag(x)[:, :, np.newaxis] + elif x.ndim == 2: + if not x.shape[0] == x.shape[1]: + raise ValueError("Quantiles must be square if they are two" + " dimensional, but x.shape = %s." + % str(x.shape)) + x = x[:, :, np.newaxis] + elif x.ndim == 3: + if not x.shape[0] == x.shape[1]: + raise ValueError("Quantiles must be square in the first two" + " dimensions if they are three dimensional" + ", but x.shape = %s." 
% str(x.shape)) + elif x.ndim > 3: + raise ValueError("Quantiles must be at most two-dimensional with" + " an additional dimension for multiple" + "components, but x.ndim = %d" % x.ndim) + + # Now we have 3-dim array; should have shape [dim, dim, *] + if not x.shape[0:2] == (dim, dim): + raise ValueError('Quantiles have incompatible dimensions: should' + f' be {(dim, dim)}, got {x.shape[0:2]}.') + + return x + + def _process_size(self, size): + size = np.asarray(size) + + if size.ndim == 0: + size = size[np.newaxis] + elif size.ndim > 1: + raise ValueError('Size must be an integer or tuple of integers;' + ' thus must have dimension <= 1.' + ' Got size.ndim = %s' % str(tuple(size))) + n = size.prod() + shape = tuple(size) + + return n, shape + + def _logpdf(self, x, dim, df, scale, log_det_scale, C): + """Log of the Wishart probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + scale : ndarray + Scale matrix + log_det_scale : float + Logarithm of the determinant of the scale matrix + C : ndarray + Cholesky factorization of the scale matrix, lower triagular. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + # log determinant of x + # Note: x has components along the last axis, so that x.T has + # components alone the 0-th axis. 
Then since det(A) = det(A'), this + # gives us a 1-dim vector of determinants + + # Retrieve tr(scale^{-1} x) + log_det_x = np.empty(x.shape[-1]) + scale_inv_x = np.empty(x.shape) + tr_scale_inv_x = np.empty(x.shape[-1]) + for i in range(x.shape[-1]): + _, log_det_x[i] = self._cholesky_logdet(x[:, :, i]) + scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i]) + tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace() + + # Log PDF + out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) - + (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale + + multigammaln(0.5*df, dim))) + + return out + + def logpdf(self, x, df, scale): + """Log of the Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + x = self._process_quantiles(x, dim) + + # Cholesky decomposition of scale, get log(det(scale)) + C, log_det_scale = self._cholesky_logdet(scale) + + out = self._logpdf(x, dim, df, scale, log_det_scale, C) + return _squeeze_output(out) + + def pdf(self, x, df, scale): + """Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + return np.exp(self.logpdf(x, df, scale)) + + def _mean(self, dim, df, scale): + """Mean of the Wishart distribution. 
+ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mean' instead. + + """ + return df * scale + + def mean(self, df, scale): + """Mean of the Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float + The mean of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mean(dim, df, scale) + return _squeeze_output(out) + + def _mode(self, dim, df, scale): + """Mode of the Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mode' instead. + + """ + if df >= dim + 1: + out = (df-dim-1) * scale + else: + out = None + return out + + def mode(self, df, scale): + """Mode of the Wishart distribution + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mode : float or None + The Mode of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mode(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _var(self, dim, df, scale): + """Variance of the Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'var' instead. + + """ + var = scale**2 + diag = scale.diagonal() # 1 x dim array + var += np.outer(diag, diag) + var *= df + return var + + def var(self, df, scale): + """Variance of the Wishart distribution. 
+ + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + var : float + The variance of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._var(dim, df, scale) + return _squeeze_output(out) + + def _standard_rvs(self, n, shape, dim, df, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + # Random normal variates for off-diagonal elements + n_tril = dim * (dim-1) // 2 + covariances = random_state.normal( + size=n*n_tril).reshape(shape+(n_tril,)) + + # Random chi-square variates for diagonal elements + variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5 + for i in range(dim)]].reshape((dim,) + + shape[::-1]).T) + + # Create the A matri(ces) - lower triangular + A = np.zeros(shape + (dim, dim)) + + # Input the covariances + size_idx = tuple([slice(None, None, None)]*len(shape)) + tril_idx = np.tril_indices(dim, k=-1) + A[size_idx + tril_idx] = covariances + + # Input the variances + diag_idx = np.diag_indices(dim) + A[size_idx + diag_idx] = variances + + return A + + def _rvs(self, n, shape, dim, df, C, random_state): + """Draw random samples from a Wishart distribution. 
+ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + C : ndarray + Cholesky factorization of the scale matrix, lower triangular. + %(_doc_random_state)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + random_state = self._get_random_state(random_state) + # Calculate the matrices A, which are actually lower triangular + # Cholesky factorizations of a matrix B such that B ~ W(df, I) + A = self._standard_rvs(n, shape, dim, df, random_state) + + # Calculate SA = C A A' C', where SA ~ W(df, scale) + # Note: this is the product of a (lower) (lower) (lower)' (lower)' + # or, denoting B = AA', it is C B C' where C is the lower + # triangular Cholesky factorization of the scale matrix. + # this appears to conflict with the instructions in [1]_, which + # suggest that it should be D' B D where D is the lower + # triangular factorization of the scale matrix. However, it is + # meant to refer to the Bartlett (1933) representation of a + # Wishart random variate as L A A' L' where L is lower triangular + # so it appears that understanding D' to be upper triangular + # is either a typo in or misreading of [1]_. + for index in np.ndindex(shape): + CA = np.dot(C, A[index]) + A[index] = np.dot(CA, CA.T) + + return A + + def rvs(self, df, scale, size=1, random_state=None): + """Draw random samples from a Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`) + (``dim``, ``dim``), where + ``dim`` is the dimension of the scale matrix. 
+ + Notes + ----- + %(_doc_callparams_note)s + + """ + n, shape = self._process_size(size) + dim, df, scale = self._process_parameters(df, scale) + + # Cholesky decomposition of scale + C = scipy.linalg.cholesky(scale, lower=True) + + out = self._rvs(n, shape, dim, df, C, random_state) + + return _squeeze_output(out) + + def _entropy(self, dim, df, log_det_scale): + """Compute the differential entropy of the Wishart. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + log_det_scale : float + Logarithm of the determinant of the scale matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'entropy' instead. + + """ + return ( + 0.5 * (dim+1) * log_det_scale + + 0.5 * dim * (dim+1) * _LOG_2 + + multigammaln(0.5*df, dim) - + 0.5 * (df - dim - 1) * np.sum( + [psi(0.5*(df + 1 - (i+1))) for i in range(dim)] + ) + + 0.5 * df * dim + ) + + def entropy(self, df, scale): + """Compute the differential entropy of the Wishart. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Wishart distribution + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + _, log_det_scale = self._cholesky_logdet(scale) + return self._entropy(dim, df, log_det_scale) + + def _cholesky_logdet(self, scale): + """Compute Cholesky decomposition and determine (log(det(scale)). + + Parameters + ---------- + scale : ndarray + Scale matrix. + + Returns + ------- + c_decomp : ndarray + The Cholesky decomposition of `scale`. + logdet : scalar + The log of the determinant of `scale`. + + Notes + ----- + This computation of ``logdet`` is equivalent to + ``np.linalg.slogdet(scale)``. It is ~2x faster though. 
+ + """ + c_decomp = scipy.linalg.cholesky(scale, lower=True) + logdet = 2 * np.sum(np.log(c_decomp.diagonal())) + return c_decomp, logdet + + +wishart = wishart_gen() + + +class wishart_frozen(multi_rv_frozen): + """Create a frozen Wishart distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution + scale : array_like + Scale matrix of the distribution + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + """ + def __init__(self, df, scale, seed=None): + self._dist = wishart_gen(seed) + self.dim, self.df, self.scale = self._dist._process_parameters( + df, scale) + self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + + out = self._dist._logpdf(x, self.dim, self.df, self.scale, + self.log_det_scale, self.C) + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def mean(self): + out = self._dist._mean(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def mode(self): + out = self._dist._mode(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def var(self): + out = self._dist._var(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def rvs(self, size=1, random_state=None): + n, shape = self._dist._process_size(size) + out = self._dist._rvs(n, shape, self.dim, self.df, + self.C, random_state) + return _squeeze_output(out) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.log_det_scale) + + +# Set frozen generator docstrings from corresponding docstrings in +# Wishart and fill in default strings in class 
docstrings +for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']: + method = wishart_gen.__dict__[name] + method_frozen = wishart_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, wishart_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) + + +class invwishart_gen(wishart_gen): + r"""An inverse Wishart random variable. + + The `df` keyword specifies the degrees of freedom. The `scale` keyword + specifies the scale matrix, which must be symmetric and positive definite. + In this context, the scale matrix is often interpreted in terms of a + multivariate normal covariance matrix. + + Methods + ------- + pdf(x, df, scale) + Probability density function. + logpdf(x, df, scale) + Log of the probability density function. + rvs(df, scale, size=1, random_state=None) + Draw random samples from an inverse Wishart distribution. + entropy(df, scale) + Differential entropy of the distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Raises + ------ + scipy.linalg.LinAlgError + If the scale matrix `scale` is not positive definite. + + See Also + -------- + wishart + + Notes + ----- + %(_doc_callparams_note)s + + The scale matrix `scale` must be a symmetric positive definite + matrix. Singular matrices, including the symmetric positive semi-definite + case, are not supported. Symmetry is not checked; only the lower triangular + portion is used. + + The inverse Wishart distribution is often denoted + + .. math:: + + W_p^{-1}(\nu, \Psi) + + where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the + :math:`p \times p` scale matrix. + + The probability density function for `invwishart` has support over positive + definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`, + then its PDF is given by: + + .. 
math:: + + f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} } + |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)} + \exp\left( -tr(\Sigma S^{-1}) / 2 \right) + + If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then + :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart). + + If the scale matrix is 1-dimensional and equal to one, then the inverse + Wishart distribution :math:`W_1(\nu, 1)` collapses to the + inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` + and scale = :math:`\frac{1}{2}`. + + Instead of inverting a randomly generated Wishart matrix as described in [2], + here the algorithm in [4] is used to directly generate a random inverse-Wishart + matrix without inversion. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", + Wiley, 1983. + .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications + in Statistics - Simulation and Computation, vol. 14.2, pp.511-514, + 1985. + .. [3] Gupta, M. and Srivastava, S. "Parametric Bayesian Estimation of + Differential Entropy and Relative Entropy". Entropy 12, 818 - 843. + 2010. + .. [4] S.D. Axen, "Efficiently generating inverse-Wishart matrices and + their Cholesky factors", :arXiv:`2310.15884v1`. 2023. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import invwishart, invgamma + >>> x = np.linspace(0.01, 1, 100) + >>> iw = invwishart.pdf(x, df=6, scale=1) + >>> iw[:3] + array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) + >>> ig = invgamma.pdf(x, 6/2., scale=1./2) + >>> ig[:3] + array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) + >>> plt.plot(x, iw) + >>> plt.show() + + The input quantiles can be any shape of array, as long as the last + axis labels the components. 
+ + Alternatively, the object may be called (as a function) to fix the degrees + of freedom and scale parameters, returning a "frozen" inverse Wishart + random variable: + + >>> rv = invwishart(df=1, scale=1) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) + + def __call__(self, df=None, scale=None, seed=None): + """Create a frozen inverse Wishart distribution. + + See `invwishart_frozen` for more information. + + """ + return invwishart_frozen(df, scale, seed) + + def _logpdf(self, x, dim, df, log_det_scale, C): + """Log of the inverse Wishart probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function. + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + log_det_scale : float + Logarithm of the determinant of the scale matrix + C : ndarray + Cholesky factorization of the scale matrix, lower triagular. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + # Retrieve tr(scale x^{-1}) + log_det_x = np.empty(x.shape[-1]) + tr_scale_x_inv = np.empty(x.shape[-1]) + trsm = get_blas_funcs(('trsm'), (x,)) + if dim > 1: + for i in range(x.shape[-1]): + Cx, log_det_x[i] = self._cholesky_logdet(x[:, :, i]) + A = trsm(1., Cx, C, side=0, lower=True) + tr_scale_x_inv[i] = np.linalg.norm(A)**2 + else: + log_det_x[:] = np.log(x[0, 0]) + tr_scale_x_inv[:] = C[0, 0]**2 / x[0, 0] + + # Log PDF + out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) - + (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - + multigammaln(0.5*df, dim)) + + return out + + def logpdf(self, x, df, scale): + """Log of the inverse Wishart probability density function. 
+ + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + x = self._process_quantiles(x, dim) + C, log_det_scale = self._cholesky_logdet(scale) + out = self._logpdf(x, dim, df, log_det_scale, C) + return _squeeze_output(out) + + def pdf(self, x, df, scale): + """Inverse Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + return np.exp(self.logpdf(x, df, scale)) + + def _mean(self, dim, df, scale): + """Mean of the inverse Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mean' instead. + + """ + if df > dim + 1: + out = scale / (df - dim - 1) + else: + out = None + return out + + def mean(self, df, scale): + """Mean of the inverse Wishart distribution. + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix plus one. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float or None + The mean of the distribution + + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mean(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _mode(self, dim, df, scale): + """Mode of the inverse Wishart distribution. 
+ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mode' instead. + + """ + return scale / (df + dim + 1) + + def mode(self, df, scale): + """Mode of the inverse Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mode : float + The Mode of the distribution + + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mode(dim, df, scale) + return _squeeze_output(out) + + def _var(self, dim, df, scale): + """Variance of the inverse Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'var' instead. + + """ + if df > dim + 3: + var = (df - dim + 1) * scale**2 + diag = scale.diagonal() # 1 x dim array + var += (df - dim - 1) * np.outer(diag, diag) + var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3) + else: + var = None + return var + + def var(self, df, scale): + """Variance of the inverse Wishart distribution. + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix plus three. 
+ + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + var : float + The variance of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._var(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _inv_standard_rvs(self, n, shape, dim, df, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Returns + ------- + A : ndarray + Random variates of shape (`shape`) + (``dim``, ``dim``). + Each slice `A[..., :, :]` is lower-triangular, and its + inverse is the lower Cholesky factor of a draw from + `invwishart(df, np.eye(dim))`. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + A = np.zeros(shape + (dim, dim)) + + # Random normal variates for off-diagonal elements + tri_rows, tri_cols = np.tril_indices(dim, k=-1) + n_tril = dim * (dim-1) // 2 + A[..., tri_rows, tri_cols] = random_state.normal( + size=(*shape, n_tril), + ) + + # Random chi variates for diagonal elements + rows = np.arange(dim) + chi_dfs = (df - dim + 1) + rows + A[..., rows, rows] = random_state.chisquare( + df=chi_dfs, size=(*shape, dim), + )**0.5 + + return A + + def _rvs(self, n, shape, dim, df, C, random_state): + """Draw random samples from an inverse Wishart distribution. 
+ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + C : ndarray + Cholesky factorization of the scale matrix, lower triagular. + %(_doc_random_state)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + random_state = self._get_random_state(random_state) + # Get random draws A such that inv(A) ~ iW(df, I) + A = self._inv_standard_rvs(n, shape, dim, df, random_state) + + # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale) + trsm = get_blas_funcs(('trsm'), (A,)) + trmm = get_blas_funcs(('trmm'), (A,)) + + for index in np.ndindex(A.shape[:-2]): + if dim > 1: + # Calculate CA + # Get CA = C A^{-1} via triangular solver + CA = trsm(1., A[index], C, side=1, lower=True) + # get SA + A[index] = trmm(1., CA, CA, side=1, lower=True, trans_a=True) + else: + A[index][0, 0] = (C[0, 0] / A[index][0, 0])**2 + + return A + + def rvs(self, df, scale, size=1, random_state=None): + """Draw random samples from an inverse Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`) + (``dim``, ``dim``), where + ``dim`` is the dimension of the scale matrix. + + Notes + ----- + %(_doc_callparams_note)s + + """ + n, shape = self._process_size(size) + dim, df, scale = self._process_parameters(df, scale) + + # Cholesky decomposition of scale + C = scipy.linalg.cholesky(scale, lower=True) + + out = self._rvs(n, shape, dim, df, C, random_state) + + return _squeeze_output(out) + + def _entropy(self, dim, df, log_det_scale): + # reference: eq. (17) from ref. 
3 + psi_eval_points = [0.5 * (df - dim + i) for i in range(1, dim + 1)] + psi_eval_points = np.asarray(psi_eval_points) + return multigammaln(0.5 * df, dim) + 0.5 * dim * df + \ + 0.5 * (dim + 1) * (log_det_scale - _LOG_2) - \ + 0.5 * (df + dim + 1) * \ + psi(psi_eval_points, out=psi_eval_points).sum() + + def entropy(self, df, scale): + dim, df, scale = self._process_parameters(df, scale) + _, log_det_scale = self._cholesky_logdet(scale) + return self._entropy(dim, df, log_det_scale) + + +invwishart = invwishart_gen() + + +class invwishart_frozen(multi_rv_frozen): + def __init__(self, df, scale, seed=None): + """Create a frozen inverse Wishart distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution + scale : array_like + Scale matrix of the distribution + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is None the `numpy.random.Generator` singleton is used. + If `seed` is an int, a new ``Generator`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` instance then that instance is + used. 
+ + """ + self._dist = invwishart_gen(seed) + self.dim, self.df, self.scale = self._dist._process_parameters( + df, scale + ) + + # Get the determinant via Cholesky factorization + self.C = scipy.linalg.cholesky(self.scale, lower=True) + self.log_det_scale = 2 * np.sum(np.log(self.C.diagonal())) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._logpdf(x, self.dim, self.df, + self.log_det_scale, self.C) + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def mean(self): + out = self._dist._mean(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def mode(self): + out = self._dist._mode(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def var(self): + out = self._dist._var(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def rvs(self, size=1, random_state=None): + n, shape = self._dist._process_size(size) + + out = self._dist._rvs(n, shape, self.dim, self.df, + self.C, random_state) + + return _squeeze_output(out) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.log_det_scale) + + +# Set frozen generator docstrings from corresponding docstrings in +# inverse Wishart and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']: + method = invwishart_gen.__dict__[name] + method_frozen = wishart_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, wishart_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) + +_multinomial_doc_default_callparams = """\ +n : int + Number of trials +p : array_like + Probability of a trial falling into each category; should sum to 1 +""" + +_multinomial_doc_callparams_note = """\ +`n` should be a nonnegative integer. Each element of `p` should be in the +interval :math:`[0,1]` and the elements should sum to 1. 
If they do not sum to +1, the last element of the `p` array is not used and is replaced with the +remaining probability left over from the earlier elements. +""" + +_multinomial_doc_frozen_callparams = "" + +_multinomial_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +multinomial_docdict_params = { + '_doc_default_callparams': _multinomial_doc_default_callparams, + '_doc_callparams_note': _multinomial_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +multinomial_docdict_noparams = { + '_doc_default_callparams': _multinomial_doc_frozen_callparams, + '_doc_callparams_note': _multinomial_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multinomial_gen(multi_rv_generic): + r"""A multinomial random variable. + + Methods + ------- + pmf(x, n, p) + Probability mass function. + logpmf(x, n, p) + Log of the probability mass function. + rvs(n, p, size=1, random_state=None) + Draw random samples from a multinomial distribution. + entropy(n, p) + Compute the entropy of the multinomial distribution. + cov(n, p) + Compute the covariance matrix of the multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_callparams_note)s + + The probability mass function for `multinomial` is + + .. math:: + + f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k}, + + supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a + nonnegative integer and their sum is :math:`n`. + + .. 
versionadded:: 0.19.0 + + Examples + -------- + + >>> from scipy.stats import multinomial + >>> rv = multinomial(8, [0.3, 0.2, 0.5]) + >>> rv.pmf([1, 3, 4]) + 0.042000000000000072 + + The multinomial distribution for :math:`k=2` is identical to the + corresponding binomial distribution (tiny numerical differences + notwithstanding): + + >>> from scipy.stats import binom + >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6]) + 0.29030399999999973 + >>> binom.pmf(3, 7, 0.4) + 0.29030400000000012 + + The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support + broadcasting, under the convention that the vector parameters (``x`` and + ``p``) are interpreted as if each row along the last axis is a single + object. For instance: + + >>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7]) + array([0.2268945, 0.25412184]) + + Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``, + but following the rules mentioned above they behave as if the rows + ``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single + object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and + ``p.shape = ()``. To obtain the individual elements without broadcasting, + we would do this: + + >>> multinomial.pmf([3, 4], n=7, p=[.3, .7]) + 0.2268945 + >>> multinomial.pmf([3, 5], 8, p=[.3, .7]) + 0.25412184 + + This broadcasting also works for ``cov``, where the output objects are + square matrices of size ``p.shape[-1]``. For example: + + >>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]]) + array([[[ 0.84, -0.84], + [-0.84, 0.84]], + [[ 1.2 , -1.2 ], + [-1.2 , 1.2 ]]]) + + In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and + following the rules above, these broadcast as if ``p.shape == (2,)``. 
+ Thus the result should also be of shape ``(2,)``, but since each output is + a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``, + where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and + ``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``. + + Alternatively, the object may be called (as a function) to fix the `n` and + `p` parameters, returning a "frozen" multinomial random variable: + + >>> rv = multinomial(n=7, p=[.3, .7]) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. + + See also + -------- + scipy.stats.binom : The binomial distribution. + numpy.random.Generator.multinomial : Sampling from the multinomial distribution. + scipy.stats.multivariate_hypergeom : + The multivariate hypergeometric distribution. + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = \ + doccer.docformat(self.__doc__, multinomial_docdict_params) + + def __call__(self, n, p, seed=None): + """Create a frozen multinomial distribution. + + See `multinomial_frozen` for more information. + """ + return multinomial_frozen(n, p, seed) + + def _process_parameters(self, n, p, eps=1e-15): + """Returns: n_, p_, npcond. + + n_ and p_ are arrays of the correct shape; npcond is a boolean array + flagging values out of the domain. + """ + p = np.array(p, dtype=np.float64, copy=True) + p_adjusted = 1. - p[..., :-1].sum(axis=-1) + i_adjusted = np.abs(p_adjusted) > eps + p[i_adjusted, -1] = p_adjusted[i_adjusted] + + # true for bad p + pcond = np.any(p < 0, axis=-1) + pcond |= np.any(p > 1, axis=-1) + + n = np.array(n, dtype=int, copy=True) + + # true for bad n + ncond = n < 0 + + return n, p, ncond | pcond + + def _process_quantiles(self, x, n, p): + """Returns: x_, xcond. + + x_ is an int array; xcond is a boolean array flagging values out of the + domain. 
+ """ + xx = np.asarray(x, dtype=int) + + if xx.ndim == 0: + raise ValueError("x must be an array.") + + if xx.size != 0 and not xx.shape[-1] == p.shape[-1]: + raise ValueError("Size of each quantile should be size of p: " + "received %d, but expected %d." % + (xx.shape[-1], p.shape[-1])) + + # true for x out of the domain + cond = np.any(xx != x, axis=-1) + cond |= np.any(xx < 0, axis=-1) + cond = cond | (np.sum(xx, axis=-1) != n) + + return xx, cond + + def _checkresult(self, result, cond, bad_value): + result = np.asarray(result) + + if cond.ndim != 0: + result[cond] = bad_value + elif cond: + if result.ndim == 0: + return bad_value + result[...] = bad_value + return result + + def _logpmf(self, x, n, p): + return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1) + + def logpmf(self, x, n, p): + """Log of the Multinomial probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + x, xcond = self._process_quantiles(x, n, p) + + result = self._logpmf(x, n, p) + + # replace values for which x was out of the domain; broadcast + # xcond to the right shape + xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_) + result = self._checkresult(result, xcond_, -np.inf) + + # replace values bad for n or p; broadcast npcond to the right shape + npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_) + return self._checkresult(result, npcond_, np.nan) + + def pmf(self, x, n, p): + """Multinomial probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. 
+ %(_doc_default_callparams)s + + Returns + ------- + pmf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + return np.exp(self.logpmf(x, n, p)) + + def mean(self, n, p): + """Mean of the Multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float + The mean of the distribution + """ + n, p, npcond = self._process_parameters(n, p) + result = n[..., np.newaxis]*p + return self._checkresult(result, npcond, np.nan) + + def cov(self, n, p): + """Covariance matrix of the multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + cov : ndarray + The covariance matrix of the distribution + """ + n, p, npcond = self._process_parameters(n, p) + + nn = n[..., np.newaxis, np.newaxis] + result = nn * np.einsum('...j,...k->...jk', -p, p) + + # change the diagonal + for i in range(p.shape[-1]): + result[..., i, i] += n*p[..., i] + + return self._checkresult(result, npcond, np.nan) + + def entropy(self, n, p): + r"""Compute the entropy of the multinomial distribution. + + The entropy is computed using this expression: + + .. math:: + + f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i + + \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x! 
+ + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Multinomial distribution + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + + x = np.r_[1:np.max(n)+1] + + term1 = n*np.sum(entr(p), axis=-1) + term1 -= gammaln(n+1) + + n = n[..., np.newaxis] + new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1 + x.shape += (1,)*new_axes_needed + + term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1), + axis=(-1, -1-new_axes_needed)) + + return self._checkresult(term1 + term2, npcond, np.nan) + + def rvs(self, n, p, size=None, random_state=None): + """Draw random samples from a Multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of shape (`size`, `len(p)`) + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + random_state = self._get_random_state(random_state) + return random_state.multinomial(n, p, size) + + +multinomial = multinomial_gen() + + +class multinomial_frozen(multi_rv_frozen): + r"""Create a frozen Multinomial distribution. + + Parameters + ---------- + n : int + number of trials + p: array_like + probability of a trial falling into each category; should sum to 1 + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. 
+ """ + def __init__(self, n, p, seed=None): + self._dist = multinomial_gen(seed) + self.n, self.p, self.npcond = self._dist._process_parameters(n, p) + + # monkey patch self._dist + def _process_parameters(n, p): + return self.n, self.p, self.npcond + + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, self.n, self.p) + + def pmf(self, x): + return self._dist.pmf(x, self.n, self.p) + + def mean(self): + return self._dist.mean(self.n, self.p) + + def cov(self): + return self._dist.cov(self.n, self.p) + + def entropy(self): + return self._dist.entropy(self.n, self.p) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.n, self.p, size, random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multinomial and fill in default strings in class docstrings +for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']: + method = multinomial_gen.__dict__[name] + method_frozen = multinomial_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, multinomial_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + multinomial_docdict_params) + + +class special_ortho_group_gen(multi_rv_generic): + r"""A Special Orthogonal matrix (SO(N)) random variable. + + Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(N)) with a determinant of +1. + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from SO(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. 
+ If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is wrapping the random_rot code from the MDP Toolkit, + https://github.com/mdp-toolkit/mdp-toolkit + + Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(N)). + The algorithm is described in the paper + Stewart, G.W., "The efficient generation of random orthogonal + matrices with an application to condition estimators", SIAM Journal + on Numerical Analysis, 17(3), pp. 403-409, 1980. + For more information see + https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization + + See also the similar `ortho_group`. For a random rotation in three + dimensions, see `scipy.spatial.transform.Rotation.random`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import special_ortho_group + >>> x = special_ortho_group.rvs(3) + + >>> np.dot(x, x.T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + >>> import scipy.linalg + >>> scipy.linalg.det(x) + 1.0 + + This generates one random matrix from SO(3). It is orthogonal and + has a determinant of 1. + + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, returning a "frozen" special_ortho_group random variable: + + >>> rv = special_ortho_group(5) + >>> # Frozen object with the same methods but holding the + >>> # dimension parameter fixed. + + See Also + -------- + ortho_group, scipy.spatial.transform.Rotation.random + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen SO(N) distribution. + + See `special_ortho_group_frozen` for more information. 
+ """ + return special_ortho_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("""Dimension of rotation must be specified, + and must be a scalar greater than 1.""") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from SO(N). + + Parameters + ---------- + dim : integer + Dimension of rotation space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + size = (size,) if size > 1 else () + + dim = self._process_parameters(dim) + + # H represents a (dim, dim) matrix, while D represents the diagonal of + # a (dim, dim) diagonal matrix. The algorithm that follows is + # broadcasted on the leading shape in `size` to vectorize along + # samples. + H = np.empty(size + (dim, dim)) + H[..., :, :] = np.eye(dim) + D = np.empty(size + (dim,)) + + for n in range(dim-1): + + # x is a vector with length dim-n, xrow and xcol are views of it as + # a row vector and column vector respectively. It's important they + # are views and not copies because we are going to modify x + # in-place. 
+ x = random_state.normal(size=size + (dim-n,)) + xrow = x[..., None, :] + xcol = x[..., :, None] + + # This is the squared norm of x, without vectorization it would be + # dot(x, x), to have proper broadcasting we use matmul and squeeze + # out (convert to scalar) the resulting 1x1 matrix + norm2 = np.matmul(xrow, xcol).squeeze((-2, -1)) + + x0 = x[..., 0].copy() + D[..., n] = np.where(x0 != 0, np.sign(x0), 1) + x[..., 0] += D[..., n]*np.sqrt(norm2) + + # In renormalizing x we have to append an additional axis with + # [..., None] to broadcast the scalar against the vector x + x /= np.sqrt((norm2 - x0**2 + x[..., 0]**2) / 2.)[..., None] + + # Householder transformation, without vectorization the RHS can be + # written as outer(H @ x, x) (apart from the slicing) + H[..., :, n:] -= np.matmul(H[..., :, n:], xcol) * xrow + + D[..., -1] = (-1)**(dim-1)*D[..., :-1].prod(axis=-1) + + # Without vectorization this could be written as H = diag(D) @ H, + # left-multiplication by a diagonal matrix amounts to multiplying each + # row of H by an element of the diagonal, so we add a dummy axis for + # the column index + H *= D[..., :, None] + return H + + +special_ortho_group = special_ortho_group_gen() + + +class special_ortho_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen SO(N) distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. 
+ + Examples + -------- + >>> from scipy.stats import special_ortho_group + >>> g = special_ortho_group(5) + >>> x = g.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = special_ortho_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +class ortho_group_gen(multi_rv_generic): + r"""An Orthogonal matrix (O(N)) random variable. + + Return a random orthogonal matrix, drawn from the O(N) Haar + distribution (the only uniform distribution on O(N)). + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from O(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is closely related to `special_ortho_group`. + + Some care is taken to avoid numerical error, as per the paper by Mezzadri. + + References + ---------- + .. [1] F. Mezzadri, "How to generate random matrices from the classical + compact groups", :arXiv:`math-ph/0609050v2`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import ortho_group + >>> x = ortho_group.rvs(3) + + >>> np.dot(x, x.T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + >>> import scipy.linalg + >>> np.fabs(scipy.linalg.det(x)) + 1.0 + + This generates one random matrix from O(3). It is orthogonal and + has a determinant of +1 or -1. 
+ + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, returning a "frozen" ortho_group random variable: + + >>> rv = ortho_group(5) + >>> # Frozen object with the same methods but holding the + >>> # dimension parameter fixed. + + See Also + -------- + special_ortho_group + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen O(N) distribution. + + See `ortho_group_frozen` for more information. + """ + return ortho_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("Dimension of rotation must be specified," + "and must be a scalar greater than 1.") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from O(N). + + Parameters + ---------- + dim : integer + Dimension of rotation space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + + dim = self._process_parameters(dim) + + size = (size,) if size > 1 else () + z = random_state.normal(size=size + (dim, dim)) + q, r = np.linalg.qr(z) + # The last two dimensions are the rows and columns of R matrices. + # Extract the diagonals. Note that this eliminates a dimension. + d = r.diagonal(offset=0, axis1=-2, axis2=-1) + # Add back a dimension for proper broadcasting: we're dividing + # each row of each R matrix by the diagonal of the R matrix. 
+ q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly + return q + + +ortho_group = ortho_group_gen() + + +class ortho_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen O(N) distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Examples + -------- + >>> from scipy.stats import ortho_group + >>> g = ortho_group(5) + >>> x = g.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = ortho_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +class random_correlation_gen(multi_rv_generic): + r"""A random correlation matrix. + + Return a random correlation matrix, given a vector of eigenvalues. + + The `eigs` keyword specifies the eigenvalues of the correlation matrix, + and implies the dimension. + + Methods + ------- + rvs(eigs=None, random_state=None) + Draw random correlation matrices, all with eigenvalues eigs. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. 
Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. + + Notes + ----- + + Generates a random correlation matrix following a numerically stable + algorithm spelled out by Davies & Higham. This algorithm uses a single O(N) + similarity transformation to construct a symmetric positive semi-definite + matrix, and applies a series of Givens rotations to scale it to have ones + on the diagonal. + + References + ---------- + + .. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation + of correlation matrices and their factors", BIT 2000, Vol. 40, + No. 4, pp. 640 651 + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import random_correlation + >>> rng = np.random.default_rng() + >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng) + >>> x + array([[ 1. , -0.02423399, 0.03130519, 0.4946965 ], + [-0.02423399, 1. , 0.20334736, 0.04039817], + [ 0.03130519, 0.20334736, 1. , 0.02694275], + [ 0.4946965 , 0.04039817, 0.02694275, 1. ]]) + >>> import scipy.linalg + >>> e, v = scipy.linalg.eigh(x) + >>> e + array([ 0.5, 0.8, 1.2, 1.5]) + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7): + """Create a frozen random correlation matrix. + + See `random_correlation_frozen` for more information. 
+ """ + return random_correlation_frozen(eigs, seed=seed, tol=tol, + diag_tol=diag_tol) + + def _process_parameters(self, eigs, tol): + eigs = np.asarray(eigs, dtype=float) + dim = eigs.size + + if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1: + raise ValueError("Array 'eigs' must be a vector of length " + "greater than 1.") + + if np.fabs(np.sum(eigs) - dim) > tol: + raise ValueError("Sum of eigenvalues must equal dimensionality.") + + for x in eigs: + if x < -tol: + raise ValueError("All eigenvalues must be non-negative.") + + return dim, eigs + + def _givens_to_1(self, aii, ajj, aij): + """Computes a 2x2 Givens matrix to put 1's on the diagonal. + + The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ]. + + The output matrix g is a 2x2 anti-symmetric matrix of the form + [ c s ; -s c ]; the elements c and s are returned. + + Applying the output matrix to the input matrix (as b=g.T M g) + results in a matrix with bii=1, provided tr(M) - det(M) >= 1 + and floating point issues do not occur. Otherwise, some other + valid rotation is returned. When tr(M)==2, also bjj=1. + + """ + aiid = aii - 1. + ajjd = ajj - 1. + + if ajjd == 0: + # ajj==1, so swap aii and ajj to avoid division by zero + return 0., 1. + + dd = math.sqrt(max(aij**2 - aiid*ajjd, 0)) + + # The choice of t should be chosen to avoid cancellation [1] + t = (aij + math.copysign(dd, aij)) / ajjd + c = 1. / math.sqrt(1. + t*t) + if c == 0: + # Underflow + s = 1.0 + else: + s = c*t + return c, s + + def _to_corr(self, m): + """ + Given a psd matrix m, rotate to put one's on the diagonal, turning it + into a correlation matrix. This also requires the trace equal the + dimensionality. 
Note: modifies input matrix + """ + # Check requirements for in-place Givens + if not (m.flags.c_contiguous and m.dtype == np.float64 and + m.shape[0] == m.shape[1]): + raise ValueError() + + d = m.shape[0] + for i in range(d-1): + if m[i, i] == 1: + continue + elif m[i, i] > 1: + for j in range(i+1, d): + if m[j, j] < 1: + break + else: + for j in range(i+1, d): + if m[j, j] > 1: + break + + c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j]) + + # Use BLAS to apply Givens rotations in-place. Equivalent to: + # g = np.eye(d) + # g[i, i] = g[j,j] = c + # g[j, i] = -s; g[i, j] = s + # m = np.dot(g.T, np.dot(m, g)) + mv = m.ravel() + drot(mv, mv, c, -s, n=d, + offx=i*d, incx=1, offy=j*d, incy=1, + overwrite_x=True, overwrite_y=True) + drot(mv, mv, c, -s, n=d, + offx=i, incx=d, offy=j, incy=d, + overwrite_x=True, overwrite_y=True) + + return m + + def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7): + """Draw random correlation matrices. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. 
+ + """ + dim, eigs = self._process_parameters(eigs, tol=tol) + + random_state = self._get_random_state(random_state) + + m = ortho_group.rvs(dim, random_state=random_state) + m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m + m = self._to_corr(m) # Carefully rotate to unit diagonal + + # Check diagonal + if abs(m.diagonal() - 1).max() > diag_tol: + raise RuntimeError("Failed to generate a valid correlation matrix") + + return m + + +random_correlation = random_correlation_gen() + + +class random_correlation_frozen(multi_rv_frozen): + def __init__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7): + """Create a frozen random correlation matrix distribution. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. + """ # numpy/numpydoc#87 # noqa: E501 + + self._dist = random_correlation_gen(seed) + self.tol = tol + self.diag_tol = diag_tol + _, self.eigs = self._dist._process_parameters(eigs, tol=self.tol) + + def rvs(self, random_state=None): + return self._dist.rvs(self.eigs, random_state=random_state, + tol=self.tol, diag_tol=self.diag_tol) + + +class unitary_group_gen(multi_rv_generic): + r"""A matrix-valued U(N) random variable. 
+ + Return a random unitary matrix. + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from U(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices, must be greater than 1. + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is similar to `ortho_group`. + + References + ---------- + .. [1] F. Mezzadri, "How to generate random matrices from the classical + compact groups", :arXiv:`math-ph/0609050v2`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import unitary_group + >>> x = unitary_group.rvs(3) + + >>> np.dot(x, x.conj().T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + This generates one random matrix from U(3). The dot product confirms that + it is unitary up to machine precision. + + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, return a "frozen" unitary_group random variable: + + >>> rv = unitary_group(5) + + See Also + -------- + ortho_group + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen (U(N)) n-dimensional unitary matrix distribution. + + See `unitary_group_frozen` for more information. 
+ """ + return unitary_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("Dimension of rotation must be specified," + "and must be a scalar greater than 1.") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from U(N). + + Parameters + ---------- + dim : integer + Dimension of space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + + dim = self._process_parameters(dim) + + size = (size,) if size > 1 else () + z = 1/math.sqrt(2)*(random_state.normal(size=size + (dim, dim)) + + 1j*random_state.normal(size=size + (dim, dim))) + q, r = np.linalg.qr(z) + # The last two dimensions are the rows and columns of R matrices. + # Extract the diagonals. Note that this eliminates a dimension. + d = r.diagonal(offset=0, axis1=-2, axis2=-1) + # Add back a dimension for proper broadcasting: we're dividing + # each row of each R matrix by the diagonal of the R matrix. + q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly + return q + + +unitary_group = unitary_group_gen() + + +class unitary_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen (U(N)) n-dimensional unitary matrix distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. 
+ + Examples + -------- + >>> from scipy.stats import unitary_group + >>> x = unitary_group(3) + >>> x.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = unitary_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +_mvt_doc_default_callparams = """\ +loc : array_like, optional + Location of the distribution. (default ``0``) +shape : array_like, optional + Positive semidefinite matrix of the distribution. (default ``1``) +df : float, optional + Degrees of freedom of the distribution; must be greater than zero. + If ``np.inf`` then results are multivariate normal. The default is ``1``. +allow_singular : bool, optional + Whether to allow a singular matrix. (default ``False``) +""" + +_mvt_doc_callparams_note = """\ +Setting the parameter `loc` to ``None`` is equivalent to having `loc` +be the zero-vector. The parameter `shape` can be a scalar, in which case +the shape matrix is the identity times that value, a vector of +diagonal entries for the shape matrix, or a two-dimensional array_like. +""" + +_mvt_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mvt_docdict_params = { + '_mvt_doc_default_callparams': _mvt_doc_default_callparams, + '_mvt_doc_callparams_note': _mvt_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mvt_docdict_noparams = { + '_mvt_doc_default_callparams': "", + '_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_t_gen(multi_rv_generic): + r"""A multivariate t-distributed random variable. + + The `loc` parameter specifies the location. The `shape` parameter specifies + the positive semidefinite shape matrix. The `df` parameter specifies the + degrees of freedom. 
+ + In addition to calling the methods below, the object itself may be called + as a function to fix the location, shape matrix, and degrees of freedom + parameters, returning a "frozen" multivariate t-distribution random. + + Methods + ------- + pdf(x, loc=None, shape=1, df=1, allow_singular=False) + Probability density function. + logpdf(x, loc=None, shape=1, df=1, allow_singular=False) + Log of the probability density function. + cdf(x, loc=None, shape=1, df=1, allow_singular=False, *, + maxpts=None, lower_limit=None, random_state=None) + Cumulative distribution function. + rvs(loc=None, shape=1, df=1, size=1, random_state=None) + Draw random samples from a multivariate t-distribution. + entropy(loc=None, shape=1, df=1) + Differential entropy of a multivariate t-distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_mvt_doc_callparams_note)s + The matrix `shape` must be a (symmetric) positive semidefinite matrix. The + determinant and inverse of `shape` are computed as the pseudo-determinant + and pseudo-inverse, respectively, so that `shape` does not need to have + full rank. + + The probability density function for `multivariate_t` is + + .. math:: + + f(x) = \frac{\Gamma((\nu + p)/2)}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}} + \left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top} + \boldsymbol{\Sigma}^{-1} + (\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2}, + + where :math:`p` is the dimension of :math:`\mathbf{x}`, + :math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location, + :math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape + matrix, and :math:`\nu` is the degrees of freedom. + + .. versionadded:: 1.6.0 + + References + ---------- + .. [1] Arellano-Valle et al. "Shannon Entropy and Mutual Information for + Multivariate Skew-Elliptical Distributions". Scandinavian Journal + of Statistics. Vol. 40, issue 1. 
+ + Examples + -------- + The object may be called (as a function) to fix the `loc`, `shape`, + `df`, and `allow_singular` parameters, returning a "frozen" + multivariate_t random variable: + + >>> import numpy as np + >>> from scipy.stats import multivariate_t + >>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2) + >>> # Frozen object with the same methods but holding the given location, + >>> # scale, and degrees of freedom fixed. + + Create a contour plot of the PDF. + + >>> import matplotlib.pyplot as plt + >>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01] + >>> pos = np.dstack((x, y)) + >>> fig, ax = plt.subplots(1, 1) + >>> ax.set_aspect('equal') + >>> plt.contourf(x, y, rv.pdf(pos)) + + """ + + def __init__(self, seed=None): + """Initialize a multivariate t-distributed random variable. + + Parameters + ---------- + seed : Random state. + + """ + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params) + self._random_state = check_random_state(seed) + + def __call__(self, loc=None, shape=1, df=1, allow_singular=False, + seed=None): + """Create a frozen multivariate t-distribution. + + See `multivariate_t_frozen` for parameters. + """ + if df == np.inf: + return multivariate_normal_frozen(mean=loc, cov=shape, + allow_singular=allow_singular, + seed=seed) + return multivariate_t_frozen(loc=loc, shape=shape, df=df, + allow_singular=allow_singular, seed=seed) + + def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False): + """Multivariate t-distribution probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the probability density function. + %(_mvt_doc_default_callparams)s + + Returns + ------- + pdf : Probability density function evaluated at `x`. 
+ + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.pdf(x, loc, shape, df) + 0.00075713 + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + x = self._process_quantiles(x, dim) + shape_info = _PSD(shape, allow_singular=allow_singular) + logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, + dim, shape_info.rank) + return np.exp(logpdf) + + def logpdf(self, x, loc=None, shape=1, df=1): + """Log of the multivariate t-distribution probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability density + function. + %(_mvt_doc_default_callparams)s + + Returns + ------- + logpdf : Log of the probability density function evaluated at `x`. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.logpdf(x, loc, shape, df) + -7.1859802 + + See Also + -------- + pdf : Probability density function. + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + x = self._process_quantiles(x, dim) + shape_info = _PSD(shape) + return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim, + shape_info.rank) + + def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank): + """Utility method `pdf`, `logpdf` for parameters. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability density + function. + loc : ndarray + Location of the distribution. + prec_U : ndarray + A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse + of the shape matrix. + log_pdet : float + Logarithm of the determinant of the shape matrix. + df : float + Degrees of freedom of the distribution. + dim : int + Dimension of the quantiles x. + rank : int + Rank of the shape matrix. 
+ + Notes + ----- + As this function does no argument checking, it should not be called + directly; use 'logpdf' instead. + + """ + if df == np.inf: + return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank) + + dev = x - loc + maha = np.square(np.dot(dev, prec_U)).sum(axis=-1) + + t = 0.5 * (df + dim) + A = gammaln(t) + B = gammaln(0.5 * df) + C = dim/2. * np.log(df * np.pi) + D = 0.5 * log_pdet + E = -t * np.log(1 + (1./df) * maha) + + return _squeeze_output(A - B - C - D + E) + + def _cdf(self, x, loc, shape, df, dim, maxpts=None, lower_limit=None, + random_state=None): + + # All of this - random state validation, maxpts, apply_along_axis, + # etc. needs to go in this private method unless we want + # frozen distribution's `cdf` method to duplicate it or call `cdf`, + # which would require re-processing parameters + if random_state is not None: + rng = check_random_state(random_state) + else: + rng = self._random_state + + if not maxpts: + maxpts = 1000 * dim + + x = self._process_quantiles(x, dim) + lower_limit = (np.full(loc.shape, -np.inf) + if lower_limit is None else lower_limit) + + # remove the mean + x, lower_limit = x - loc, lower_limit - loc + + b, a = np.broadcast_arrays(x, lower_limit) + i_swap = b < a + signs = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative + a, b = a.copy(), b.copy() + a[i_swap], b[i_swap] = b[i_swap], a[i_swap] + n = x.shape[-1] + limits = np.concatenate((a, b), axis=-1) + + def func1d(limits): + a, b = limits[:n], limits[n:] + return _qmvt(maxpts, df, shape, a, b, rng)[0] + + res = np.apply_along_axis(func1d, -1, limits) * signs + # Fixing the output shape for existing distributions is a separate + # issue. For now, let's keep this consistent with pdf. + return _squeeze_output(res) + + def cdf(self, x, loc=None, shape=1, df=1, allow_singular=False, *, + maxpts=None, lower_limit=None, random_state=None): + """Multivariate t-distribution cumulative distribution function. 
+ + Parameters + ---------- + x : array_like + Points at which to evaluate the cumulative distribution function. + %(_mvt_doc_default_callparams)s + maxpts : int, optional + Maximum number of points to use for integration. The default is + 1000 times the number of dimensions. + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + %(_doc_random_state)s + + Returns + ------- + cdf : ndarray or scalar + Cumulative distribution function evaluated at `x`. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.cdf(x, loc, shape, df) + 0.64798491 + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + shape = _PSD(shape, allow_singular=allow_singular)._M + + return self._cdf(x, loc, shape, df, dim, maxpts, + lower_limit, random_state) + + def _entropy(self, dim, df=1, shape=1): + if df == np.inf: + return multivariate_normal(None, cov=shape).entropy() + + shape_info = _PSD(shape) + shape_term = 0.5 * shape_info.log_pdet + + def regular(dim, df): + halfsum = 0.5 * (dim + df) + half_df = 0.5 * df + return ( + -gammaln(halfsum) + gammaln(half_df) + + 0.5 * dim * np.log(df * np.pi) + halfsum + * (psi(halfsum) - psi(half_df)) + + shape_term + ) + + def asymptotic(dim, df): + # Formula from Wolfram Alpha: + # "asymptotic expansion -gammaln((m+d)/2) + gammaln(d/2) + (m*log(d*pi))/2 + # + ((m+d)/2) * (digamma((m+d)/2) - digamma(d/2))" + return ( + dim * norm._entropy() + dim / df + - dim * (dim - 2) * df**-2.0 / 4 + + dim**2 * (dim - 2) * df**-3.0 / 6 + + dim * (-3 * dim**3 + 8 * dim**2 - 8) * df**-4.0 / 24 + + dim**2 * (3 * dim**3 - 10 * dim**2 + 16) * df**-5.0 / 30 + + shape_term + )[()] + + # preserves ~12 digits accuracy up to at least `dim=1e5`. See gh-18465. 
+ threshold = dim * 100 * 4 / (np.log(dim) + 1) + return _lazywhere(df >= threshold, (dim, df), f=asymptotic, f2=regular) + + def entropy(self, loc=None, shape=1, df=1): + """Calculate the differential entropy of a multivariate + t-distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + + Returns + ------- + h : float + Differential entropy + + """ + dim, loc, shape, df = self._process_parameters(None, shape, df) + return self._entropy(dim, df, shape) + + def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None): + """Draw random samples from a multivariate t-distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `P`), where `P` is the + dimension of the random variable. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.rvs(loc, shape, df) + array([[0.93477495, 3.00408716]]) + + """ + # For implementation details, see equation (3): + # + # Hofert, "On Sampling from the Multivariatet Distribution", 2013 + # http://rjournal.github.io/archive/2013-2/hofert.pdf + # + dim, loc, shape, df = self._process_parameters(loc, shape, df) + if random_state is not None: + rng = check_random_state(random_state) + else: + rng = self._random_state + + if np.isinf(df): + x = np.ones(size) + else: + x = rng.chisquare(df, size=size) / df + + z = rng.multivariate_normal(np.zeros(dim), shape, size=size) + samples = loc + z / np.sqrt(x)[..., None] + return _squeeze_output(samples) + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. 
+ """ + x = np.asarray(x, dtype=float) + if x.ndim == 0: + x = x[np.newaxis] + elif x.ndim == 1: + if dim == 1: + x = x[:, np.newaxis] + else: + x = x[np.newaxis, :] + return x + + def _process_parameters(self, loc, shape, df): + """ + Infer dimensionality from location array and shape matrix, handle + defaults, and ensure compatible dimensions. + """ + if loc is None and shape is None: + loc = np.asarray(0, dtype=float) + shape = np.asarray(1, dtype=float) + dim = 1 + elif loc is None: + shape = np.asarray(shape, dtype=float) + if shape.ndim < 2: + dim = 1 + else: + dim = shape.shape[0] + loc = np.zeros(dim) + elif shape is None: + loc = np.asarray(loc, dtype=float) + dim = loc.size + shape = np.eye(dim) + else: + shape = np.asarray(shape, dtype=float) + loc = np.asarray(loc, dtype=float) + dim = loc.size + + if dim == 1: + loc = loc.reshape(1) + shape = shape.reshape(1, 1) + + if loc.ndim != 1 or loc.shape[0] != dim: + raise ValueError("Array 'loc' must be a vector of length %d." % + dim) + if shape.ndim == 0: + shape = shape * np.eye(dim) + elif shape.ndim == 1: + shape = np.diag(shape) + elif shape.ndim == 2 and shape.shape != (dim, dim): + rows, cols = shape.shape + if rows != cols: + msg = ("Array 'cov' must be square if it is two dimensional," + " but cov.shape = %s." % str(shape.shape)) + else: + msg = ("Dimension mismatch: array 'cov' is of shape %s," + " but 'loc' is a vector of length %d.") + msg = msg % (str(shape.shape), len(loc)) + raise ValueError(msg) + elif shape.ndim > 2: + raise ValueError("Array 'cov' must be at most two-dimensional," + " but cov.ndim = %d" % shape.ndim) + + # Process degrees of freedom. 
+ if df is None: + df = 1 + elif df <= 0: + raise ValueError("'df' must be greater than zero.") + elif np.isnan(df): + raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.") + + return dim, loc, shape, df + + +class multivariate_t_frozen(multi_rv_frozen): + + def __init__(self, loc=None, shape=1, df=1, allow_singular=False, + seed=None): + """Create a frozen multivariate t distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import multivariate_t + >>> loc = np.zeros(3) + >>> shape = np.eye(3) + >>> df = 10 + >>> dist = multivariate_t(loc, shape, df) + >>> dist.rvs() + array([[ 0.81412036, -1.53612361, 0.42199647]]) + >>> dist.pdf([1, 1, 1]) + array([0.01237803]) + + """ + self._dist = multivariate_t_gen(seed) + dim, loc, shape, df = self._dist._process_parameters(loc, shape, df) + self.dim, self.loc, self.shape, self.df = dim, loc, shape, df + self.shape_info = _PSD(shape, allow_singular=allow_singular) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + U = self.shape_info.U + log_pdet = self.shape_info.log_pdet + return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim, + self.shape_info.rank) + + def cdf(self, x, *, maxpts=None, lower_limit=None, random_state=None): + x = self._dist._process_quantiles(x, self.dim) + return self._dist._cdf(x, self.loc, self.shape, self.df, self.dim, + maxpts, lower_limit, random_state) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(loc=self.loc, + shape=self.shape, + df=self.df, + size=size, + random_state=random_state) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.shape) + + +multivariate_t = multivariate_t_gen() + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_t_gen and fill in default strings in class docstrings +for name in 
['logpdf', 'pdf', 'rvs', 'cdf', 'entropy']: + method = multivariate_t_gen.__dict__[name] + method_frozen = multivariate_t_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + mvt_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params) + + +_mhg_doc_default_callparams = """\ +m : array_like + The number of each type of object in the population. + That is, :math:`m[i]` is the number of objects of + type :math:`i`. +n : array_like + The number of samples taken from the population. +""" + +_mhg_doc_callparams_note = """\ +`m` must be an array of positive integers. If the quantile +:math:`i` contains values out of the range :math:`[0, m_i]` +where :math:`m_i` is the number of objects of type :math:`i` +in the population or if the parameters are inconsistent with one +another (e.g. ``x.sum() != n``), methods return the appropriate +value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative +values, the result will contain ``nan`` there. +""" + +_mhg_doc_frozen_callparams = "" + +_mhg_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mhg_docdict_params = { + '_doc_default_callparams': _mhg_doc_default_callparams, + '_doc_callparams_note': _mhg_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mhg_docdict_noparams = { + '_doc_default_callparams': _mhg_doc_frozen_callparams, + '_doc_callparams_note': _mhg_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_hypergeom_gen(multi_rv_generic): + r"""A multivariate hypergeometric random variable. + + Methods + ------- + pmf(x, m, n) + Probability mass function. + logpmf(x, m, n) + Log of the probability mass function. + rvs(m, n, size=1, random_state=None) + Draw random samples from a multivariate hypergeometric + distribution. + mean(m, n) + Mean of the multivariate hypergeometric distribution. 
+ var(m, n) + Variance of the multivariate hypergeometric distribution. + cov(m, n) + Compute the covariance matrix of the multivariate + hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_callparams_note)s + + The probability mass function for `multivariate_hypergeom` is + + .. math:: + + P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1} + \binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad + (x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with } + \sum_{i=1}^k x_i = n + + where :math:`m_i` are the number of objects of type :math:`i`, :math:`M` + is the total number of objects in the population (sum of all the + :math:`m_i`), and :math:`n` is the size of the sample to be taken + from the population. + + .. versionadded:: 1.6.0 + + Examples + -------- + To evaluate the probability mass function of the multivariate + hypergeometric distribution, with a dichotomous population of size + :math:`10` and :math:`20`, at a sample of size :math:`12` with + :math:`8` objects of the first type and :math:`4` objects of the + second type, use: + + >>> from scipy.stats import multivariate_hypergeom + >>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12) + 0.0025207176631464523 + + The `multivariate_hypergeom` distribution is identical to the + corresponding `hypergeom` distribution (tiny numerical differences + notwithstanding) when only two types (good and bad) of objects + are present in the population as in the example above. 
Consider + another example for a comparison with the hypergeometric distribution: + + >>> from scipy.stats import hypergeom + >>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4) + 0.4395604395604395 + >>> hypergeom.pmf(k=3, M=15, n=4, N=10) + 0.43956043956044005 + + The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs`` + support broadcasting, under the convention that the vector parameters + (``x``, ``m``, and ``n``) are interpreted as if each row along the last + axis is a single object. For instance, we can combine the previous two + calls to `multivariate_hypergeom` as + + >>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]], + ... n=[12, 4]) + array([0.00252072, 0.43956044]) + + This broadcasting also works for ``cov``, where the output objects are + square matrices of size ``m.shape[-1]``. For example: + + >>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12]) + array([[[ 1.05, -1.05], + [-1.05, 1.05]], + [[ 1.56, -1.56], + [-1.56, 1.56]]]) + + That is, ``result[0]`` is equal to + ``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal + to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``. + + Alternatively, the object may be called (as a function) to fix the `m` + and `n` parameters, returning a "frozen" multivariate hypergeometric + random variable. + + >>> rv = multivariate_hypergeom(m=[10, 20], n=12) + >>> rv.pmf(x=[8, 4]) + 0.0025207176631464523 + + See Also + -------- + scipy.stats.hypergeom : The hypergeometric distribution. + scipy.stats.multinomial : The multinomial distribution. + + References + ---------- + .. [1] The Multivariate Hypergeometric Distribution, + http://www.randomservices.org/random/urn/MultiHypergeometric.html + .. [2] Thomas J. 
Sargent and John Stachurski, 2020, + Multivariate Hypergeometric Distribution + https://python.quantecon.org/multi_hyper.html + """ + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params) + + def __call__(self, m, n, seed=None): + """Create a frozen multivariate_hypergeom distribution. + + See `multivariate_hypergeom_frozen` for more information. + """ + return multivariate_hypergeom_frozen(m, n, seed=seed) + + def _process_parameters(self, m, n): + m = np.asarray(m) + n = np.asarray(n) + if m.size == 0: + m = m.astype(int) + if n.size == 0: + n = n.astype(int) + if not np.issubdtype(m.dtype, np.integer): + raise TypeError("'m' must an array of integers.") + if not np.issubdtype(n.dtype, np.integer): + raise TypeError("'n' must an array of integers.") + if m.ndim == 0: + raise ValueError("'m' must be an array with" + " at least one dimension.") + + # check for empty arrays + if m.size != 0: + n = n[..., np.newaxis] + + m, n = np.broadcast_arrays(m, n) + + # check for empty arrays + if m.size != 0: + n = n[..., 0] + + mcond = m < 0 + + M = m.sum(axis=-1) + + ncond = (n < 0) | (n > M) + return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond + + def _process_quantiles(self, x, M, m, n): + x = np.asarray(x) + if not np.issubdtype(x.dtype, np.integer): + raise TypeError("'x' must an array of integers.") + if x.ndim == 0: + raise ValueError("'x' must be an array with" + " at least one dimension.") + if not x.shape[-1] == m.shape[-1]: + raise ValueError(f"Size of each quantile must be size of 'm': " + f"received {x.shape[-1]}, " + f"but expected {m.shape[-1]}.") + + # check for empty arrays + if m.size != 0: + n = n[..., np.newaxis] + M = M[..., np.newaxis] + + x, m, n, M = np.broadcast_arrays(x, m, n, M) + + # check for empty arrays + if m.size != 0: + n, M = n[..., 0], M[..., 0] + + xcond = (x < 0) | (x > m) + return (x, M, m, n, xcond, + np.any(xcond, axis=-1) | (x.sum(axis=-1) != n)) + + def 
_checkresult(self, result, cond, bad_value): + result = np.asarray(result) + if cond.ndim != 0: + result[cond] = bad_value + elif cond: + return bad_value + if result.ndim == 0: + return result[()] + return result + + def _logpmf(self, x, M, m, n, mxcond, ncond): + # This equation of the pmf comes from the relation, + # n combine r = beta(n+1, 1) / beta(r+1, n-r+1) + num = np.zeros_like(m, dtype=np.float64) + den = np.zeros_like(n, dtype=np.float64) + m, x = m[~mxcond], x[~mxcond] + M, n = M[~ncond], n[~ncond] + num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1)) + den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1)) + num[mxcond] = np.nan + den[ncond] = np.nan + num = num.sum(axis=-1) + return num - den + + def logpmf(self, x, m, n): + """Log of the multivariate hypergeometric probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + M, m, n, mcond, ncond, mncond = self._process_parameters(m, n) + (x, M, m, n, xcond, + xcond_reduced) = self._process_quantiles(x, M, m, n) + mxcond = mcond | xcond + ncond = ncond | np.zeros(n.shape, dtype=np.bool_) + + result = self._logpmf(x, M, m, n, mxcond, ncond) + + # replace values for which x was out of the domain; broadcast + # xcond to the right shape + xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_) + result = self._checkresult(result, xcond_, -np.inf) + + # replace values bad for n or m; broadcast + # mncond to the right shape + mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_) + return self._checkresult(result, mncond_, np.nan) + + def pmf(self, x, m, n): + """Multivariate hypergeometric probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. 
+ %(_doc_default_callparams)s + + Returns + ------- + pmf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + out = np.exp(self.logpmf(x, m, n)) + return out + + def mean(self, m, n): + """Mean of the multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : array_like or scalar + The mean of the distribution + """ + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M, n = M[..., np.newaxis], n[..., np.newaxis] + cond = (M == 0) + M = np.ma.masked_array(M, mask=cond) + mu = n*(m/M) + if m.size != 0: + mncond = (mncond[..., np.newaxis] | + np.zeros(mu.shape, dtype=np.bool_)) + return self._checkresult(mu, mncond, np.nan) + + def var(self, m, n): + """Variance of the multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + array_like + The variances of the components of the distribution. This is + the diagonal of the covariance matrix of the distribution + """ + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M, n = M[..., np.newaxis], n[..., np.newaxis] + cond = (M == 0) & (M-1 == 0) + M = np.ma.masked_array(M, mask=cond) + output = n * m/M * (M-m)/M * (M-n)/(M-1) + if m.size != 0: + mncond = (mncond[..., np.newaxis] | + np.zeros(output.shape, dtype=np.bool_)) + return self._checkresult(output, mncond, np.nan) + + def cov(self, m, n): + """Covariance matrix of the multivariate hypergeometric distribution. 
+ + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + cov : array_like + The covariance matrix of the distribution + """ + # see [1]_ for the formula and [2]_ for implementation + # cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2) + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M = M[..., np.newaxis, np.newaxis] + n = n[..., np.newaxis, np.newaxis] + cond = (M == 0) & (M-1 == 0) + M = np.ma.masked_array(M, mask=cond) + output = (-n * (M-n)/(M-1) * + np.einsum("...i,...j->...ij", m, m) / (M**2)) + # check for empty arrays + if m.size != 0: + M, n = M[..., 0, 0], n[..., 0, 0] + cond = cond[..., 0, 0] + dim = m.shape[-1] + # diagonal entries need to be computed differently + for i in range(dim): + output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i])) + output[..., i, i] = output[..., i, i] / (M-1) + output[..., i, i] = output[..., i, i] / (M**2) + if m.size != 0: + mncond = (mncond[..., np.newaxis, np.newaxis] | + np.zeros(output.shape, dtype=np.bool_)) + return self._checkresult(output, mncond, np.nan) + + def rvs(self, m, n, size=None, random_state=None): + """Draw random samples from a multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw. Default is ``None``, in which case a + single variate is returned as an array with shape ``m.shape``. + %(_doc_random_state)s + + Returns + ------- + rvs : array_like + Random variates of shape ``size`` or ``m.shape`` + (if ``size=None``). + + Notes + ----- + %(_doc_callparams_note)s + + Also note that NumPy's `multivariate_hypergeometric` sampler is not + used as it doesn't support broadcasting. 
+ """ + M, m, n, _, _, _ = self._process_parameters(m, n) + + random_state = self._get_random_state(random_state) + + if size is not None and isinstance(size, int): + size = (size, ) + + if size is None: + rvs = np.empty(m.shape, dtype=m.dtype) + else: + rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype) + rem = M + + # This sampler has been taken from numpy gh-13794 + # https://github.com/numpy/numpy/pull/13794 + for c in range(m.shape[-1] - 1): + rem = rem - m[..., c] + n0mask = n == 0 + rvs[..., c] = (~n0mask * + random_state.hypergeometric(m[..., c], + rem + n0mask, + n + n0mask, + size=size)) + n = n - rvs[..., c] + rvs[..., m.shape[-1] - 1] = n + + return rvs + + +multivariate_hypergeom = multivariate_hypergeom_gen() + + +class multivariate_hypergeom_frozen(multi_rv_frozen): + def __init__(self, m, n, seed=None): + self._dist = multivariate_hypergeom_gen(seed) + (self.M, self.m, self.n, + self.mcond, self.ncond, + self.mncond) = self._dist._process_parameters(m, n) + + # monkey patch self._dist + def _process_parameters(m, n): + return (self.M, self.m, self.n, + self.mcond, self.ncond, + self.mncond) + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, self.m, self.n) + + def pmf(self, x): + return self._dist.pmf(x, self.m, self.n) + + def mean(self): + return self._dist.mean(self.m, self.n) + + def var(self): + return self._dist.var(self.m, self.n) + + def cov(self): + return self._dist.cov(self.m, self.n) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.m, self.n, + size=size, + random_state=random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_hypergeom and fill in default strings in class docstrings +for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']: + method = multivariate_hypergeom_gen.__dict__[name] + method_frozen = multivariate_hypergeom_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + 
method.__doc__, mhg_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + mhg_docdict_params) + + +class random_table_gen(multi_rv_generic): + r"""Contingency tables from independent samples with fixed marginal sums. + + This is the distribution of random tables with given row and column vector + sums. This distribution represents the set of random tables under the null + hypothesis that rows and columns are independent. It is used in hypothesis + tests of independence. + + Because of assumed independence, the expected frequency of each table + element can be computed from the row and column sums, so that the + distribution is completely determined by these two vectors. + + Methods + ------- + logpmf(x) + Log-probability of table `x` to occur in the distribution. + pmf(x) + Probability of table `x` to occur in the distribution. + mean(row, col) + Mean table. + rvs(row, col, size=None, method=None, random_state=None) + Draw random tables with given row and column vector sums. + + Parameters + ---------- + %(_doc_row_col)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_row_col_note)s + + Random elements from the distribution are generated either with Boyett's + [1]_ or Patefield's algorithm [2]_. Boyett's algorithm has + O(N) time and space complexity, where N is the total sum of entries in the + table. Patefield's algorithm has O(K x log(N)) time complexity, where K is + the number of cells in the table and requires only a small constant work + space. By default, the `rvs` method selects the fastest algorithm based on + the input, but you can specify the algorithm with the keyword `method`. + Allowed values are "boyett" and "patefield". + + .. 
versionadded:: 1.10.0 + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.mean(row, col) + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> dist = random_table(row, col) + >>> dist.rvs(random_state=123) + array([[1., 0., 0.], + [1., 3., 1.]]) + + References + ---------- + .. [1] J. Boyett, AS 144 Appl. Statist. 28 (1979) 329-332 + .. [2] W.M. Patefield, AS 159 Appl. Statist. 30 (1981) 91-97 + """ + + def __init__(self, seed=None): + super().__init__(seed) + + def __call__(self, row, col, *, seed=None): + """Create a frozen distribution of tables with given marginals. + + See `random_table_frozen` for more information. + """ + return random_table_frozen(row, col, seed=seed) + + def logpmf(self, x, row, col): + """Log-probability of table to occur in the distribution. + + Parameters + ---------- + %(_doc_x)s + %(_doc_row_col)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x`. + + Notes + ----- + %(_doc_row_col_note)s + + If row and column marginals of `x` do not match `row` and `col`, + negative infinity is returned. + + Examples + -------- + >>> from scipy.stats import random_table + >>> import numpy as np + + >>> x = [[1, 5, 1], [2, 3, 1]] + >>> row = np.sum(x, axis=1) + >>> col = np.sum(x, axis=0) + >>> random_table.logpmf(x, row, col) + -1.6306401200847027 + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. 
+ + >>> d = random_table(row, col) + >>> d.logpmf(x) + -1.6306401200847027 + """ + r, c, n = self._process_parameters(row, col) + x = np.asarray(x) + + if x.ndim < 2: + raise ValueError("`x` must be at least two-dimensional") + + dtype_is_int = np.issubdtype(x.dtype, np.integer) + with np.errstate(invalid='ignore'): + if not dtype_is_int and not np.all(x.astype(int) == x): + raise ValueError("`x` must contain only integral values") + + # x does not contain NaN if we arrive here + if np.any(x < 0): + raise ValueError("`x` must contain only non-negative values") + + r2 = np.sum(x, axis=-1) + c2 = np.sum(x, axis=-2) + + if r2.shape[-1] != len(r): + raise ValueError("shape of `x` must agree with `row`") + + if c2.shape[-1] != len(c): + raise ValueError("shape of `x` must agree with `col`") + + res = np.empty(x.shape[:-2]) + + mask = np.all(r2 == r, axis=-1) & np.all(c2 == c, axis=-1) + + def lnfac(x): + return gammaln(x + 1) + + res[mask] = (np.sum(lnfac(r), axis=-1) + np.sum(lnfac(c), axis=-1) + - lnfac(n) - np.sum(lnfac(x[mask]), axis=(-1, -2))) + res[~mask] = -np.inf + + return res[()] + + def pmf(self, x, row, col): + """Probability of table to occur in the distribution. + + Parameters + ---------- + %(_doc_x)s + %(_doc_row_col)s + + Returns + ------- + pmf : ndarray or scalar + Probability mass function evaluated at `x`. + + Notes + ----- + %(_doc_row_col_note)s + + If row and column marginals of `x` do not match `row` and `col`, + zero is returned. + + Examples + -------- + >>> from scipy.stats import random_table + >>> import numpy as np + + >>> x = [[1, 5, 1], [2, 3, 1]] + >>> row = np.sum(x, axis=1) + >>> col = np.sum(x, axis=0) + >>> random_table.pmf(x, row, col) + 0.19580419580419592 + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. 
+ + >>> d = random_table(row, col) + >>> d.pmf(x) + 0.19580419580419592 + """ + return np.exp(self.logpmf(x, row, col)) + + def mean(self, row, col): + """Mean of distribution of conditional tables. + %(_doc_mean_params)s + + Returns + ------- + mean: ndarray + Mean of the distribution. + + Notes + ----- + %(_doc_row_col_note)s + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.mean(row, col) + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> d = random_table(row, col) + >>> d.mean() + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + """ + r, c, n = self._process_parameters(row, col) + return np.outer(r, c) / n + + def rvs(self, row, col, *, size=None, method=None, random_state=None): + """Draw random tables with fixed column and row marginals. + + Parameters + ---------- + %(_doc_row_col)s + size : integer, optional + Number of samples to draw (default 1). + method : str, optional + Which method to use, "boyett" or "patefield". If None (default), + selects the fastest method for this input. + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random 2D tables of shape (`size`, `len(row)`, `len(col)`). + + Notes + ----- + %(_doc_row_col_note)s + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.rvs(row, col, random_state=123) + array([[1., 0., 0.], + [1., 3., 1.]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. 
+ + >>> d = random_table(row, col) + >>> d.rvs(random_state=123) + array([[1., 0., 0.], + [1., 3., 1.]]) + """ + r, c, n = self._process_parameters(row, col) + size, shape = self._process_size_shape(size, r, c) + + random_state = self._get_random_state(random_state) + meth = self._process_rvs_method(method, r, c, n) + + return meth(r, c, n, size, random_state).reshape(shape) + + @staticmethod + def _process_parameters(row, col): + """ + Check that row and column vectors are one-dimensional, that they do + not contain negative or non-integer entries, and that the sums over + both vectors are equal. + """ + r = np.array(row, dtype=np.int64, copy=True) + c = np.array(col, dtype=np.int64, copy=True) + + if np.ndim(r) != 1: + raise ValueError("`row` must be one-dimensional") + if np.ndim(c) != 1: + raise ValueError("`col` must be one-dimensional") + + if np.any(r < 0): + raise ValueError("each element of `row` must be non-negative") + if np.any(c < 0): + raise ValueError("each element of `col` must be non-negative") + + n = np.sum(r) + if n != np.sum(c): + raise ValueError("sums over `row` and `col` must be equal") + + if not np.all(r == np.asarray(row)): + raise ValueError("each element of `row` must be an integer") + if not np.all(c == np.asarray(col)): + raise ValueError("each element of `col` must be an integer") + + return r, c, n + + @staticmethod + def _process_size_shape(size, r, c): + """ + Compute the number of samples to be drawn and the shape of the output + """ + shape = (len(r), len(c)) + + if size is None: + return 1, shape + + size = np.atleast_1d(size) + if not np.issubdtype(size.dtype, np.integer) or np.any(size < 0): + raise ValueError("`size` must be a non-negative integer or `None`") + + return np.prod(size), tuple(size) + shape + + @classmethod + def _process_rvs_method(cls, method, r, c, n): + known_methods = { + None: cls._rvs_select(r, c, n), + "boyett": cls._rvs_boyett, + "patefield": cls._rvs_patefield, + } + try: + return 
known_methods[method] + except KeyError: + raise ValueError(f"'{method}' not recognized, " + f"must be one of {set(known_methods)}") + + @classmethod + def _rvs_select(cls, r, c, n): + fac = 1.0 # benchmarks show that this value is about 1 + k = len(r) * len(c) # number of cells + # n + 1 guards against failure if n == 0 + if n > fac * np.log(n + 1) * k: + return cls._rvs_patefield + return cls._rvs_boyett + + @staticmethod + def _rvs_boyett(row, col, ntot, size, random_state): + return _rcont.rvs_rcont1(row, col, ntot, size, random_state) + + @staticmethod + def _rvs_patefield(row, col, ntot, size, random_state): + return _rcont.rvs_rcont2(row, col, ntot, size, random_state) + + +random_table = random_table_gen() + + +class random_table_frozen(multi_rv_frozen): + def __init__(self, row, col, *, seed=None): + self._dist = random_table_gen(seed) + self._params = self._dist._process_parameters(row, col) + + # monkey patch self._dist + def _process_parameters(r, c): + return self._params + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, None, None) + + def pmf(self, x): + return self._dist.pmf(x, None, None) + + def mean(self): + return self._dist.mean(None, None) + + def rvs(self, size=None, method=None, random_state=None): + # optimisations are possible here + return self._dist.rvs(None, None, size=size, method=method, + random_state=random_state) + + +_ctab_doc_row_col = """\ +row : array_like + Sum of table entries in each row. +col : array_like + Sum of table entries in each column.""" + +_ctab_doc_x = """\ +x : array-like + Two-dimensional table of non-negative integers, or a + multi-dimensional array with the last two dimensions + corresponding with the tables.""" + +_ctab_doc_row_col_note = """\ +The row and column vectors must be one-dimensional, not empty, +and each sum up to the same value. 
They cannot contain negative +or noninteger entries.""" + +_ctab_doc_mean_params = f""" +Parameters +---------- +{_ctab_doc_row_col}""" + +_ctab_doc_row_col_note_frozen = """\ +See class definition for a detailed description of parameters.""" + +_ctab_docdict = { + "_doc_random_state": _doc_random_state, + "_doc_row_col": _ctab_doc_row_col, + "_doc_x": _ctab_doc_x, + "_doc_mean_params": _ctab_doc_mean_params, + "_doc_row_col_note": _ctab_doc_row_col_note, +} + +_ctab_docdict_frozen = _ctab_docdict.copy() +_ctab_docdict_frozen.update({ + "_doc_row_col": "", + "_doc_mean_params": "", + "_doc_row_col_note": _ctab_doc_row_col_note_frozen, +}) + + +def _docfill(obj, docdict, template=None): + obj.__doc__ = doccer.docformat(template or obj.__doc__, docdict) + + +# Set frozen generator docstrings from corresponding docstrings in +# random_table and fill in default strings in class docstrings +_docfill(random_table_gen, _ctab_docdict) +for name in ['logpmf', 'pmf', 'mean', 'rvs']: + method = random_table_gen.__dict__[name] + method_frozen = random_table_frozen.__dict__[name] + _docfill(method_frozen, _ctab_docdict_frozen, method.__doc__) + _docfill(method, _ctab_docdict) + + +class uniform_direction_gen(multi_rv_generic): + r"""A vector-valued uniform direction. + + Return a random direction (unit vector). The `dim` keyword specifies + the dimensionality of the space. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random directions. + + Parameters + ---------- + dim : scalar + Dimension of directions. + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. 
+ + Notes + ----- + This distribution generates unit vectors uniformly distributed on + the surface of a hypersphere. These can be interpreted as random + directions. + For example, if `dim` is 3, 3D vectors from the surface of :math:`S^2` + will be sampled. + + References + ---------- + .. [1] Marsaglia, G. (1972). "Choosing a Point from the Surface of a + Sphere". Annals of Mathematical Statistics. 43 (2): 645-646. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import uniform_direction + >>> x = uniform_direction.rvs(3) + >>> np.linalg.norm(x) + 1. + + This generates one random direction, a vector on the surface of + :math:`S^2`. + + Alternatively, the object may be called (as a function) to return a frozen + distribution with fixed `dim` parameter. Here, + we create a `uniform_direction` with ``dim=3`` and draw 5 observations. + The samples are then arranged in an array of shape 5x3. + + >>> rng = np.random.default_rng() + >>> uniform_sphere_dist = uniform_direction(3) + >>> unit_vectors = uniform_sphere_dist.rvs(5, random_state=rng) + >>> unit_vectors + array([[ 0.56688642, -0.1332634 , -0.81294566], + [-0.427126 , -0.74779278, 0.50830044], + [ 0.3793989 , 0.92346629, 0.05715323], + [ 0.36428383, -0.92449076, -0.11231259], + [-0.27733285, 0.94410968, -0.17816678]]) + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen n-dimensional uniform direction distribution. + + See `uniform_direction` for more information. 
+ """ + return uniform_direction_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim < 1 or dim != int(dim): + raise ValueError("Dimension of vector must be specified, " + "and must be an integer greater than 0.") + + return int(dim) + + def rvs(self, dim, size=None, random_state=None): + """Draw random samples from S(N-1). + + Parameters + ---------- + dim : integer + Dimension of space (N). + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is ``None`` (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + rvs : ndarray + Random direction vectors + + """ + random_state = self._get_random_state(random_state) + if size is None: + size = np.array([], dtype=int) + size = np.atleast_1d(size) + + dim = self._process_parameters(dim) + + samples = _sample_uniform_direction(dim, size, random_state) + return samples + + +uniform_direction = uniform_direction_gen() + + +class uniform_direction_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen n-dimensional uniform direction distribution. 
+ + Parameters + ---------- + dim : int + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Examples + -------- + >>> from scipy.stats import uniform_direction + >>> x = uniform_direction(3) + >>> x.rvs() + + """ + self._dist = uniform_direction_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=None, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +def _sample_uniform_direction(dim, size, random_state): + """ + Private method to generate uniform directions + Reference: Marsaglia, G. (1972). "Choosing a Point from the Surface of a + Sphere". Annals of Mathematical Statistics. 43 (2): 645-646. + """ + samples_shape = np.append(size, dim) + samples = random_state.standard_normal(samples_shape) + samples /= np.linalg.norm(samples, axis=-1, keepdims=True) + return samples + + +_dirichlet_mn_doc_default_callparams = """\ +alpha : array_like + The concentration parameters. The number of entries along the last axis + determines the dimensionality of the distribution. Each entry must be + strictly positive. +n : int or array_like + The number of trials. Each element must be a strictly positive integer. 
+""" + +_dirichlet_mn_doc_frozen_callparams = "" + +_dirichlet_mn_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +dirichlet_mn_docdict_params = { + '_dirichlet_mn_doc_default_callparams': _dirichlet_mn_doc_default_callparams, + '_doc_random_state': _doc_random_state +} + +dirichlet_mn_docdict_noparams = { + '_dirichlet_mn_doc_default_callparams': _dirichlet_mn_doc_frozen_callparams, + '_doc_random_state': _doc_random_state +} + + +def _dirichlet_multinomial_check_parameters(alpha, n, x=None): + + alpha = np.asarray(alpha) + n = np.asarray(n) + + if x is not None: + # Ensure that `x` and `alpha` are arrays. If the shapes are + # incompatible, NumPy will raise an appropriate error. + try: + x, alpha = np.broadcast_arrays(x, alpha) + except ValueError as e: + msg = "`x` and `alpha` must be broadcastable." + raise ValueError(msg) from e + + x_int = np.floor(x) + if np.any(x < 0) or np.any(x != x_int): + raise ValueError("`x` must contain only non-negative integers.") + x = x_int + + if np.any(alpha <= 0): + raise ValueError("`alpha` must contain only positive values.") + + n_int = np.floor(n) + if np.any(n <= 0) or np.any(n != n_int): + raise ValueError("`n` must be a positive integer.") + n = n_int + + sum_alpha = np.sum(alpha, axis=-1) + sum_alpha, n = np.broadcast_arrays(sum_alpha, n) + + return (alpha, sum_alpha, n) if x is None else (alpha, sum_alpha, n, x) + + +class dirichlet_multinomial_gen(multi_rv_generic): + r"""A Dirichlet multinomial random variable. + + The Dirichlet multinomial distribution is a compound probability + distribution: it is the multinomial distribution with number of trials + `n` and class probabilities ``p`` randomly sampled from a Dirichlet + distribution with concentration parameters ``alpha``. + + Methods + ------- + logpmf(x, alpha, n): + Log of the probability mass function. + pmf(x, alpha, n): + Probability mass function. 
+ mean(alpha, n): + Mean of the Dirichlet multinomial distribution. + var(alpha, n): + Variance of the Dirichlet multinomial distribution. + cov(alpha, n): + The covariance of the Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + %(_doc_random_state)s + + See Also + -------- + scipy.stats.dirichlet : The dirichlet distribution. + scipy.stats.multinomial : The multinomial distribution. + + References + ---------- + .. [1] Dirichlet-multinomial distribution, Wikipedia, + https://www.wikipedia.org/wiki/Dirichlet-multinomial_distribution + + Examples + -------- + >>> from scipy.stats import dirichlet_multinomial + + Get the PMF + + >>> n = 6 # number of trials + >>> alpha = [3, 4, 5] # concentration parameters + >>> x = [1, 2, 3] # counts + >>> dirichlet_multinomial.pmf(x, alpha, n) + 0.08484162895927604 + + If the sum of category counts does not equal the number of trials, + the probability mass is zero. + + >>> dirichlet_multinomial.pmf(x, alpha, n=7) + 0.0 + + Get the log of the PMF + + >>> dirichlet_multinomial.logpmf(x, alpha, n) + -2.4669689491013327 + + Get the mean + + >>> dirichlet_multinomial.mean(alpha, n) + array([1.5, 2. , 2.5]) + + Get the variance + + >>> dirichlet_multinomial.var(alpha, n) + array([1.55769231, 1.84615385, 2.01923077]) + + Get the covariance + + >>> dirichlet_multinomial.cov(alpha, n) + array([[ 1.55769231, -0.69230769, -0.86538462], + [-0.69230769, 1.84615385, -1.15384615], + [-0.86538462, -1.15384615, 2.01923077]]) + + Alternatively, the object may be called (as a function) to fix the + `alpha` and `n` parameters, returning a "frozen" Dirichlet multinomial + random variable. + + >>> dm = dirichlet_multinomial(alpha, n) + >>> dm.pmf(x) + 0.08484162895927579 + + All methods are fully vectorized. Each element of `x` and `alpha` is + a vector (along the last axis), each element of `n` is an + integer (scalar), and the result is computed element-wise. 
+ + >>> x = [[1, 2, 3], [4, 5, 6]] + >>> alpha = [[1, 2, 3], [4, 5, 6]] + >>> n = [6, 15] + >>> dirichlet_multinomial.pmf(x, alpha, n) + array([0.06493506, 0.02626937]) + + >>> dirichlet_multinomial.cov(alpha, n).shape # both covariance matrices + (2, 3, 3) + + Broadcasting according to standard NumPy conventions is supported. Here, + we have four sets of concentration parameters (each a two element vector) + for each of three numbers of trials (each a scalar). + + >>> alpha = [[3, 4], [4, 5], [5, 6], [6, 7]] + >>> n = [[6], [7], [8]] + >>> dirichlet_multinomial.mean(alpha, n).shape + (3, 4, 2) + + """ + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, + dirichlet_mn_docdict_params) + + def __call__(self, alpha, n, seed=None): + return dirichlet_multinomial_frozen(alpha, n, seed=seed) + + def logpmf(self, x, alpha, n): + """The log of the probability mass function. + + Parameters + ---------- + x: ndarray + Category counts (non-negative integers). Must be broadcastable + with shape parameter ``alpha``. If multidimensional, the last axis + must correspond with the categories. + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray or scalar + Log of the probability mass function. + + """ + + a, Sa, n, x = _dirichlet_multinomial_check_parameters(alpha, n, x) + + out = np.asarray(loggamma(Sa) + loggamma(n + 1) - loggamma(n + Sa)) + out += (loggamma(x + a) - (loggamma(a) + loggamma(x + 1))).sum(axis=-1) + np.place(out, n != x.sum(axis=-1), -np.inf) + return out[()] + + def pmf(self, x, alpha, n): + """Probability mass function for a Dirichlet multinomial distribution. + + Parameters + ---------- + x: ndarray + Category counts (non-negative integers). Must be broadcastable + with shape parameter ``alpha``. If multidimensional, the last axis + must correspond with the categories. 
+ %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray or scalar + Probability mass function. + + """ + return np.exp(self.logpmf(x, alpha, n)) + + def mean(self, alpha, n): + """Mean of a Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray + Mean of a Dirichlet multinomial distribution. + + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + n, Sa = n[..., np.newaxis], Sa[..., np.newaxis] + return n * a / Sa + + def var(self, alpha, n): + """The variance of the Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: array_like + The variances of the components of the distribution. This is + the diagonal of the covariance matrix of the distribution. + + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + n, Sa = n[..., np.newaxis], Sa[..., np.newaxis] + return n * a / Sa * (1 - a/Sa) * (n + Sa) / (1 + Sa) + + def cov(self, alpha, n): + """Covariance matrix of a Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out : array_like + The covariance matrix of the distribution. 
+ + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + var = dirichlet_multinomial.var(a, n) + + n, Sa = n[..., np.newaxis, np.newaxis], Sa[..., np.newaxis, np.newaxis] + aiaj = a[..., :, np.newaxis] * a[..., np.newaxis, :] + cov = -n * aiaj / Sa ** 2 * (n + Sa) / (1 + Sa) + + ii = np.arange(cov.shape[-1]) + cov[..., ii, ii] = var + return cov + + +dirichlet_multinomial = dirichlet_multinomial_gen() + + +class dirichlet_multinomial_frozen(multi_rv_frozen): + def __init__(self, alpha, n, seed=None): + alpha, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + self.alpha = alpha + self.n = n + self._dist = dirichlet_multinomial_gen(seed) + + def logpmf(self, x): + return self._dist.logpmf(x, self.alpha, self.n) + + def pmf(self, x): + return self._dist.pmf(x, self.alpha, self.n) + + def mean(self): + return self._dist.mean(self.alpha, self.n) + + def var(self): + return self._dist.var(self.alpha, self.n) + + def cov(self): + return self._dist.cov(self.alpha, self.n) + + +# Set frozen generator docstrings from corresponding docstrings in +# dirichlet_multinomial and fill in default strings in class docstrings. +for name in ['logpmf', 'pmf', 'mean', 'var', 'cov']: + method = dirichlet_multinomial_gen.__dict__[name] + method_frozen = dirichlet_multinomial_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, dirichlet_mn_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + dirichlet_mn_docdict_params) + + +class vonmises_fisher_gen(multi_rv_generic): + r"""A von Mises-Fisher variable. + + The `mu` keyword specifies the mean direction vector. The `kappa` keyword + specifies the concentration parameter. + + Methods + ------- + pdf(x, mu=None, kappa=1) + Probability density function. + logpdf(x, mu=None, kappa=1) + Log of the probability density function. + rvs(mu=None, kappa=1, size=1, random_state=None) + Draw random samples from a von Mises-Fisher distribution. 
+ entropy(mu=None, kappa=1) + Compute the differential entropy of the von Mises-Fisher distribution. + fit(data) + Fit a von Mises-Fisher distribution to data. + + Parameters + ---------- + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + See Also + -------- + scipy.stats.vonmises : Von-Mises Fisher distribution in 2D on a circle + uniform_direction : uniform distribution on the surface of a hypersphere + + Notes + ----- + The von Mises-Fisher distribution is a directional distribution on the + surface of the unit hypersphere. The probability density + function of a unit vector :math:`\mathbf{x}` is + + .. math:: + + f(\mathbf{x}) = \frac{\kappa^{d/2-1}}{(2\pi)^{d/2}I_{d/2-1}(\kappa)} + \exp\left(\kappa \mathbf{\mu}^T\mathbf{x}\right), + + where :math:`\mathbf{\mu}` is the mean direction, :math:`\kappa` the + concentration parameter, :math:`d` the dimension and :math:`I` the + modified Bessel function of the first kind. As :math:`\mu` represents + a direction, it must be a unit vector or in other words, a point + on the hypersphere: :math:`\mathbf{\mu}\in S^{d-1}`. :math:`\kappa` is a + concentration parameter, which means that it must be positive + (:math:`\kappa>0`) and that the distribution becomes more narrow with + increasing :math:`\kappa`. In that sense, the reciprocal value + :math:`1/\kappa` resembles the variance parameter of the normal + distribution. + + The von Mises-Fisher distribution often serves as an analogue of the + normal distribution on the sphere. 
Intuitively, for unit vectors, a + useful distance measure is given by the angle :math:`\alpha` between + them. This is exactly what the scalar product + :math:`\mathbf{\mu}^T\mathbf{x}=\cos(\alpha)` in the + von Mises-Fisher probability density function describes: the angle + between the mean direction :math:`\mathbf{\mu}` and the vector + :math:`\mathbf{x}`. The larger the angle between them, the smaller the + probability to observe :math:`\mathbf{x}` for this particular mean + direction :math:`\mathbf{\mu}`. + + In dimensions 2 and 3, specialized algorithms are used for fast sampling + [2]_, [3]_. For dimensions of 4 or higher the rejection sampling algorithm + described in [4]_ is utilized. This implementation is partially based on + the geomstats package [5]_, [6]_. + + .. versionadded:: 1.11 + + References + ---------- + .. [1] Von Mises-Fisher distribution, Wikipedia, + https://en.wikipedia.org/wiki/Von_Mises%E2%80%93Fisher_distribution + .. [2] Mardia, K., and Jupp, P. Directional statistics. Wiley, 2000. + .. [3] J. Wenzel. Numerically stable sampling of the von Mises Fisher + distribution on S2. + https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf + .. [4] Wood, A. Simulation of the von mises fisher distribution. + Communications in statistics-simulation and computation 23, + 1 (1994), 157-164. https://doi.org/10.1080/03610919408813161 + .. [5] geomstats, Github. MIT License. Accessed: 06.01.2023. + https://github.com/geomstats/geomstats + .. [6] Miolane, N. et al. Geomstats: A Python Package for Riemannian + Geometry in Machine Learning. Journal of Machine Learning Research + 21 (2020). http://jmlr.org/papers/v21/19-027.html + + Examples + -------- + **Visualization of the probability density** + + Plot the probability density in three dimensions for increasing + concentration parameter. The density is calculated by the ``pdf`` + method. 
+ + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import vonmises_fisher + >>> from matplotlib.colors import Normalize + >>> n_grid = 100 + >>> u = np.linspace(0, np.pi, n_grid) + >>> v = np.linspace(0, 2 * np.pi, n_grid) + >>> u_grid, v_grid = np.meshgrid(u, v) + >>> vertices = np.stack([np.cos(v_grid) * np.sin(u_grid), + ... np.sin(v_grid) * np.sin(u_grid), + ... np.cos(u_grid)], + ... axis=2) + >>> x = np.outer(np.cos(v), np.sin(u)) + >>> y = np.outer(np.sin(v), np.sin(u)) + >>> z = np.outer(np.ones_like(u), np.cos(u)) + >>> def plot_vmf_density(ax, x, y, z, vertices, mu, kappa): + ... vmf = vonmises_fisher(mu, kappa) + ... pdf_values = vmf.pdf(vertices) + ... pdfnorm = Normalize(vmin=pdf_values.min(), vmax=pdf_values.max()) + ... ax.plot_surface(x, y, z, rstride=1, cstride=1, + ... facecolors=plt.cm.viridis(pdfnorm(pdf_values)), + ... linewidth=0) + ... ax.set_aspect('equal') + ... ax.view_init(azim=-130, elev=0) + ... ax.axis('off') + ... ax.set_title(rf"$\kappa={kappa}$") + >>> fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 4), + ... subplot_kw={"projection": "3d"}) + >>> left, middle, right = axes + >>> mu = np.array([-np.sqrt(0.5), -np.sqrt(0.5), 0]) + >>> plot_vmf_density(left, x, y, z, vertices, mu, 5) + >>> plot_vmf_density(middle, x, y, z, vertices, mu, 20) + >>> plot_vmf_density(right, x, y, z, vertices, mu, 100) + >>> plt.subplots_adjust(top=1, bottom=0.0, left=0.0, right=1.0, wspace=0.) + >>> plt.show() + + As we increase the concentration parameter, the points are getting more + clustered together around the mean direction. + + **Sampling** + + Draw 5 samples from the distribution using the ``rvs`` method resulting + in a 5x3 array. 
+ + >>> rng = np.random.default_rng() + >>> mu = np.array([0, 0, 1]) + >>> samples = vonmises_fisher(mu, 20).rvs(5, random_state=rng) + >>> samples + array([[ 0.3884594 , -0.32482588, 0.86231516], + [ 0.00611366, -0.09878289, 0.99509023], + [-0.04154772, -0.01637135, 0.99900239], + [-0.14613735, 0.12553507, 0.98126695], + [-0.04429884, -0.23474054, 0.97104814]]) + + These samples are unit vectors on the sphere :math:`S^2`. To verify, + let us calculate their euclidean norms: + + >>> np.linalg.norm(samples, axis=1) + array([1., 1., 1., 1., 1.]) + + Plot 20 observations drawn from the von Mises-Fisher distribution for + increasing concentration parameter :math:`\kappa`. The red dot highlights + the mean direction :math:`\mu`. + + >>> def plot_vmf_samples(ax, x, y, z, mu, kappa): + ... vmf = vonmises_fisher(mu, kappa) + ... samples = vmf.rvs(20) + ... ax.plot_surface(x, y, z, rstride=1, cstride=1, linewidth=0, + ... alpha=0.2) + ... ax.scatter(samples[:, 0], samples[:, 1], samples[:, 2], c='k', s=5) + ... ax.scatter(mu[0], mu[1], mu[2], c='r', s=30) + ... ax.set_aspect('equal') + ... ax.view_init(azim=-130, elev=0) + ... ax.axis('off') + ... ax.set_title(rf"$\kappa={kappa}$") + >>> mu = np.array([-np.sqrt(0.5), -np.sqrt(0.5), 0]) + >>> fig, axes = plt.subplots(nrows=1, ncols=3, + ... subplot_kw={"projection": "3d"}, + ... figsize=(9, 4)) + >>> left, middle, right = axes + >>> plot_vmf_samples(left, x, y, z, mu, 5) + >>> plot_vmf_samples(middle, x, y, z, mu, 20) + >>> plot_vmf_samples(right, x, y, z, mu, 100) + >>> plt.subplots_adjust(top=1, bottom=0.0, left=0.0, + ... right=1.0, wspace=0.) + >>> plt.show() + + The plots show that with increasing concentration :math:`\kappa` the + resulting samples are centered more closely around the mean direction. + + **Fitting the distribution parameters** + + The distribution can be fitted to data using the ``fit`` method returning + the estimated parameters. 
As a toy example let's fit the distribution to + samples drawn from a known von Mises-Fisher distribution. + + >>> mu, kappa = np.array([0, 0, 1]), 20 + >>> samples = vonmises_fisher(mu, kappa).rvs(1000, random_state=rng) + >>> mu_fit, kappa_fit = vonmises_fisher.fit(samples) + >>> mu_fit, kappa_fit + (array([0.01126519, 0.01044501, 0.99988199]), 19.306398751730995) + + We see that the estimated parameters `mu_fit` and `kappa_fit` are + very close to the ground truth parameters. + + """ + def __init__(self, seed=None): + super().__init__(seed) + + def __call__(self, mu=None, kappa=1, seed=None): + """Create a frozen von Mises-Fisher distribution. + + See `vonmises_fisher_frozen` for more information. + """ + return vonmises_fisher_frozen(mu, kappa, seed=seed) + + def _process_parameters(self, mu, kappa): + """ + Infer dimensionality from mu and ensure that mu is a one-dimensional + unit vector and kappa positive. + """ + mu = np.asarray(mu) + if mu.ndim > 1: + raise ValueError("'mu' must have one-dimensional shape.") + if not np.allclose(np.linalg.norm(mu), 1.): + raise ValueError("'mu' must be a unit vector of norm 1.") + if not mu.size > 1: + raise ValueError("'mu' must have at least two entries.") + kappa_error_msg = "'kappa' must be a positive scalar." + if not np.isscalar(kappa) or kappa < 0: + raise ValueError(kappa_error_msg) + if float(kappa) == 0.: + raise ValueError("For 'kappa=0' the von Mises-Fisher distribution " + "becomes the uniform distribution on the sphere " + "surface. Consider using " + "'scipy.stats.uniform_direction' instead.") + dim = mu.size + + return dim, mu, kappa + + def _check_data_vs_dist(self, x, dim): + if x.shape[-1] != dim: + raise ValueError("The dimensionality of the last axis of 'x' must " + "match the dimensionality of the " + "von Mises Fisher distribution.") + if not np.allclose(np.linalg.norm(x, axis=-1), 1.): + msg = "'x' must be unit vectors of norm 1 along last dimension." 
+ raise ValueError(msg) + + def _log_norm_factor(self, dim, kappa): + # normalization factor is given by + # c = kappa**(dim/2-1)/((2*pi)**(dim/2)*I[dim/2-1](kappa)) + # = kappa**(dim/2-1)*exp(-kappa) / + # ((2*pi)**(dim/2)*I[dim/2-1](kappa)*exp(-kappa) + # = kappa**(dim/2-1)*exp(-kappa) / + # ((2*pi)**(dim/2)*ive[dim/2-1](kappa) + # Then the log is given by + # log c = 1/2*(dim -1)*log(kappa) - kappa - -1/2*dim*ln(2*pi) - + # ive[dim/2-1](kappa) + halfdim = 0.5 * dim + return (0.5 * (dim - 2)*np.log(kappa) - halfdim * _LOG_2PI - + np.log(ive(halfdim - 1, kappa)) - kappa) + + def _logpdf(self, x, dim, mu, kappa): + """Log of the von Mises-Fisher probability density function. + + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + x = np.asarray(x) + self._check_data_vs_dist(x, dim) + dotproducts = np.einsum('i,...i->...', mu, x) + return self._log_norm_factor(dim, kappa) + kappa * dotproducts + + def logpdf(self, x, mu=None, kappa=1): + """Log of the von Mises-Fisher probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + mu : array_like, default: None + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float, default: 1 + Concentration parameter. Must be positive. + + Returns + ------- + logpdf : ndarray or scalar + Log of the probability density function evaluated at `x`. + + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + return self._logpdf(x, dim, mu, kappa) + + def pdf(self, x, mu=None, kappa=1): + """Von Mises-Fisher probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the probability + density function. 
The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x`. + + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + return np.exp(self._logpdf(x, dim, mu, kappa)) + + def _rvs_2d(self, mu, kappa, size, random_state): + """ + In 2D, the von Mises-Fisher distribution reduces to the + von Mises distribution which can be efficiently sampled by numpy. + This method is much faster than the general rejection + sampling based algorithm. + + """ + mean_angle = np.arctan2(mu[1], mu[0]) + angle_samples = random_state.vonmises(mean_angle, kappa, size=size) + samples = np.stack([np.cos(angle_samples), np.sin(angle_samples)], + axis=-1) + return samples + + def _rvs_3d(self, kappa, size, random_state): + """ + Generate samples from a von Mises-Fisher distribution + with mu = [1, 0, 0] and kappa. Samples then have to be + rotated towards the desired mean direction mu. + This method is much faster than the general rejection + sampling based algorithm. + Reference: https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf + + """ + if size is None: + sample_size = 1 + else: + sample_size = size + + # compute x coordinate acc. to equation from section 3.1 + x = random_state.random(sample_size) + x = 1. + np.log(x + (1. - x) * np.exp(-2 * kappa))/kappa + + # (y, z) are random 2D vectors that only have to be + # normalized accordingly. Then (x, y z) follow a VMF distribution + temp = np.sqrt(1. 
- np.square(x)) + uniformcircle = _sample_uniform_direction(2, sample_size, random_state) + samples = np.stack([x, temp * uniformcircle[..., 0], + temp * uniformcircle[..., 1]], + axis=-1) + if size is None: + samples = np.squeeze(samples) + return samples + + def _rejection_sampling(self, dim, kappa, size, random_state): + """ + Generate samples from a n-dimensional von Mises-Fisher distribution + with mu = [1, 0, ..., 0] and kappa via rejection sampling. + Samples then have to be rotated towards the desired mean direction mu. + Reference: https://doi.org/10.1080/03610919408813161 + """ + dim_minus_one = dim - 1 + # calculate number of requested samples + if size is not None: + if not np.iterable(size): + size = (size, ) + n_samples = math.prod(size) + else: + n_samples = 1 + # calculate envelope for rejection sampler (eq. 4) + sqrt = np.sqrt(4 * kappa ** 2. + dim_minus_one ** 2) + envelop_param = (-2 * kappa + sqrt) / dim_minus_one + if envelop_param == 0: + # the regular formula suffers from loss of precision for high + # kappa. This can only be detected by checking for 0 here. + # Workaround: expansion for sqrt variable + # https://www.wolframalpha.com/input?i=sqrt%284*x%5E2%2Bd%5E2%29 + # e = (-2 * k + sqrt(k**2 + d**2)) / d + # ~ (-2 * k + 2 * k + d**2/(4 * k) - d**4/(64 * k**3)) / d + # = d/(4 * k) - d**3/(64 * k**3) + envelop_param = (dim_minus_one/4 * kappa**-1. + - dim_minus_one**3/64 * kappa**-3.) + # reference step 0 + node = (1. - envelop_param) / (1. + envelop_param) + # t = ln(1 - ((1-x)/(1+x))**2) + # = ln(4 * x / (1+x)**2) + # = ln(4) + ln(x) - 2*log1p(x) + correction = (kappa * node + dim_minus_one + * (np.log(4) + np.log(envelop_param) + - 2 * np.log1p(envelop_param))) + n_accepted = 0 + x = np.zeros((n_samples, )) + halfdim = 0.5 * dim_minus_one + # main loop + while n_accepted < n_samples: + # generate candidates acc. 
to reference step 1 + sym_beta = random_state.beta(halfdim, halfdim, + size=n_samples - n_accepted) + coord_x = (1 - (1 + envelop_param) * sym_beta) / ( + 1 - (1 - envelop_param) * sym_beta) + # accept or reject: reference step 2 + # reformulation for numerical stability: + # t = ln(1 - (1-x)/(1+x) * y) + # = ln((1 + x - y +x*y)/(1 +x)) + accept_tol = random_state.random(n_samples - n_accepted) + criterion = ( + kappa * coord_x + + dim_minus_one * (np.log((1 + envelop_param - coord_x + + coord_x * envelop_param) / (1 + envelop_param))) + - correction) > np.log(accept_tol) + accepted_iter = np.sum(criterion) + x[n_accepted:n_accepted + accepted_iter] = coord_x[criterion] + n_accepted += accepted_iter + # concatenate x and remaining coordinates: step 3 + coord_rest = _sample_uniform_direction(dim_minus_one, n_accepted, + random_state) + coord_rest = np.einsum( + '...,...i->...i', np.sqrt(1 - x ** 2), coord_rest) + samples = np.concatenate([x[..., None], coord_rest], axis=1) + # reshape output to (size, dim) + if size is not None: + samples = samples.reshape(size + (dim, )) + else: + samples = np.squeeze(samples) + return samples + + def _rotate_samples(self, samples, mu, dim): + """A QR decomposition is used to find the rotation that maps the + north pole (1, 0,...,0) to the vector mu. This rotation is then + applied to all samples. + + Parameters + ---------- + samples: array_like, shape = [..., n] + mu : array-like, shape=[n, ] + Point to parametrise the rotation. + + Returns + ------- + samples : rotated samples + + """ + base_point = np.zeros((dim, )) + base_point[0] = 1. 
+ embedded = np.concatenate([mu[None, :], np.zeros((dim - 1, dim))]) + rotmatrix, _ = np.linalg.qr(np.transpose(embedded)) + if np.allclose(np.matmul(rotmatrix, base_point[:, None])[:, 0], mu): + rotsign = 1 + else: + rotsign = -1 + + # apply rotation + samples = np.einsum('ij,...j->...i', rotmatrix, samples) * rotsign + return samples + + def _rvs(self, dim, mu, kappa, size, random_state): + if dim == 2: + samples = self._rvs_2d(mu, kappa, size, random_state) + elif dim == 3: + samples = self._rvs_3d(kappa, size, random_state) + else: + samples = self._rejection_sampling(dim, kappa, size, + random_state) + + if dim != 2: + samples = self._rotate_samples(samples, mu, dim) + return samples + + def rvs(self, mu=None, kappa=1, size=1, random_state=None): + """Draw random samples from a von Mises-Fisher distribution. + + Parameters + ---------- + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, np.random.RandomState, np.random.Generator}, + optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`, `N`), where `N` is the + dimension of the distribution. 
+ + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + random_state = self._get_random_state(random_state) + samples = self._rvs(dim, mu, kappa, size, random_state) + return samples + + def _entropy(self, dim, kappa): + halfdim = 0.5 * dim + return (-self._log_norm_factor(dim, kappa) - kappa * + ive(halfdim, kappa) / ive(halfdim - 1, kappa)) + + def entropy(self, mu=None, kappa=1): + """Compute the differential entropy of the von Mises-Fisher + distribution. + + Parameters + ---------- + mu : array_like, default: None + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float, default: 1 + Concentration parameter. Must be positive. + + Returns + ------- + h : scalar + Entropy of the von Mises-Fisher distribution. + + """ + dim, _, kappa = self._process_parameters(mu, kappa) + return self._entropy(dim, kappa) + + def fit(self, x): + """Fit the von Mises-Fisher distribution to data. + + Parameters + ---------- + x : array-like + Data the distribution is fitted to. Must be two dimensional. + The second axis of `x` must be unit vectors of norm 1 and + determine the dimensionality of the fitted + von Mises-Fisher distribution. + + Returns + ------- + mu : ndarray + Estimated mean direction. + kappa : float + Estimated concentration parameter. + + """ + # validate input data + x = np.asarray(x) + if x.ndim != 2: + raise ValueError("'x' must be two dimensional.") + if not np.allclose(np.linalg.norm(x, axis=-1), 1.): + msg = "'x' must be unit vectors of norm 1 along last dimension." 
+ raise ValueError(msg) + dim = x.shape[-1] + + # mu is simply the directional mean + dirstats = directional_stats(x) + mu = dirstats.mean_direction + r = dirstats.mean_resultant_length + + # kappa is the solution to the equation: + # r = I[dim/2](kappa) / I[dim/2 -1](kappa) + # = I[dim/2](kappa) * exp(-kappa) / I[dim/2 -1](kappa) * exp(-kappa) + # = ive(dim/2, kappa) / ive(dim/2 -1, kappa) + + halfdim = 0.5 * dim + + def solve_for_kappa(kappa): + bessel_vals = ive([halfdim, halfdim - 1], kappa) + return bessel_vals[0]/bessel_vals[1] - r + + root_res = root_scalar(solve_for_kappa, method="brentq", + bracket=(1e-8, 1e9)) + kappa = root_res.root + return mu, kappa + + +vonmises_fisher = vonmises_fisher_gen() + + +class vonmises_fisher_frozen(multi_rv_frozen): + def __init__(self, mu=None, kappa=1, seed=None): + """Create a frozen von Mises-Fisher distribution. + + Parameters + ---------- + mu : array_like, default: None + Mean direction of the distribution. + kappa : float, default: 1 + Concentration parameter. Must be positive. + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + """ + self._dist = vonmises_fisher_gen(seed) + self.dim, self.mu, self.kappa = ( + self._dist._process_parameters(mu, kappa) + ) + + def logpdf(self, x): + """ + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + + Returns + ------- + logpdf : ndarray or scalar + Log of probability density function evaluated at `x`. 
+ + """ + return self._dist._logpdf(x, self.dim, self.mu, self.kappa) + + def pdf(self, x): + """ + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x`. + + """ + return np.exp(self.logpdf(x)) + + def rvs(self, size=1, random_state=None): + """Draw random variates from the Von Mises-Fisher distribution. + + Parameters + ---------- + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the distribution. + + """ + random_state = self._dist._get_random_state(random_state) + return self._dist._rvs(self.dim, self.mu, self.kappa, size, + random_state) + + def entropy(self): + """ + Calculate the differential entropy of the von Mises-Fisher + distribution. + + Returns + ------- + h: float + Entropy of the Von Mises-Fisher distribution. 
+ + """ + return self._dist._entropy(self.dim, self.kappa) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..53d29d580d16a775b3c81871ef9b48c9ce31527a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so differ diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py b/parrot/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py new file mode 100644 index 0000000000000000000000000000000000000000..baf2e70c9f0fb7d31eebe9b081a22f8446e49780 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py @@ -0,0 +1,482 @@ +import numpy as np + +from scipy.special import ndtri +from scipy.optimize import brentq +from ._discrete_distns import nchypergeom_fisher +from ._common import ConfidenceInterval + + +def _sample_odds_ratio(table): + """ + Given a table [[a, b], [c, d]], compute a*d/(b*c). + + Return nan if the numerator and denominator are 0. + Return inf if just the denominator is 0. + """ + # table must be a 2x2 numpy array. + if table[1, 0] > 0 and table[0, 1] > 0: + oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1]) + elif table[0, 0] == 0 or table[1, 1] == 0: + oddsratio = np.nan + else: + oddsratio = np.inf + return oddsratio + + +def _solve(func): + """ + Solve func(nc) = 0. func must be an increasing function. + """ + # We could just as well call the variable `x` instead of `nc`, but we + # always call this function with functions for which nc (the noncentrality + # parameter) is the variable for which we are solving. + nc = 1.0 + value = func(nc) + if value == 0: + return nc + + # Multiplicative factor by which to increase or decrease nc when + # searching for a bracketing interval. + factor = 2.0 + # Find a bracketing interval. 
+ if value > 0: + nc /= factor + while func(nc) > 0: + nc /= factor + lo = nc + hi = factor*nc + else: + nc *= factor + while func(nc) < 0: + nc *= factor + lo = nc/factor + hi = nc + + # lo and hi bracket the solution for nc. + nc = brentq(func, lo, hi, xtol=1e-13) + return nc + + +def _nc_hypergeom_mean_inverse(x, M, n, N): + """ + For the given noncentral hypergeometric parameters x, M, n,and N + (table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2 + contingency table), find the noncentrality parameter of Fisher's + noncentral hypergeometric distribution whose mean is x. + """ + nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x) + return nc + + +def _hypergeom_params_from_table(table): + # The notation M, n and N is consistent with stats.hypergeom and + # stats.nchypergeom_fisher. + x = table[0, 0] + M = table.sum() + n = table[0].sum() + N = table[:, 0].sum() + return x, M, n, N + + +def _ci_upper(table, alpha): + """ + Compute the upper end of the confidence interval. + """ + if _sample_odds_ratio(table) == np.inf: + return np.inf + + x, M, n, N = _hypergeom_params_from_table(table) + + # nchypergeom_fisher.cdf is a decreasing function of nc, so we negate + # it in the lambda expression. + nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha) + return nc + + +def _ci_lower(table, alpha): + """ + Compute the lower end of the confidence interval. + """ + if _sample_odds_ratio(table) == 0: + return 0 + + x, M, n, N = _hypergeom_params_from_table(table) + + nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha) + return nc + + +def _conditional_oddsratio(table): + """ + Conditional MLE of the odds ratio for the 2x2 contingency table. + """ + x, M, n, N = _hypergeom_params_from_table(table) + # Get the bounds of the support. The support of the noncentral + # hypergeometric distribution with parameters M, n, and N is the same + # for all values of the noncentrality parameter, so we can use 1 here. 
+ lo, hi = nchypergeom_fisher.support(M, n, N, 1) + + # Check if x is at one of the extremes of the support. If so, we know + # the odds ratio is either 0 or inf. + if x == lo: + # x is at the low end of the support. + return 0 + if x == hi: + # x is at the high end of the support. + return np.inf + + nc = _nc_hypergeom_mean_inverse(x, M, n, N) + return nc + + +def _conditional_oddsratio_ci(table, confidence_level=0.95, + alternative='two-sided'): + """ + Conditional exact confidence interval for the odds ratio. + """ + if alternative == 'two-sided': + alpha = 0.5*(1 - confidence_level) + lower = _ci_lower(table, alpha) + upper = _ci_upper(table, alpha) + elif alternative == 'less': + lower = 0.0 + upper = _ci_upper(table, 1 - confidence_level) + else: + # alternative == 'greater' + lower = _ci_lower(table, 1 - confidence_level) + upper = np.inf + + return lower, upper + + +def _sample_odds_ratio_ci(table, confidence_level=0.95, + alternative='two-sided'): + oddsratio = _sample_odds_ratio(table) + log_or = np.log(oddsratio) + se = np.sqrt((1/table).sum()) + if alternative == 'less': + z = ndtri(confidence_level) + loglow = -np.inf + loghigh = log_or + z*se + elif alternative == 'greater': + z = ndtri(confidence_level) + loglow = log_or - z*se + loghigh = np.inf + else: + # alternative is 'two-sided' + z = ndtri(0.5*confidence_level + 0.5) + loglow = log_or - z*se + loghigh = log_or + z*se + + return np.exp(loglow), np.exp(loghigh) + + +class OddsRatioResult: + """ + Result of `scipy.stats.contingency.odds_ratio`. See the + docstring for `odds_ratio` for more details. + + Attributes + ---------- + statistic : float + The computed odds ratio. + + * If `kind` is ``'sample'``, this is sample (or unconditional) + estimate, given by + ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. + * If `kind` is ``'conditional'``, this is the conditional + maximum likelihood estimate for the odds ratio. 
It is + the noncentrality parameter of Fisher's noncentral + hypergeometric distribution with the same hypergeometric + parameters as `table` and whose mean is ``table[0, 0]``. + + Methods + ------- + confidence_interval : + Confidence interval for the odds ratio. + """ + + def __init__(self, _table, _kind, statistic): + # for now, no need to make _table and _kind public, since this sort of + # information is returned in very few `scipy.stats` results + self._table = _table + self._kind = _kind + self.statistic = statistic + + def __repr__(self): + return f"OddsRatioResult(statistic={self.statistic})" + + def confidence_interval(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the odds ratio. + + Parameters + ---------- + confidence_level: float + Desired confidence level for the confidence interval. + The value must be given as a fraction between 0 and 1. + Default is 0.95 (meaning 95%). + + alternative : {'two-sided', 'less', 'greater'}, optional + The alternative hypothesis of the hypothesis test to which the + confidence interval corresponds. That is, suppose the null + hypothesis is that the true odds ratio equals ``OR`` and the + confidence interval is ``(low, high)``. Then the following options + for `alternative` are available (default is 'two-sided'): + + * 'two-sided': the true odds ratio is not equal to ``OR``. There + is evidence against the null hypothesis at the chosen + `confidence_level` if ``high < OR`` or ``low > OR``. + * 'less': the true odds ratio is less than ``OR``. The ``low`` end + of the confidence interval is 0, and there is evidence against + the null hypothesis at the chosen `confidence_level` if + ``high < OR``. + * 'greater': the true odds ratio is greater than ``OR``. The + ``high`` end of the confidence interval is ``np.inf``, and there + is evidence against the null hypothesis at the chosen + `confidence_level` if ``low > OR``. 
+ + Returns + ------- + ci : ``ConfidenceInterval`` instance + The confidence interval, represented as an object with + attributes ``low`` and ``high``. + + Notes + ----- + When `kind` is ``'conditional'``, the limits of the confidence + interval are the conditional "exact confidence limits" as described + by Fisher [1]_. The conditional odds ratio and confidence interval are + also discussed in Section 4.1.2 of the text by Sahai and Khurshid [2]_. + + When `kind` is ``'sample'``, the confidence interval is computed + under the assumption that the logarithm of the odds ratio is normally + distributed with standard error given by:: + + se = sqrt(1/a + 1/b + 1/c + 1/d) + + where ``a``, ``b``, ``c`` and ``d`` are the elements of the + contingency table. (See, for example, [2]_, section 3.1.3.2, + or [3]_, section 2.3.3). + + References + ---------- + .. [1] R. A. Fisher (1935), The logic of inductive inference, + Journal of the Royal Statistical Society, Vol. 98, No. 1, + pp. 39-82. + .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology: + Methods, Techniques, and Applications, CRC Press LLC, Boca + Raton, Florida. + .. [3] Alan Agresti, An Introduction to Categorical Data Analysis + (second edition), Wiley, Hoboken, NJ, USA (2007). + """ + if alternative not in ['two-sided', 'less', 'greater']: + raise ValueError("`alternative` must be 'two-sided', 'less' or " + "'greater'.") + + if confidence_level < 0 or confidence_level > 1: + raise ValueError('confidence_level must be between 0 and 1') + + if self._kind == 'conditional': + ci = self._conditional_odds_ratio_ci(confidence_level, alternative) + else: + ci = self._sample_odds_ratio_ci(confidence_level, alternative) + return ci + + def _conditional_odds_ratio_ci(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the conditional odds ratio. 
+ """ + + table = self._table + if 0 in table.sum(axis=0) or 0 in table.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1, + # the odds ratio is NaN and the confidence interval is (0, inf). + ci = (0, np.inf) + else: + ci = _conditional_oddsratio_ci(table, + confidence_level=confidence_level, + alternative=alternative) + return ConfidenceInterval(low=ci[0], high=ci[1]) + + def _sample_odds_ratio_ci(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the sample odds ratio. + """ + if confidence_level < 0 or confidence_level > 1: + raise ValueError('confidence_level must be between 0 and 1') + + table = self._table + if 0 in table.sum(axis=0) or 0 in table.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1, + # the odds ratio is NaN and the confidence interval is (0, inf). + ci = (0, np.inf) + else: + ci = _sample_odds_ratio_ci(table, + confidence_level=confidence_level, + alternative=alternative) + return ConfidenceInterval(low=ci[0], high=ci[1]) + + +def odds_ratio(table, *, kind='conditional'): + r""" + Compute the odds ratio for a 2x2 contingency table. + + Parameters + ---------- + table : array_like of ints + A 2x2 contingency table. Elements must be non-negative integers. + kind : str, optional + Which kind of odds ratio to compute, either the sample + odds ratio (``kind='sample'``) or the conditional odds ratio + (``kind='conditional'``). Default is ``'conditional'``. + + Returns + ------- + result : `~scipy.stats._result_classes.OddsRatioResult` instance + The returned object has two computed attributes: + + statistic : float + * If `kind` is ``'sample'``, this is sample (or unconditional) + estimate, given by + ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. + * If `kind` is ``'conditional'``, this is the conditional + maximum likelihood estimate for the odds ratio. 
It is + the noncentrality parameter of Fisher's noncentral + hypergeometric distribution with the same hypergeometric + parameters as `table` and whose mean is ``table[0, 0]``. + + The object has the method `confidence_interval` that computes + the confidence interval of the odds ratio. + + See Also + -------- + scipy.stats.fisher_exact + relative_risk + + Notes + ----- + The conditional odds ratio was discussed by Fisher (see "Example 1" + of [1]_). Texts that cover the odds ratio include [2]_ and [3]_. + + .. versionadded:: 1.10.0 + + References + ---------- + .. [1] R. A. Fisher (1935), The logic of inductive inference, + Journal of the Royal Statistical Society, Vol. 98, No. 1, + pp. 39-82. + .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research. + Volume I - The analysis of case-control studies. IARC Sci Publ. + (32):5-338. PMID: 7216345. (See section 4.2.) + .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology: + Methods, Techniques, and Applications, CRC Press LLC, Boca + Raton, Florida. + .. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of + Cardiovascular Events in Women and Men: A Sex-Specific + Meta-analysis of Randomized Controlled Trials." + JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006. + + Examples + -------- + In epidemiology, individuals are classified as "exposed" or + "unexposed" to some factor or treatment. If the occurrence of some + illness is under study, those who have the illness are often + classified as "cases", and those without it are "noncases". The + counts of the occurrences of these classes gives a contingency + table:: + + exposed unexposed + cases a b + noncases c d + + The sample odds ratio may be written ``(a/c) / (b/d)``. ``a/c`` can + be interpreted as the odds of a case occurring in the exposed group, + and ``b/d`` as the odds of a case occurring in the unexposed group. + The sample odds ratio is the ratio of these odds. 
If the odds ratio + is greater than 1, it suggests that there is a positive association + between being exposed and being a case. + + Interchanging the rows or columns of the contingency table inverts + the odds ratio, so it is important to understand the meaning of labels + given to the rows and columns of the table when interpreting the + odds ratio. + + In [4]_, the use of aspirin to prevent cardiovascular events in women + and men was investigated. The study notably concluded: + + ...aspirin therapy reduced the risk of a composite of + cardiovascular events due to its effect on reducing the risk of + ischemic stroke in women [...] + + The article lists studies of various cardiovascular events. Let's + focus on the ischemic stoke in women. + + The following table summarizes the results of the experiment in which + participants took aspirin or a placebo on a regular basis for several + years. Cases of ischemic stroke were recorded:: + + Aspirin Control/Placebo + Ischemic stroke 176 230 + No stroke 21035 21018 + + The question we ask is "Is there evidence that the aspirin reduces the + risk of ischemic stroke?" + + Compute the odds ratio: + + >>> from scipy.stats.contingency import odds_ratio + >>> res = odds_ratio([[176, 230], [21035, 21018]]) + >>> res.statistic + 0.7646037659999126 + + For this sample, the odds of getting an ischemic stroke for those who have + been taking aspirin are 0.76 times that of those + who have received the placebo. + + To make statistical inferences about the population under study, + we can compute the 95% confidence interval for the odds ratio: + + >>> res.confidence_interval(confidence_level=0.95) + ConfidenceInterval(low=0.6241234078749812, high=0.9354102892100372) + + The 95% confidence interval for the conditional odds ratio is + approximately (0.62, 0.94). 
+ + The fact that the entire 95% confidence interval falls below 1 supports + the authors' conclusion that the aspirin was associated with a + statistically significant reduction in ischemic stroke. + """ + if kind not in ['conditional', 'sample']: + raise ValueError("`kind` must be 'conditional' or 'sample'.") + + c = np.asarray(table) + + if c.shape != (2, 2): + raise ValueError(f"Invalid shape {c.shape}. The input `table` must be " + "of shape (2, 2).") + + if not np.issubdtype(c.dtype, np.integer): + raise ValueError("`table` must be an array of integers, but got " + f"type {c.dtype}") + c = c.astype(np.int64) + + if np.any(c < 0): + raise ValueError("All values in `table` must be nonnegative.") + + if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): + # If both values in a row or column are zero, the p-value is NaN and + # the odds ratio is NaN. + result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan) + return result + + if kind == 'sample': + oddsratio = _sample_odds_ratio(c) + else: # kind is 'conditional' + oddsratio = _conditional_oddsratio(c) + + result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio) + return result diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_page_trend_test.py b/parrot/lib/python3.10/site-packages/scipy/stats/_page_trend_test.py new file mode 100644 index 0000000000000000000000000000000000000000..87a4d0d17c07ce609cc575fc7dc61af75d2b9c51 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_page_trend_test.py @@ -0,0 +1,479 @@ +from itertools import permutations +import numpy as np +import math +from ._continuous_distns import norm +import scipy.stats +from dataclasses import dataclass + + +@dataclass +class PageTrendTestResult: + statistic: float + pvalue: float + method: str + + +def page_trend_test(data, ranked=False, predicted_ranks=None, method='auto'): + r""" + Perform Page's Test, a measure of trend in observations between treatments. 
+ + Page's Test (also known as Page's :math:`L` test) is useful when: + + * there are :math:`n \geq 3` treatments, + * :math:`m \geq 2` subjects are observed for each treatment, and + * the observations are hypothesized to have a particular order. + + Specifically, the test considers the null hypothesis that + + .. math:: + + m_1 = m_2 = m_3 \cdots = m_n, + + where :math:`m_j` is the mean of the observed quantity under treatment + :math:`j`, against the alternative hypothesis that + + .. math:: + + m_1 \leq m_2 \leq m_3 \leq \cdots \leq m_n, + + where at least one inequality is strict. + + As noted by [4]_, Page's :math:`L` test has greater statistical power than + the Friedman test against the alternative that there is a difference in + trend, as Friedman's test only considers a difference in the means of the + observations without considering their order. Whereas Spearman :math:`\rho` + considers the correlation between the ranked observations of two variables + (e.g. the airspeed velocity of a swallow vs. the weight of the coconut it + carries), Page's :math:`L` is concerned with a trend in an observation + (e.g. the airspeed velocity of a swallow) across several distinct + treatments (e.g. carrying each of five coconuts of different weight) even + as the observation is repeated with multiple subjects (e.g. one European + swallow and one African swallow). + + Parameters + ---------- + data : array-like + A :math:`m \times n` array; the element in row :math:`i` and + column :math:`j` is the observation corresponding with subject + :math:`i` and treatment :math:`j`. By default, the columns are + assumed to be arranged in order of increasing predicted mean. + + ranked : boolean, optional + By default, `data` is assumed to be observations rather than ranks; + it will be ranked with `scipy.stats.rankdata` along ``axis=1``. If + `data` is provided in the form of ranks, pass argument ``True``. 
+ + predicted_ranks : array-like, optional + The predicted ranks of the column means. If not specified, + the columns are assumed to be arranged in order of increasing + predicted mean, so the default `predicted_ranks` are + :math:`[1, 2, \dots, n-1, n]`. + + method : {'auto', 'asymptotic', 'exact'}, optional + Selects the method used to calculate the *p*-value. The following + options are available. + + * 'auto': selects between 'exact' and 'asymptotic' to + achieve reasonably accurate results in reasonable time (default) + * 'asymptotic': compares the standardized test statistic against + the normal distribution + * 'exact': computes the exact *p*-value by comparing the observed + :math:`L` statistic against those realized by all possible + permutations of ranks (under the null hypothesis that each + permutation is equally likely) + + Returns + ------- + res : PageTrendTestResult + An object containing attributes: + + statistic : float + Page's :math:`L` test statistic. + pvalue : float + The associated *p*-value + method : {'asymptotic', 'exact'} + The method used to compute the *p*-value + + See Also + -------- + rankdata, friedmanchisquare, spearmanr + + Notes + ----- + As noted in [1]_, "the :math:`n` 'treatments' could just as well represent + :math:`n` objects or events or performances or persons or trials ranked." + Similarly, the :math:`m` 'subjects' could equally stand for :math:`m` + "groupings by ability or some other control variable, or judges doing + the ranking, or random replications of some other sort." + + The procedure for calculating the :math:`L` statistic, adapted from + [1]_, is: + + 1. "Predetermine with careful logic the appropriate hypotheses + concerning the predicted ordering of the experimental results. + If no reasonable basis for ordering any treatments is known, the + :math:`L` test is not appropriate." + 2. 
"As in other experiments, determine at what level of confidence + you will reject the null hypothesis that there is no agreement of + experimental results with the monotonic hypothesis." + 3. "Cast the experimental material into a two-way table of :math:`n` + columns (treatments, objects ranked, conditions) and :math:`m` + rows (subjects, replication groups, levels of control variables)." + 4. "When experimental observations are recorded, rank them across each + row", e.g. ``ranks = scipy.stats.rankdata(data, axis=1)``. + 5. "Add the ranks in each column", e.g. + ``colsums = np.sum(ranks, axis=0)``. + 6. "Multiply each sum of ranks by the predicted rank for that same + column", e.g. ``products = predicted_ranks * colsums``. + 7. "Sum all such products", e.g. ``L = products.sum()``. + + [1]_ continues by suggesting use of the standardized statistic + + .. math:: + + \chi_L^2 = \frac{\left[12L-3mn(n+1)^2\right]^2}{mn^2(n^2-1)(n+1)} + + "which is distributed approximately as chi-square with 1 degree of + freedom. The ordinary use of :math:`\chi^2` tables would be + equivalent to a two-sided test of agreement. If a one-sided test + is desired, *as will almost always be the case*, the probability + discovered in the chi-square table should be *halved*." + + However, this standardized statistic does not distinguish between the + observed values being well correlated with the predicted ranks and being + _anti_-correlated with the predicted ranks. Instead, we follow [2]_ + and calculate the standardized statistic + + .. math:: + + \Lambda = \frac{L - E_0}{\sqrt{V_0}}, + + where :math:`E_0 = \frac{1}{4} mn(n+1)^2` and + :math:`V_0 = \frac{1}{144} mn^2(n+1)(n^2-1)`, "which is asymptotically + normal under the null hypothesis". + + The *p*-value for ``method='exact'`` is generated by comparing the observed + value of :math:`L` against the :math:`L` values generated for all + :math:`(n!)^m` possible permutations of ranks. 
The calculation is performed + using the recursive method of [5]. + + The *p*-values are not adjusted for the possibility of ties. When + ties are present, the reported ``'exact'`` *p*-values may be somewhat + larger (i.e. more conservative) than the true *p*-value [2]_. The + ``'asymptotic'``` *p*-values, however, tend to be smaller (i.e. less + conservative) than the ``'exact'`` *p*-values. + + References + ---------- + .. [1] Ellis Batten Page, "Ordered hypotheses for multiple treatments: + a significant test for linear ranks", *Journal of the American + Statistical Association* 58(301), p. 216--230, 1963. + + .. [2] Markus Neuhauser, *Nonparametric Statistical Test: A computational + approach*, CRC Press, p. 150--152, 2012. + + .. [3] Statext LLC, "Page's L Trend Test - Easy Statistics", *Statext - + Statistics Study*, https://www.statext.com/practice/PageTrendTest03.php, + Accessed July 12, 2020. + + .. [4] "Page's Trend Test", *Wikipedia*, WikimediaFoundation, + https://en.wikipedia.org/wiki/Page%27s_trend_test, + Accessed July 12, 2020. + + .. [5] Robert E. Odeh, "The exact distribution of Page's L-statistic in + the two-way layout", *Communications in Statistics - Simulation and + Computation*, 6(1), p. 49--61, 1977. + + Examples + -------- + We use the example from [3]_: 10 students are asked to rate three + teaching methods - tutorial, lecture, and seminar - on a scale of 1-5, + with 1 being the lowest and 5 being the highest. We have decided that + a confidence level of 99% is required to reject the null hypothesis in + favor of our alternative: that the seminar will have the highest ratings + and the tutorial will have the lowest. Initially, the data have been + tabulated with each row representing an individual student's ratings of + the three methods in the following order: tutorial, lecture, seminar. + + >>> table = [[3, 4, 3], + ... [2, 2, 4], + ... [3, 3, 5], + ... [1, 3, 2], + ... [2, 3, 2], + ... [2, 4, 5], + ... [1, 2, 4], + ... [3, 4, 4], + ... 
[2, 4, 5], + ... [1, 3, 4]] + + Because the tutorial is hypothesized to have the lowest ratings, the + column corresponding with tutorial rankings should be first; the seminar + is hypothesized to have the highest ratings, so its column should be last. + Since the columns are already arranged in this order of increasing + predicted mean, we can pass the table directly into `page_trend_test`. + + >>> from scipy.stats import page_trend_test + >>> res = page_trend_test(table) + >>> res + PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822, + method='exact') + + This *p*-value indicates that there is a 0.1819% chance that + the :math:`L` statistic would reach such an extreme value under the null + hypothesis. Because 0.1819% is less than 1%, we have evidence to reject + the null hypothesis in favor of our alternative at a 99% confidence level. + + The value of the :math:`L` statistic is 133.5. To check this manually, + we rank the data such that high scores correspond with high ranks, settling + ties with an average rank: + + >>> from scipy.stats import rankdata + >>> ranks = rankdata(table, axis=1) + >>> ranks + array([[1.5, 3. , 1.5], + [1.5, 1.5, 3. ], + [1.5, 1.5, 3. ], + [1. , 3. , 2. ], + [1.5, 3. , 1.5], + [1. , 2. , 3. ], + [1. , 2. , 3. ], + [1. , 2.5, 2.5], + [1. , 2. , 3. ], + [1. , 2. , 3. ]]) + + We add the ranks within each column, multiply the sums by the + predicted ranks, and sum the products. 
+ + >>> import numpy as np + >>> m, n = ranks.shape + >>> predicted_ranks = np.arange(1, n+1) + >>> L = (predicted_ranks * np.sum(ranks, axis=0)).sum() + >>> res.statistic == L + True + + As presented in [3]_, the asymptotic approximation of the *p*-value is the + survival function of the normal distribution evaluated at the standardized + test statistic: + + >>> from scipy.stats import norm + >>> E0 = (m*n*(n+1)**2)/4 + >>> V0 = (m*n**2*(n+1)*(n**2-1))/144 + >>> Lambda = (L-E0)/np.sqrt(V0) + >>> p = norm.sf(Lambda) + >>> p + 0.0012693433690751756 + + This does not precisely match the *p*-value reported by `page_trend_test` + above. The asymptotic distribution is not very accurate, nor conservative, + for :math:`m \leq 12` and :math:`n \leq 8`, so `page_trend_test` chose to + use ``method='exact'`` based on the dimensions of the table and the + recommendations in Page's original paper [1]_. To override + `page_trend_test`'s choice, provide the `method` argument. + + >>> res = page_trend_test(table, method="asymptotic") + >>> res + PageTrendTestResult(statistic=133.5, pvalue=0.0012693433690751756, + method='asymptotic') + + If the data are already ranked, we can pass in the ``ranks`` instead of + the ``table`` to save computation time. + + >>> res = page_trend_test(ranks, # ranks of data + ... ranked=True, # data is already ranked + ... ) + >>> res + PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822, + method='exact') + + Suppose the raw data had been tabulated in an order different from the + order of predicted means, say lecture, seminar, tutorial. + + >>> table = np.asarray(table)[:, [1, 2, 0]] + + Since the arrangement of this table is not consistent with the assumed + ordering, we can either rearrange the table or provide the + `predicted_ranks`. 
Remembering that the lecture is predicted + to have the middle rank, the seminar the highest, and tutorial the lowest, + we pass: + + >>> res = page_trend_test(table, # data as originally tabulated + ... predicted_ranks=[2, 3, 1], # our predicted order + ... ) + >>> res + PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822, + method='exact') + + """ + + # Possible values of the method parameter and the corresponding function + # used to evaluate the p value + methods = {"asymptotic": _l_p_asymptotic, + "exact": _l_p_exact, + "auto": None} + if method not in methods: + raise ValueError(f"`method` must be in {set(methods)}") + + ranks = np.asarray(data) + if ranks.ndim != 2: # TODO: relax this to accept 3d arrays? + raise ValueError("`data` must be a 2d array.") + + m, n = ranks.shape + if m < 2 or n < 3: + raise ValueError("Page's L is only appropriate for data with two " + "or more rows and three or more columns.") + + if np.any(np.isnan(data)): + raise ValueError("`data` contains NaNs, which cannot be ranked " + "meaningfully") + + # ensure NumPy array and rank the data if it's not already ranked + if ranked: + # Only a basic check on whether data is ranked. Checking that the data + # is properly ranked could take as much time as ranking it. + if not (ranks.min() >= 1 and ranks.max() <= ranks.shape[1]): + raise ValueError("`data` is not properly ranked. 
Rank the data or " + "pass `ranked=False`.") + else: + ranks = scipy.stats.rankdata(data, axis=-1) + + # generate predicted ranks if not provided, ensure valid NumPy array + if predicted_ranks is None: + predicted_ranks = np.arange(1, n+1) + else: + predicted_ranks = np.asarray(predicted_ranks) + if (predicted_ranks.ndim < 1 or + (set(predicted_ranks) != set(range(1, n+1)) or + len(predicted_ranks) != n)): + raise ValueError(f"`predicted_ranks` must include each integer " + f"from 1 to {n} (the number of columns in " + f"`data`) exactly once.") + + if not isinstance(ranked, bool): + raise TypeError("`ranked` must be boolean.") + + # Calculate the L statistic + L = _l_vectorized(ranks, predicted_ranks) + + # Calculate the p-value + if method == "auto": + method = _choose_method(ranks) + p_fun = methods[method] # get the function corresponding with the method + p = p_fun(L, m, n) + + page_result = PageTrendTestResult(statistic=L, pvalue=p, method=method) + return page_result + + +def _choose_method(ranks): + '''Choose method for computing p-value automatically''' + m, n = ranks.shape + if n > 8 or (m > 12 and n > 3) or m > 20: # as in [1], [4] + method = "asymptotic" + else: + method = "exact" + return method + + +def _l_vectorized(ranks, predicted_ranks): + '''Calculate's Page's L statistic for each page of a 3d array''' + colsums = ranks.sum(axis=-2, keepdims=True) + products = predicted_ranks * colsums + Ls = products.sum(axis=-1) + Ls = Ls[0] if Ls.size == 1 else Ls.ravel() + return Ls + + +def _l_p_asymptotic(L, m, n): + '''Calculate the p-value of Page's L from the asymptotic distribution''' + # Using [1] as a reference, the asymptotic p-value would be calculated as: + # chi_L = (12*L - 3*m*n*(n+1)**2)**2/(m*n**2*(n**2-1)*(n+1)) + # p = chi2.sf(chi_L, df=1, loc=0, scale=1)/2 + # but this is insensitive to the direction of the hypothesized ranking + + # See [2] page 151 + E0 = (m*n*(n+1)**2)/4 + V0 = (m*n**2*(n+1)*(n**2-1))/144 + Lambda = (L-E0)/np.sqrt(V0) + # 
This is a one-sided "greater" test - calculate the probability that the + # L statistic under H0 would be greater than the observed L statistic + p = norm.sf(Lambda) + return p + + +def _l_p_exact(L, m, n): + '''Calculate the p-value of Page's L exactly''' + # [1] uses m, n; [5] uses n, k. + # Switch convention here because exact calculation code references [5]. + L, n, k = int(L), int(m), int(n) + _pagel_state.set_k(k) + return _pagel_state.sf(L, n) + + +class _PageL: + '''Maintains state between `page_trend_test` executions''' + + def __init__(self): + '''Lightweight initialization''' + self.all_pmfs = {} + + def set_k(self, k): + '''Calculate lower and upper limits of L for single row''' + self.k = k + # See [5] top of page 52 + self.a, self.b = (k*(k+1)*(k+2))//6, (k*(k+1)*(2*k+1))//6 + + def sf(self, l, n): + '''Survival function of Page's L statistic''' + ps = [self.pmf(l, n) for l in range(l, n*self.b + 1)] + return np.sum(ps) + + def p_l_k_1(self): + '''Relative frequency of each L value over all possible single rows''' + + # See [5] Equation (6) + ranks = range(1, self.k+1) + # generate all possible rows of length k + rank_perms = np.array(list(permutations(ranks))) + # compute Page's L for all possible rows + Ls = (ranks*rank_perms).sum(axis=1) + # count occurrences of each L value + counts = np.histogram(Ls, np.arange(self.a-0.5, self.b+1.5))[0] + # factorial(k) is number of possible permutations + return counts/math.factorial(self.k) + + def pmf(self, l, n): + '''Recursive function to evaluate p(l, k, n); see [5] Equation 1''' + + if n not in self.all_pmfs: + self.all_pmfs[n] = {} + if self.k not in self.all_pmfs[n]: + self.all_pmfs[n][self.k] = {} + + # Cache results to avoid repeating calculation. Initially this was + # written with lru_cache, but this seems faster? Also, we could add + # an option to save this for future lookup. 
+ if l in self.all_pmfs[n][self.k]: + return self.all_pmfs[n][self.k][l] + + if n == 1: + ps = self.p_l_k_1() # [5] Equation 6 + ls = range(self.a, self.b+1) + # not fast, but we'll only be here once + self.all_pmfs[n][self.k] = {l: p for l, p in zip(ls, ps)} + return self.all_pmfs[n][self.k][l] + + p = 0 + low = max(l-(n-1)*self.b, self.a) # [5] Equation 2 + high = min(l-(n-1)*self.a, self.b) + + # [5] Equation 1 + for t in range(low, high+1): + p1 = self.pmf(l-t, n-1) + p2 = self.pmf(t, 1) + p += p1*p2 + self.all_pmfs[n][self.k][l] = p + return p + + +# Maintain state for faster repeat calls to page_trend_test w/ method='exact' +_pagel_state = _PageL() diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi b/parrot/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1006385a43179478a9a4a32ae5f825aa5b8b35c4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi @@ -0,0 +1,54 @@ +import numpy as np +from scipy._lib._util import DecimalNumber, IntNumber + + +def _cy_wrapper_centered_discrepancy( + sample: np.ndarray, + iterative: bool, + workers: IntNumber, +) -> float: ... + + +def _cy_wrapper_wrap_around_discrepancy( + sample: np.ndarray, + iterative: bool, + workers: IntNumber, +) -> float: ... + + +def _cy_wrapper_mixture_discrepancy( + sample: np.ndarray, + iterative: bool, + workers: IntNumber, +) -> float: ... + + +def _cy_wrapper_l2_star_discrepancy( + sample: np.ndarray, + iterative: bool, + workers: IntNumber, +) -> float: ... + + +def _cy_wrapper_update_discrepancy( + x_new_view: np.ndarray, + sample_view: np.ndarray, + initial_disc: DecimalNumber, +) -> float: ... + + +def _cy_van_der_corput( + n: IntNumber, + base: IntNumber, + start_index: IntNumber, + workers: IntNumber, +) -> np.ndarray: ... 
def _validate_int(n, bound, name):
    """Return `n` as a built-in int, requiring an integral value >= `bound`.

    Raises TypeError for non-integral input and ValueError for integral
    input below `bound`.
    """
    message = f'{name} must be an integer not less than {bound}, but got {n!r}'
    try:
        value = operator.index(n)
    except TypeError:
        raise TypeError(message) from None
    if value < bound:
        raise ValueError(message)
    return value


@dataclass
class RelativeRiskResult:
    """
    Result of `scipy.stats.contingency.relative_risk`.

    Attributes
    ----------
    relative_risk : float
        This is::

            (exposed_cases/exposed_total) / (control_cases/control_total)

    exposed_cases : int
        The number of "cases" (i.e. occurrence of disease or other event
        of interest) among the sample of "exposed" individuals.
    exposed_total : int
        The total number of "exposed" individuals in the sample.
    control_cases : int
        The number of "cases" among the sample of "control" or non-exposed
        individuals.
    control_total : int
        The total number of "control" individuals in the sample.

    Methods
    -------
    confidence_interval :
        Compute the confidence interval for the relative risk estimate.
    """

    relative_risk: float
    exposed_cases: int
    exposed_total: int
    control_cases: int
    control_total: int

    def confidence_interval(self, confidence_level=0.95):
        """
        Compute the confidence interval for the relative risk.

        The confidence interval is computed using the Katz method
        (i.e. "Method C" of [1]_; see also [2]_, section 3.1.2).

        Parameters
        ----------
        confidence_level : float, optional
            The confidence level to use for the confidence interval.
            Default is 0.95.

        Returns
        -------
        ci : ConfidenceInterval instance
            The return value is an object with attributes ``low`` and
            ``high`` that hold the confidence interval.

        References
        ----------
        .. [1] D. Katz, J. Baptista, S. P. Azen and M. C. Pike, "Obtaining
               confidence intervals for the risk ratio in cohort studies",
               Biometrics, 34, 469-474 (1978).
        .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
               CRC Press LLC, Boca Raton, FL, USA (1996).

        Examples
        --------
        >>> from scipy.stats.contingency import relative_risk
        >>> result = relative_risk(exposed_cases=10, exposed_total=75,
        ...                        control_cases=12, control_total=225)
        >>> result.relative_risk
        2.5
        >>> result.confidence_interval()
        ConfidenceInterval(low=1.1261564003469628, high=5.549850800541033)
        """
        if not 0 <= confidence_level <= 1:
            raise ValueError('confidence_level must be in the interval '
                             '[0, 1].')

        # Degenerate tables (a zero cell count): follow the convention of
        # the R function riskratio from the epitools library.
        if self.exposed_cases == 0 and self.control_cases == 0:
            # relative risk is nan
            return ConfidenceInterval(low=np.nan, high=np.nan)
        if self.exposed_cases == 0:
            # relative risk is 0
            return ConfidenceInterval(low=0.0, high=np.nan)
        if self.control_cases == 0:
            # relative risk is inf
            return ConfidenceInterval(low=np.nan, high=np.inf)

        alpha = 1 - confidence_level
        z = ndtri(1 - alpha/2)
        rr = self.relative_risk

        # Estimated variance of log(rr) is
        #   var(log(rr)) = 1/exposed_cases - 1/exposed_total
        #                  + 1/control_cases - 1/control_total
        # and the standard error is its square root.
        se = np.sqrt(1/self.exposed_cases - 1/self.exposed_total
                     + 1/self.control_cases - 1/self.control_total)
        delta = z*se
        return ConfidenceInterval(low=rr*np.exp(-delta),
                                  high=rr*np.exp(delta))


def relative_risk(exposed_cases, exposed_total, control_cases, control_total):
    """
    Compute the relative risk (also known as the risk ratio).

    This function computes the relative risk associated with a 2x2
    contingency table ([1]_, section 2.2.3; [2]_, section 3.1.2). Instead
    of accepting a table as an argument, the individual numbers that are
    used to compute the relative risk are given as separate parameters.
    This is to avoid the ambiguity of which row or column of the contingency
    table corresponds to the "exposed" cases and which corresponds to the
    "control" cases. Unlike, say, the odds ratio, the relative risk is not
    invariant under an interchange of the rows or columns.

    Parameters
    ----------
    exposed_cases : nonnegative int
        The number of "cases" (i.e. occurrence of disease or other event
        of interest) among the sample of "exposed" individuals.
    exposed_total : positive int
        The total number of "exposed" individuals in the sample.
    control_cases : nonnegative int
        The number of "cases" among the sample of "control" or non-exposed
        individuals.
    control_total : positive int
        The total number of "control" individuals in the sample.

    Returns
    -------
    result : instance of `~scipy.stats._result_classes.RelativeRiskResult`
        The object has the float attribute ``relative_risk``, which is::

            rr = (exposed_cases/exposed_total) / (control_cases/control_total)

        The object also has the method ``confidence_interval`` to compute
        the confidence interval of the relative risk for a given confidence
        level.

    See Also
    --------
    odds_ratio

    Notes
    -----
    The R package epitools has the function `riskratio`, which accepts
    a table with the following layout::

                    disease=0   disease=1
        exposed=0 (ref)    n00         n01
        exposed=1          n10         n11

    With a 2x2 table in the above format, the estimate of the CI is
    computed by `riskratio` when the argument method="wald" is given,
    or with the function `riskratio.wald`.

    For example, in a test of the incidence of lung cancer among a
    sample of smokers and nonsmokers, the "exposed" category would
    correspond to "is a smoker" and the "disease" category would
    correspond to "has or had lung cancer".

    To pass the same data to ``relative_risk``, use::

        relative_risk(n11, n10 + n11, n01, n00 + n01)

    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] Alan Agresti, An Introduction to Categorical Data Analysis
           (second edition), Wiley, Hoboken, NJ, USA (2007).
    .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
           CRC Press LLC, Boca Raton, FL, USA (1996).

    Examples
    --------
    >>> from scipy.stats.contingency import relative_risk

    This example is from Example 3.1 of [2]_. The results of a heart
    disease study are summarized in the following table::

                 High CAT   Low CAT    Total
                 --------   -------    -----
        CHD         27         44        71
        No CHD      95        443       538

        Total      122        487       609

    CHD is coronary heart disease, and CAT refers to the level of
    circulating catecholamine. CAT is the "exposure" variable, and
    high CAT is the "exposed" category. So the data from the table
    to be passed to ``relative_risk`` is::

        exposed_cases = 27
        exposed_total = 122
        control_cases = 44
        control_total = 487

    >>> result = relative_risk(27, 122, 44, 487)
    >>> result.relative_risk
    2.4495156482861398

    Find the confidence interval for the relative risk.

    >>> result.confidence_interval(confidence_level=0.95)
    ConfidenceInterval(low=1.5836990926700116, high=3.7886786315466354)

    The interval does not contain 1, so the data supports the statement
    that high CAT is associated with greater risk of CHD.
    """
    # The ratio itself is a trivial calculation; the nontrivial part lives
    # in the `confidence_interval` method of RelativeRiskResult.
    exposed_cases = _validate_int(exposed_cases, 0, "exposed_cases")
    exposed_total = _validate_int(exposed_total, 1, "exposed_total")
    control_cases = _validate_int(control_cases, 0, "control_cases")
    control_total = _validate_int(control_total, 1, "control_total")

    if exposed_cases > exposed_total:
        raise ValueError('exposed_cases must not exceed exposed_total.')
    if control_cases > control_total:
        raise ValueError('control_cases must not exceed control_total.')

    # Resolve the 0/0, 0/nonzero and nonzero/0 cases explicitly so no
    # division warnings are emitted.
    if exposed_cases == 0 and control_cases == 0:
        rr = np.nan
    elif exposed_cases == 0:
        rr = 0.0
    elif control_cases == 0:
        rr = np.inf
    else:
        rr = (exposed_cases / exposed_total) / (control_cases / control_total)

    return RelativeRiskResult(relative_risk=rr,
                              exposed_cases=exposed_cases,
                              exposed_total=exposed_total,
                              control_cases=control_cases,
                              control_total=control_total)
("This axis is removed, and replaced + # with new dimensions...") + z = np.moveaxis(z, axis, 0) + + def stat_1d(z): + data = np.split(z, split_indices) + return statistic(*data) + + return np.apply_along_axis(stat_1d, 0, z)[()] + return stat_nd + + +def _jackknife_resample(sample, batch=None): + """Jackknife resample the sample. Only one-sample stats for now.""" + n = sample.shape[-1] + batch_nominal = batch or n + + for k in range(0, n, batch_nominal): + # col_start:col_end are the observations to remove + batch_actual = min(batch_nominal, n-k) + + # jackknife - each row leaves out one observation + j = np.ones((batch_actual, n), dtype=bool) + np.fill_diagonal(j[:, k:k+batch_actual], False) + i = np.arange(n) + i = np.broadcast_to(i, (batch_actual, n)) + i = i[j].reshape((batch_actual, n-1)) + + resamples = sample[..., i] + yield resamples + + +def _bootstrap_resample(sample, n_resamples=None, random_state=None): + """Bootstrap resample the sample.""" + n = sample.shape[-1] + + # bootstrap - each row is a random resample of original observations + i = rng_integers(random_state, 0, n, (n_resamples, n)) + + resamples = sample[..., i] + return resamples + + +def _percentile_of_score(a, score, axis): + """Vectorized, simplified `scipy.stats.percentileofscore`. + Uses logic of the 'mean' value of percentileofscore's kind parameter. + + Unlike `stats.percentileofscore`, the percentile returned is a fraction + in [0, 1]. 
+ """ + B = a.shape[axis] + return ((a < score).sum(axis=axis) + (a <= score).sum(axis=axis)) / (2 * B) + + +def _percentile_along_axis(theta_hat_b, alpha): + """`np.percentile` with different percentile for each slice.""" + # the difference between _percentile_along_axis and np.percentile is that + # np.percentile gets _all_ the qs for each axis slice, whereas + # _percentile_along_axis gets the q corresponding with each axis slice + shape = theta_hat_b.shape[:-1] + alpha = np.broadcast_to(alpha, shape) + percentiles = np.zeros_like(alpha, dtype=np.float64) + for indices, alpha_i in np.ndenumerate(alpha): + if np.isnan(alpha_i): + # e.g. when bootstrap distribution has only one unique element + msg = ( + "The BCa confidence interval cannot be calculated." + " This problem is known to occur when the distribution" + " is degenerate or the statistic is np.min." + ) + warnings.warn(DegenerateDataWarning(msg), stacklevel=3) + percentiles[indices] = np.nan + else: + theta_hat_b_i = theta_hat_b[indices] + percentiles[indices] = np.percentile(theta_hat_b_i, alpha_i) + return percentiles[()] # return scalar instead of 0d array + + +def _bca_interval(data, statistic, axis, alpha, theta_hat_b, batch): + """Bias-corrected and accelerated interval.""" + # closely follows [1] 14.3 and 15.4 (Eq. 15.36) + + # calculate z0_hat + theta_hat = np.asarray(statistic(*data, axis=axis))[..., None] + percentile = _percentile_of_score(theta_hat_b, theta_hat, axis=-1) + z0_hat = ndtri(percentile) + + # calculate a_hat + theta_hat_ji = [] # j is for sample of data, i is for jackknife resample + for j, sample in enumerate(data): + # _jackknife_resample will add an axis prior to the last axis that + # corresponds with the different jackknife resamples. Do the same for + # each sample of the data to ensure broadcastability. We need to + # create a copy of the list containing the samples anyway, so do this + # in the loop to simplify the code. This is not the bottleneck... 
+ samples = [np.expand_dims(sample, -2) for sample in data] + theta_hat_i = [] + for jackknife_sample in _jackknife_resample(sample, batch): + samples[j] = jackknife_sample + broadcasted = _broadcast_arrays(samples, axis=-1) + theta_hat_i.append(statistic(*broadcasted, axis=-1)) + theta_hat_ji.append(theta_hat_i) + + theta_hat_ji = [np.concatenate(theta_hat_i, axis=-1) + for theta_hat_i in theta_hat_ji] + + n_j = [theta_hat_i.shape[-1] for theta_hat_i in theta_hat_ji] + + theta_hat_j_dot = [theta_hat_i.mean(axis=-1, keepdims=True) + for theta_hat_i in theta_hat_ji] + + U_ji = [(n - 1) * (theta_hat_dot - theta_hat_i) + for theta_hat_dot, theta_hat_i, n + in zip(theta_hat_j_dot, theta_hat_ji, n_j)] + + nums = [(U_i**3).sum(axis=-1)/n**3 for U_i, n in zip(U_ji, n_j)] + dens = [(U_i**2).sum(axis=-1)/n**2 for U_i, n in zip(U_ji, n_j)] + a_hat = 1/6 * sum(nums) / sum(dens)**(3/2) + + # calculate alpha_1, alpha_2 + z_alpha = ndtri(alpha) + z_1alpha = -z_alpha + num1 = z0_hat + z_alpha + alpha_1 = ndtr(z0_hat + num1/(1 - a_hat*num1)) + num2 = z0_hat + z_1alpha + alpha_2 = ndtr(z0_hat + num2/(1 - a_hat*num2)) + return alpha_1, alpha_2, a_hat # return a_hat for testing + + +def _bootstrap_iv(data, statistic, vectorized, paired, axis, confidence_level, + alternative, n_resamples, batch, method, bootstrap_result, + random_state): + """Input validation and standardization for `bootstrap`.""" + + if vectorized not in {True, False, None}: + raise ValueError("`vectorized` must be `True`, `False`, or `None`.") + + if vectorized is None: + vectorized = 'axis' in inspect.signature(statistic).parameters + + if not vectorized: + statistic = _vectorize_statistic(statistic) + + axis_int = int(axis) + if axis != axis_int: + raise ValueError("`axis` must be an integer.") + + n_samples = 0 + try: + n_samples = len(data) + except TypeError: + raise ValueError("`data` must be a sequence of samples.") + + if n_samples == 0: + raise ValueError("`data` must contain at least one sample.") + + 
message = ("Ignoring the dimension specified by `axis`, arrays in `data` do not " + "have the same shape. Beginning in SciPy 1.16.0, `bootstrap` will " + "explicitly broadcast elements of `data` to the same shape (ignoring " + "`axis`) before performing the calculation. To avoid this warning in " + "the meantime, ensure that all samples have the same shape (except " + "potentially along `axis`).") + data = [np.atleast_1d(sample) for sample in data] + reduced_shapes = set() + for sample in data: + reduced_shape = list(sample.shape) + reduced_shape.pop(axis) + reduced_shapes.add(tuple(reduced_shape)) + if len(reduced_shapes) != 1: + warnings.warn(message, FutureWarning, stacklevel=3) + + data_iv = [] + for sample in data: + if sample.shape[axis_int] <= 1: + raise ValueError("each sample in `data` must contain two or more " + "observations along `axis`.") + sample = np.moveaxis(sample, axis_int, -1) + data_iv.append(sample) + + if paired not in {True, False}: + raise ValueError("`paired` must be `True` or `False`.") + + if paired: + n = data_iv[0].shape[-1] + for sample in data_iv[1:]: + if sample.shape[-1] != n: + message = ("When `paired is True`, all samples must have the " + "same length along `axis`") + raise ValueError(message) + + # to generate the bootstrap distribution for paired-sample statistics, + # resample the indices of the observations + def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic): + data = [sample[..., i] for sample in data] + return unpaired_statistic(*data, axis=axis) + + data_iv = [np.arange(n)] + + confidence_level_float = float(confidence_level) + + alternative = alternative.lower() + alternatives = {'two-sided', 'less', 'greater'} + if alternative not in alternatives: + raise ValueError(f"`alternative` must be one of {alternatives}") + + n_resamples_int = int(n_resamples) + if n_resamples != n_resamples_int or n_resamples_int < 0: + raise ValueError("`n_resamples` must be a non-negative integer.") + + if batch is None: 
+ batch_iv = batch + else: + batch_iv = int(batch) + if batch != batch_iv or batch_iv <= 0: + raise ValueError("`batch` must be a positive integer or None.") + + methods = {'percentile', 'basic', 'bca'} + method = method.lower() + if method not in methods: + raise ValueError(f"`method` must be in {methods}") + + message = "`bootstrap_result` must have attribute `bootstrap_distribution'" + if (bootstrap_result is not None + and not hasattr(bootstrap_result, "bootstrap_distribution")): + raise ValueError(message) + + message = ("Either `bootstrap_result.bootstrap_distribution.size` or " + "`n_resamples` must be positive.") + if ((not bootstrap_result or + not bootstrap_result.bootstrap_distribution.size) + and n_resamples_int == 0): + raise ValueError(message) + + random_state = check_random_state(random_state) + + return (data_iv, statistic, vectorized, paired, axis_int, + confidence_level_float, alternative, n_resamples_int, batch_iv, + method, bootstrap_result, random_state) + + +@dataclass +class BootstrapResult: + """Result object returned by `scipy.stats.bootstrap`. + + Attributes + ---------- + confidence_interval : ConfidenceInterval + The bootstrap confidence interval as an instance of + `collections.namedtuple` with attributes `low` and `high`. + bootstrap_distribution : ndarray + The bootstrap distribution, that is, the value of `statistic` for + each resample. The last dimension corresponds with the resamples + (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``). + standard_error : float or ndarray + The bootstrap standard error, that is, the sample standard + deviation of the bootstrap distribution. 
+ + """ + confidence_interval: ConfidenceInterval + bootstrap_distribution: np.ndarray + standard_error: float | np.ndarray + + +def bootstrap(data, statistic, *, n_resamples=9999, batch=None, + vectorized=None, paired=False, axis=0, confidence_level=0.95, + alternative='two-sided', method='BCa', bootstrap_result=None, + random_state=None): + r""" + Compute a two-sided bootstrap confidence interval of a statistic. + + When `method` is ``'percentile'`` and `alternative` is ``'two-sided'``, + a bootstrap confidence interval is computed according to the following + procedure. + + 1. Resample the data: for each sample in `data` and for each of + `n_resamples`, take a random sample of the original sample + (with replacement) of the same size as the original sample. + + 2. Compute the bootstrap distribution of the statistic: for each set of + resamples, compute the test statistic. + + 3. Determine the confidence interval: find the interval of the bootstrap + distribution that is + + - symmetric about the median and + - contains `confidence_level` of the resampled statistic values. + + While the ``'percentile'`` method is the most intuitive, it is rarely + used in practice. Two more common methods are available, ``'basic'`` + ('reverse percentile') and ``'BCa'`` ('bias-corrected and accelerated'); + they differ in how step 3 is performed. + + If the samples in `data` are taken at random from their respective + distributions :math:`n` times, the confidence interval returned by + `bootstrap` will contain the true value of the statistic for those + distributions approximately `confidence_level`:math:`\, \times \, n` times. + + Parameters + ---------- + data : sequence of array-like + Each element of `data` is a sample containing scalar observations from an + underlying distribution. Elements of `data` must be broadcastable to the + same shape (with the possible exception of the dimension specified by `axis`). + + .. 
versionchanged:: 1.14.0 + `bootstrap` will now emit a ``FutureWarning`` if the shapes of the + elements of `data` are not the same (with the exception of the dimension + specified by `axis`). + Beginning in SciPy 1.16.0, `bootstrap` will explicitly broadcast the + elements to the same shape (except along `axis`) before performing + the calculation. + + statistic : callable + Statistic for which the confidence interval is to be calculated. + `statistic` must be a callable that accepts ``len(data)`` samples + as separate arguments and returns the resulting statistic. + If `vectorized` is set ``True``, + `statistic` must also accept a keyword argument `axis` and be + vectorized to compute the statistic along the provided `axis`. + n_resamples : int, default: ``9999`` + The number of resamples performed to form the bootstrap distribution + of the statistic. + batch : int, optional + The number of resamples to process in each vectorized call to + `statistic`. Memory usage is O( `batch` * ``n`` ), where ``n`` is the + sample size. Default is ``None``, in which case ``batch = n_resamples`` + (or ``batch = max(n_resamples, n)`` for ``method='BCa'``). + vectorized : bool, optional + If `vectorized` is set ``False``, `statistic` will not be passed + keyword argument `axis` and is expected to calculate the statistic + only for 1D samples. If ``True``, `statistic` will be passed keyword + argument `axis` and is expected to calculate the statistic along `axis` + when passed an ND sample array. If ``None`` (default), `vectorized` + will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of + a vectorized statistic typically reduces computation time. + paired : bool, default: ``False`` + Whether the statistic treats corresponding elements of the samples + in `data` as paired. + axis : int, default: ``0`` + The axis of the samples in `data` along which the `statistic` is + calculated. 
+ confidence_level : float, default: ``0.95`` + The confidence level of the confidence interval. + alternative : {'two-sided', 'less', 'greater'}, default: ``'two-sided'`` + Choose ``'two-sided'`` (default) for a two-sided confidence interval, + ``'less'`` for a one-sided confidence interval with the lower bound + at ``-np.inf``, and ``'greater'`` for a one-sided confidence interval + with the upper bound at ``np.inf``. The other bound of the one-sided + confidence intervals is the same as that of a two-sided confidence + interval with `confidence_level` twice as far from 1.0; e.g. the upper + bound of a 95% ``'less'`` confidence interval is the same as the upper + bound of a 90% ``'two-sided'`` confidence interval. + method : {'percentile', 'basic', 'bca'}, default: ``'BCa'`` + Whether to return the 'percentile' bootstrap confidence interval + (``'percentile'``), the 'basic' (AKA 'reverse') bootstrap confidence + interval (``'basic'``), or the bias-corrected and accelerated bootstrap + confidence interval (``'BCa'``). + bootstrap_result : BootstrapResult, optional + Provide the result object returned by a previous call to `bootstrap` + to include the previous bootstrap distribution in the new bootstrap + distribution. This can be used, for example, to change + `confidence_level`, change `method`, or see the effect of performing + additional resampling without repeating computations. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is ``None`` (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. 
+ + Returns + ------- + res : BootstrapResult + An object with attributes: + + confidence_interval : ConfidenceInterval + The bootstrap confidence interval as an instance of + `collections.namedtuple` with attributes `low` and `high`. + bootstrap_distribution : ndarray + The bootstrap distribution, that is, the value of `statistic` for + each resample. The last dimension corresponds with the resamples + (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``). + standard_error : float or ndarray + The bootstrap standard error, that is, the sample standard + deviation of the bootstrap distribution. + + Warns + ----- + `~scipy.stats.DegenerateDataWarning` + Generated when ``method='BCa'`` and the bootstrap distribution is + degenerate (e.g. all elements are identical). + + Notes + ----- + Elements of the confidence interval may be NaN for ``method='BCa'`` if + the bootstrap distribution is degenerate (e.g. all elements are identical). + In this case, consider using another `method` or inspecting `data` for + indications that other analysis may be more appropriate (e.g. all + observations are identical). + + References + ---------- + .. [1] B. Efron and R. J. Tibshirani, An Introduction to the Bootstrap, + Chapman & Hall/CRC, Boca Raton, FL, USA (1993) + .. [2] Nathaniel E. Helwig, "Bootstrap Confidence Intervals", + http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf + .. [3] Bootstrapping (statistics), Wikipedia, + https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 + + Examples + -------- + Suppose we have sampled data from an unknown distribution. + + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> from scipy.stats import norm + >>> dist = norm(loc=2, scale=4) # our "unknown" distribution + >>> data = dist.rvs(size=100, random_state=rng) + + We are interested in the standard deviation of the distribution. 
+ + >>> std_true = dist.std() # the true value of the statistic + >>> print(std_true) + 4.0 + >>> std_sample = np.std(data) # the sample statistic + >>> print(std_sample) + 3.9460644295563863 + + The bootstrap is used to approximate the variability we would expect if we + were to repeatedly sample from the unknown distribution and calculate the + statistic of the sample each time. It does this by repeatedly resampling + values *from the original sample* with replacement and calculating the + statistic of each resample. This results in a "bootstrap distribution" of + the statistic. + + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import bootstrap + >>> data = (data,) # samples must be in a sequence + >>> res = bootstrap(data, np.std, confidence_level=0.9, + ... random_state=rng) + >>> fig, ax = plt.subplots() + >>> ax.hist(res.bootstrap_distribution, bins=25) + >>> ax.set_title('Bootstrap Distribution') + >>> ax.set_xlabel('statistic value') + >>> ax.set_ylabel('frequency') + >>> plt.show() + + The standard error quantifies this variability. It is calculated as the + standard deviation of the bootstrap distribution. + + >>> res.standard_error + 0.24427002125829136 + >>> res.standard_error == np.std(res.bootstrap_distribution, ddof=1) + True + + The bootstrap distribution of the statistic is often approximately normal + with scale equal to the standard error. + + >>> x = np.linspace(3, 5) + >>> pdf = norm.pdf(x, loc=std_sample, scale=res.standard_error) + >>> fig, ax = plt.subplots() + >>> ax.hist(res.bootstrap_distribution, bins=25, density=True) + >>> ax.plot(x, pdf) + >>> ax.set_title('Normal Approximation of the Bootstrap Distribution') + >>> ax.set_xlabel('statistic value') + >>> ax.set_ylabel('pdf') + >>> plt.show() + + This suggests that we could construct a 90% confidence interval on the + statistic based on quantiles of this normal distribution. 
+ + >>> norm.interval(0.9, loc=std_sample, scale=res.standard_error) + (3.5442759991341726, 4.3478528599786) + + Due to central limit theorem, this normal approximation is accurate for a + variety of statistics and distributions underlying the samples; however, + the approximation is not reliable in all cases. Because `bootstrap` is + designed to work with arbitrary underlying distributions and statistics, + it uses more advanced techniques to generate an accurate confidence + interval. + + >>> print(res.confidence_interval) + ConfidenceInterval(low=3.57655333533867, high=4.382043696342881) + + If we sample from the original distribution 100 times and form a bootstrap + confidence interval for each sample, the confidence interval + contains the true value of the statistic approximately 90% of the time. + + >>> n_trials = 100 + >>> ci_contains_true_std = 0 + >>> for i in range(n_trials): + ... data = (dist.rvs(size=100, random_state=rng),) + ... res = bootstrap(data, np.std, confidence_level=0.9, + ... n_resamples=999, random_state=rng) + ... ci = res.confidence_interval + ... if ci[0] < std_true < ci[1]: + ... ci_contains_true_std += 1 + >>> print(ci_contains_true_std) + 88 + + Rather than writing a loop, we can also determine the confidence intervals + for all 100 samples at once. + + >>> data = (dist.rvs(size=(n_trials, 100), random_state=rng),) + >>> res = bootstrap(data, np.std, axis=-1, confidence_level=0.9, + ... n_resamples=999, random_state=rng) + >>> ci_l, ci_u = res.confidence_interval + + Here, `ci_l` and `ci_u` contain the confidence interval for each of the + ``n_trials = 100`` samples. + + >>> print(ci_l[:5]) + [3.86401283 3.33304394 3.52474647 3.54160981 3.80569252] + >>> print(ci_u[:5]) + [4.80217409 4.18143252 4.39734707 4.37549713 4.72843584] + + And again, approximately 90% contain the true value, ``std_true = 4``. 
+ + >>> print(np.sum((ci_l < std_true) & (std_true < ci_u))) + 93 + + `bootstrap` can also be used to estimate confidence intervals of + multi-sample statistics. For example, to get a confidence interval + for the difference between means, we write a function that accepts + two sample arguments and returns only the statistic. The use of the + ``axis`` argument ensures that all mean calculations are perform in + a single vectorized call, which is faster than looping over pairs + of resamples in Python. + + >>> def my_statistic(sample1, sample2, axis=-1): + ... mean1 = np.mean(sample1, axis=axis) + ... mean2 = np.mean(sample2, axis=axis) + ... return mean1 - mean2 + + Here, we use the 'percentile' method with the default 95% confidence level. + + >>> sample1 = norm.rvs(scale=1, size=100, random_state=rng) + >>> sample2 = norm.rvs(scale=2, size=100, random_state=rng) + >>> data = (sample1, sample2) + >>> res = bootstrap(data, my_statistic, method='basic', random_state=rng) + >>> print(my_statistic(sample1, sample2)) + 0.16661030792089523 + >>> print(res.confidence_interval) + ConfidenceInterval(low=-0.29087973240818693, high=0.6371338699912273) + + The bootstrap estimate of the standard error is also available. + + >>> print(res.standard_error) + 0.238323948262459 + + Paired-sample statistics work, too. For example, consider the Pearson + correlation coefficient. + + >>> from scipy.stats import pearsonr + >>> n = 100 + >>> x = np.linspace(0, 10, n) + >>> y = x + rng.uniform(size=n) + >>> print(pearsonr(x, y)[0]) # element 0 is the statistic + 0.9954306665125647 + + We wrap `pearsonr` so that it returns only the statistic, ensuring + that we use the `axis` argument because it is available. + + >>> def my_statistic(x, y, axis=-1): + ... return pearsonr(x, y, axis=axis)[0] + + We call `bootstrap` using ``paired=True``. 
+ + >>> res = bootstrap((x, y), my_statistic, paired=True, random_state=rng) + >>> print(res.confidence_interval) + ConfidenceInterval(low=0.9941504301315878, high=0.996377412215445) + + The result object can be passed back into `bootstrap` to perform additional + resampling: + + >>> len(res.bootstrap_distribution) + 9999 + >>> res = bootstrap((x, y), my_statistic, paired=True, + ... n_resamples=1000, random_state=rng, + ... bootstrap_result=res) + >>> len(res.bootstrap_distribution) + 10999 + + or to change the confidence interval options: + + >>> res2 = bootstrap((x, y), my_statistic, paired=True, + ... n_resamples=0, random_state=rng, bootstrap_result=res, + ... method='percentile', confidence_level=0.9) + >>> np.testing.assert_equal(res2.bootstrap_distribution, + ... res.bootstrap_distribution) + >>> res.confidence_interval + ConfidenceInterval(low=0.9941574828235082, high=0.9963781698210212) + + without repeating computation of the original bootstrap distribution. + + """ + # Input validation + args = _bootstrap_iv(data, statistic, vectorized, paired, axis, + confidence_level, alternative, n_resamples, batch, + method, bootstrap_result, random_state) + (data, statistic, vectorized, paired, axis, confidence_level, + alternative, n_resamples, batch, method, bootstrap_result, + random_state) = args + + theta_hat_b = ([] if bootstrap_result is None + else [bootstrap_result.bootstrap_distribution]) + + batch_nominal = batch or n_resamples or 1 + + for k in range(0, n_resamples, batch_nominal): + batch_actual = min(batch_nominal, n_resamples-k) + # Generate resamples + resampled_data = [] + for sample in data: + resample = _bootstrap_resample(sample, n_resamples=batch_actual, + random_state=random_state) + resampled_data.append(resample) + + # Compute bootstrap distribution of statistic + theta_hat_b.append(statistic(*resampled_data, axis=-1)) + theta_hat_b = np.concatenate(theta_hat_b, axis=-1) + + # Calculate percentile interval + alpha = ((1 - confidence_level)/2 
if alternative == 'two-sided' + else (1 - confidence_level)) + if method == 'bca': + interval = _bca_interval(data, statistic, axis=-1, alpha=alpha, + theta_hat_b=theta_hat_b, batch=batch)[:2] + percentile_fun = _percentile_along_axis + else: + interval = alpha, 1-alpha + + def percentile_fun(a, q): + return np.percentile(a=a, q=q, axis=-1) + + # Calculate confidence interval of statistic + ci_l = percentile_fun(theta_hat_b, interval[0]*100) + ci_u = percentile_fun(theta_hat_b, interval[1]*100) + if method == 'basic': # see [3] + theta_hat = statistic(*data, axis=-1) + ci_l, ci_u = 2*theta_hat - ci_u, 2*theta_hat - ci_l + + if alternative == 'less': + ci_l = np.full_like(ci_l, -np.inf) + elif alternative == 'greater': + ci_u = np.full_like(ci_u, np.inf) + + return BootstrapResult(confidence_interval=ConfidenceInterval(ci_l, ci_u), + bootstrap_distribution=theta_hat_b, + standard_error=np.std(theta_hat_b, ddof=1, axis=-1)) + + +def _monte_carlo_test_iv(data, rvs, statistic, vectorized, n_resamples, + batch, alternative, axis): + """Input validation for `monte_carlo_test`.""" + axis_int = int(axis) + if axis != axis_int: + raise ValueError("`axis` must be an integer.") + + if vectorized not in {True, False, None}: + raise ValueError("`vectorized` must be `True`, `False`, or `None`.") + + if not isinstance(rvs, Sequence): + rvs = (rvs,) + data = (data,) + for rvs_i in rvs: + if not callable(rvs_i): + raise TypeError("`rvs` must be callable or sequence of callables.") + + # At this point, `data` should be a sequence + # If it isn't, the user passed a sequence for `rvs` but not `data` + message = "If `rvs` is a sequence, `len(rvs)` must equal `len(data)`." 
+ try: + len(data) + except TypeError as e: + raise ValueError(message) from e + if not len(rvs) == len(data): + raise ValueError(message) + + if not callable(statistic): + raise TypeError("`statistic` must be callable.") + + if vectorized is None: + try: + signature = inspect.signature(statistic).parameters + except ValueError as e: + message = (f"Signature inspection of {statistic=} failed; " + "pass `vectorize` explicitly.") + raise ValueError(message) from e + vectorized = 'axis' in signature + + xp = array_namespace(*data) + + if not vectorized: + if is_numpy(xp): + statistic_vectorized = _vectorize_statistic(statistic) + else: + message = ("`statistic` must be vectorized (i.e. support an `axis` " + f"argument) when `data` contains {xp.__name__} arrays.") + raise ValueError(message) + else: + statistic_vectorized = statistic + + data = _broadcast_arrays(data, axis, xp=xp) + data_iv = [] + for sample in data: + sample = xp.broadcast_to(sample, (1,)) if sample.ndim == 0 else sample + sample = xp_moveaxis_to_end(sample, axis_int, xp=xp) + data_iv.append(sample) + + n_resamples_int = int(n_resamples) + if n_resamples != n_resamples_int or n_resamples_int <= 0: + raise ValueError("`n_resamples` must be a positive integer.") + + if batch is None: + batch_iv = batch + else: + batch_iv = int(batch) + if batch != batch_iv or batch_iv <= 0: + raise ValueError("`batch` must be a positive integer or None.") + + alternatives = {'two-sided', 'greater', 'less'} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f"`alternative` must be in {alternatives}") + + # Infer the desired p-value dtype based on the input types + min_float = getattr(xp, 'float16', xp.float32) + dtype = xp.result_type(*data_iv, min_float) + + return (data_iv, rvs, statistic_vectorized, vectorized, n_resamples_int, + batch_iv, alternative, axis_int, dtype, xp) + + +@dataclass +class MonteCarloTestResult: + """Result object returned by 
`scipy.stats.monte_carlo_test`. + + Attributes + ---------- + statistic : float or ndarray + The observed test statistic of the sample. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + """ + statistic: float | np.ndarray + pvalue: float | np.ndarray + null_distribution: np.ndarray + + +@_rename_parameter('sample', 'data') +def monte_carlo_test(data, rvs, statistic, *, vectorized=None, + n_resamples=9999, batch=None, alternative="two-sided", + axis=0): + r"""Perform a Monte Carlo hypothesis test. + + `data` contains a sample or a sequence of one or more samples. `rvs` + specifies the distribution(s) of the sample(s) in `data` under the null + hypothesis. The value of `statistic` for the given `data` is compared + against a Monte Carlo null distribution: the value of the statistic for + each of `n_resamples` sets of samples generated using `rvs`. This gives + the p-value, the probability of observing such an extreme value of the + test statistic under the null hypothesis. + + Parameters + ---------- + data : array-like or sequence of array-like + An array or sequence of arrays of observations. + rvs : callable or tuple of callables + A callable or sequence of callables that generates random variates + under the null hypothesis. Each element of `rvs` must be a callable + that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and + returns an N-d array sample of that shape. If `rvs` is a sequence, the + number of callables in `rvs` must match the number of samples in + `data`, i.e. ``len(rvs) == len(data)``. If `rvs` is a single callable, + `data` is treated as a single sample. + statistic : callable + Statistic for which the p-value of the hypothesis test is to be + calculated. `statistic` must be a callable that accepts a sample + (e.g. ``statistic(sample)``) or ``len(rvs)`` separate samples (e.g. 
+ ``statistic(samples1, sample2)`` if `rvs` contains two callables and + `data` contains two samples) and returns the resulting statistic. + If `vectorized` is set ``True``, `statistic` must also accept a keyword + argument `axis` and be vectorized to compute the statistic along the + provided `axis` of the samples in `data`. + vectorized : bool, optional + If `vectorized` is set ``False``, `statistic` will not be passed + keyword argument `axis` and is expected to calculate the statistic + only for 1D samples. If ``True``, `statistic` will be passed keyword + argument `axis` and is expected to calculate the statistic along `axis` + when passed ND sample arrays. If ``None`` (default), `vectorized` + will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of + a vectorized statistic typically reduces computation time. + n_resamples : int, default: 9999 + Number of samples drawn from each of the callables of `rvs`. + Equivalently, the number statistic values under the null hypothesis + used as the Monte Carlo null distribution. + batch : int, optional + The number of Monte Carlo samples to process in each call to + `statistic`. Memory usage is O( `batch` * ``sample.size[axis]`` ). Default + is ``None``, in which case `batch` equals `n_resamples`. + alternative : {'two-sided', 'less', 'greater'} + The alternative hypothesis for which the p-value is calculated. + For each alternative, the p-value is defined as follows. + + - ``'greater'`` : the percentage of the null distribution that is + greater than or equal to the observed value of the test statistic. + - ``'less'`` : the percentage of the null distribution that is + less than or equal to the observed value of the test statistic. + - ``'two-sided'`` : twice the smaller of the p-values above. + + axis : int, default: 0 + The axis of `data` (or each sample within `data`) over which to + calculate the statistic. 
+ + Returns + ------- + res : MonteCarloTestResult + An object with attributes: + + statistic : float or ndarray + The test statistic of the observed `data`. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + + .. warning:: + The p-value is calculated by counting the elements of the null + distribution that are as extreme or more extreme than the observed + value of the statistic. Due to the use of finite precision arithmetic, + some statistic functions return numerically distinct values when the + theoretical values would be exactly equal. In some cases, this could + lead to a large error in the calculated p-value. `monte_carlo_test` + guards against this by considering elements in the null distribution + that are "close" (within a relative tolerance of 100 times the + floating point epsilon of inexact dtypes) to the observed + value of the test statistic as equal to the observed value of the + test statistic. However, the user is advised to inspect the null + distribution to assess whether this method of comparison is + appropriate, and if not, calculate the p-value manually. + + References + ---------- + + .. [1] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." + Statistical Applications in Genetics and Molecular Biology 9.1 (2010). + + Examples + -------- + + Suppose we wish to test whether a small sample has been drawn from a normal + distribution. We decide that we will use the skew of the sample as a + test statistic, and we will consider a p-value of 0.05 to be statistically + significant. + + >>> import numpy as np + >>> from scipy import stats + >>> def statistic(x, axis): + ... return stats.skew(x, axis) + + After collecting our data, we calculate the observed value of the test + statistic. 
+ + >>> rng = np.random.default_rng() + >>> x = stats.skewnorm.rvs(a=1, size=50, random_state=rng) + >>> statistic(x, axis=0) + 0.12457412450240658 + + To determine the probability of observing such an extreme value of the + skewness by chance if the sample were drawn from the normal distribution, + we can perform a Monte Carlo hypothesis test. The test will draw many + samples at random from their normal distribution, calculate the skewness + of each sample, and compare our original skewness against this + distribution to determine an approximate p-value. + + >>> from scipy.stats import monte_carlo_test + >>> # because our statistic is vectorized, we pass `vectorized=True` + >>> rvs = lambda size: stats.norm.rvs(size=size, random_state=rng) + >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True) + >>> print(res.statistic) + 0.12457412450240658 + >>> print(res.pvalue) + 0.7012 + + The probability of obtaining a test statistic less than or equal to the + observed value under the null hypothesis is ~70%. This is greater than + our chosen threshold of 5%, so we cannot consider this to be significant + evidence against the null hypothesis. + + Note that this p-value essentially matches that of + `scipy.stats.skewtest`, which relies on an asymptotic distribution of a + test statistic based on the sample skewness. + + >>> stats.skewtest(x).pvalue + 0.6892046027110614 + + This asymptotic approximation is not valid for small sample sizes, but + `monte_carlo_test` can be used with samples of any size. + + >>> x = stats.skewnorm.rvs(a=1, size=7, random_state=rng) + >>> # stats.skewtest(x) would produce an error due to small sample + >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True) + + The Monte Carlo distribution of the test statistic is provided for + further investigation. 
+ + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.hist(res.null_distribution, bins=50) + >>> ax.set_title("Monte Carlo distribution of test statistic") + >>> ax.set_xlabel("Value of Statistic") + >>> ax.set_ylabel("Frequency") + >>> plt.show() + + """ + args = _monte_carlo_test_iv(data, rvs, statistic, vectorized, + n_resamples, batch, alternative, axis) + (data, rvs, statistic, vectorized, n_resamples, + batch, alternative, axis, dtype, xp) = args + + # Some statistics return plain floats; ensure they're at least a NumPy float + observed = xp.asarray(statistic(*data, axis=-1)) + observed = observed[()] if observed.ndim == 0 else observed + + n_observations = [sample.shape[-1] for sample in data] + batch_nominal = batch or n_resamples + null_distribution = [] + for k in range(0, n_resamples, batch_nominal): + batch_actual = min(batch_nominal, n_resamples - k) + resamples = [rvs_i(size=(batch_actual, n_observations_i)) + for rvs_i, n_observations_i in zip(rvs, n_observations)] + null_distribution.append(statistic(*resamples, axis=-1)) + null_distribution = xp.concat(null_distribution) + null_distribution = xp.reshape(null_distribution, [-1] + [1]*observed.ndim) + + # relative tolerance for detecting numerically distinct but + # theoretically equal values in the null distribution + eps = (0 if not xp.isdtype(observed.dtype, ('real floating')) + else xp.finfo(observed.dtype).eps*100) + gamma = xp.abs(eps * observed) + + def less(null_distribution, observed): + cmps = null_distribution <= observed + gamma + cmps = xp.asarray(cmps, dtype=dtype) + pvalues = (xp.sum(cmps, axis=0, dtype=dtype) + 1.) / (n_resamples + 1.) + return pvalues + + def greater(null_distribution, observed): + cmps = null_distribution >= observed - gamma + cmps = xp.asarray(cmps, dtype=dtype) + pvalues = (xp.sum(cmps, axis=0, dtype=dtype) + 1.) / (n_resamples + 1.) 
+ return pvalues + + def two_sided(null_distribution, observed): + pvalues_less = less(null_distribution, observed) + pvalues_greater = greater(null_distribution, observed) + pvalues = xp_minimum(pvalues_less, pvalues_greater) * 2 + return pvalues + + compare = {"less": less, + "greater": greater, + "two-sided": two_sided} + + pvalues = compare[alternative](null_distribution, observed) + pvalues = xp_clip(pvalues, 0., 1., xp=xp) + + return MonteCarloTestResult(observed, pvalues, null_distribution) + + +@dataclass +class PowerResult: + """Result object returned by `scipy.stats.power`. + + Attributes + ---------- + power : float or ndarray + The estimated power. + pvalues : float or ndarray + The simulated p-values. + """ + power: float | np.ndarray + pvalues: float | np.ndarray + + +def _wrap_kwargs(fun): + """Wrap callable to accept arbitrary kwargs and ignore unused ones""" + + try: + keys = set(inspect.signature(fun).parameters.keys()) + except ValueError: + # NumPy Generator methods can't be inspected + keys = {'size'} + + # Set keys=keys/fun=fun to avoid late binding gotcha + def wrapped_rvs_i(*args, keys=keys, fun=fun, **all_kwargs): + kwargs = {key: val for key, val in all_kwargs.items() + if key in keys} + return fun(*args, **kwargs) + return wrapped_rvs_i + + +def _power_iv(rvs, test, n_observations, significance, vectorized, + n_resamples, batch, kwargs): + """Input validation for `monte_carlo_test`.""" + + if vectorized not in {True, False, None}: + raise ValueError("`vectorized` must be `True`, `False`, or `None`.") + + if not isinstance(rvs, Sequence): + rvs = (rvs,) + n_observations = (n_observations,) + for rvs_i in rvs: + if not callable(rvs_i): + raise TypeError("`rvs` must be callable or sequence of callables.") + + if not len(rvs) == len(n_observations): + message = ("If `rvs` is a sequence, `len(rvs)` " + "must equal `len(n_observations)`.") + raise ValueError(message) + + significance = np.asarray(significance)[()] + if (not 
np.issubdtype(significance.dtype, np.floating) + or np.min(significance) < 0 or np.max(significance) > 1): + raise ValueError("`significance` must contain floats between 0 and 1.") + + kwargs = dict() if kwargs is None else kwargs + if not isinstance(kwargs, dict): + raise TypeError("`kwargs` must be a dictionary that maps keywords to arrays.") + + vals = kwargs.values() + keys = kwargs.keys() + + # Wrap callables to ignore unused keyword arguments + wrapped_rvs = [_wrap_kwargs(rvs_i) for rvs_i in rvs] + + # Broadcast, then ravel nobs/kwarg combinations. In the end, + # `nobs` and `vals` have shape (# of combinations, number of variables) + tmp = np.asarray(np.broadcast_arrays(*n_observations, *vals)) + shape = tmp.shape + if tmp.ndim == 1: + tmp = tmp[np.newaxis, :] + else: + tmp = tmp.reshape((shape[0], -1)).T + nobs, vals = tmp[:, :len(rvs)], tmp[:, len(rvs):] + nobs = nobs.astype(int) + + if not callable(test): + raise TypeError("`test` must be callable.") + + if vectorized is None: + vectorized = 'axis' in inspect.signature(test).parameters + + if not vectorized: + test_vectorized = _vectorize_statistic(test) + else: + test_vectorized = test + # Wrap `test` function to ignore unused kwargs + test_vectorized = _wrap_kwargs(test_vectorized) + + n_resamples_int = int(n_resamples) + if n_resamples != n_resamples_int or n_resamples_int <= 0: + raise ValueError("`n_resamples` must be a positive integer.") + + if batch is None: + batch_iv = batch + else: + batch_iv = int(batch) + if batch != batch_iv or batch_iv <= 0: + raise ValueError("`batch` must be a positive integer or None.") + + return (wrapped_rvs, test_vectorized, nobs, significance, vectorized, + n_resamples_int, batch_iv, vals, keys, shape[1:]) + + +def power(test, rvs, n_observations, *, significance=0.01, vectorized=None, + n_resamples=10000, batch=None, kwargs=None): + r"""Simulate the power of a hypothesis test under an alternative hypothesis. 
+ + Parameters + ---------- + test : callable + Hypothesis test for which the power is to be simulated. + `test` must be a callable that accepts a sample (e.g. ``test(sample)``) + or ``len(rvs)`` separate samples (e.g. ``test(samples1, sample2)`` if + `rvs` contains two callables and `n_observations` contains two values) + and returns the p-value of the test. + If `vectorized` is set to ``True``, `test` must also accept a keyword + argument `axis` and be vectorized to perform the test along the + provided `axis` of the samples. + Any callable from `scipy.stats` with an `axis` argument that returns an + object with a `pvalue` attribute is also acceptable. + rvs : callable or tuple of callables + A callable or sequence of callables that generate(s) random variates + under the alternative hypothesis. Each element of `rvs` must accept + keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and return an + N-d array of that shape. If `rvs` is a sequence, the number of callables + in `rvs` must match the number of elements of `n_observations`, i.e. + ``len(rvs) == len(n_observations)``. If `rvs` is a single callable, + `n_observations` is treated as a single element. + n_observations : tuple of ints or tuple of integer arrays + If a sequence of ints, each is the sizes of a sample to be passed to `test`. + If a sequence of integer arrays, the power is simulated for each + set of corresponding sample sizes. See Examples. + significance : float or array_like of floats, default: 0.01 + The threshold for significance; i.e., the p-value below which the + hypothesis test results will be considered as evidence against the null + hypothesis. Equivalently, the acceptable rate of Type I error under + the null hypothesis. If an array, the power is simulated for each + significance threshold. + kwargs : dict, optional + Keyword arguments to be passed to `rvs` and/or `test` callables. + Introspection is used to determine which keyword arguments may be + passed to each callable. 
+ The value corresponding with each keyword must be an array. + Arrays must be broadcastable with one another and with each array in + `n_observations`. The power is simulated for each set of corresponding + sample sizes and arguments. See Examples. + vectorized : bool, optional + If `vectorized` is set to ``False``, `test` will not be passed keyword + argument `axis` and is expected to perform the test only for 1D samples. + If ``True``, `test` will be passed keyword argument `axis` and is + expected to perform the test along `axis` when passed N-D sample arrays. + If ``None`` (default), `vectorized` will be set ``True`` if ``axis`` is + a parameter of `test`. Use of a vectorized test typically reduces + computation time. + n_resamples : int, default: 10000 + Number of samples drawn from each of the callables of `rvs`. + Equivalently, the number tests performed under the alternative + hypothesis to approximate the power. + batch : int, optional + The number of samples to process in each call to `test`. Memory usage is + proportional to the product of `batch` and the largest sample size. Default + is ``None``, in which case `batch` equals `n_resamples`. + + Returns + ------- + res : PowerResult + An object with attributes: + + power : float or ndarray + The estimated power against the alternative. + pvalues : ndarray + The p-values observed under the alternative hypothesis. + + Notes + ----- + The power is simulated as follows: + + - Draw many random samples (or sets of samples), each of the size(s) + specified by `n_observations`, under the alternative specified by + `rvs`. + - For each sample (or set of samples), compute the p-value according to + `test`. These p-values are recorded in the ``pvalues`` attribute of + the result object. + - Compute the proportion of p-values that are less than the `significance` + level. This is the power recorded in the ``power`` attribute of the + result object. 
+ + Suppose that `significance` is an array with shape ``shape1``, the elements + of `kwargs` and `n_observations` are mutually broadcastable to shape ``shape2``, + and `test` returns an array of p-values of shape ``shape3``. Then the result + object ``power`` attribute will be of shape ``shape1 + shape2 + shape3``, and + the ``pvalues`` attribute will be of shape ``shape2 + shape3 + (n_resamples,)``. + + Examples + -------- + Suppose we wish to simulate the power of the independent sample t-test + under the following conditions: + + - The first sample has 10 observations drawn from a normal distribution + with mean 0. + - The second sample has 12 observations drawn from a normal distribution + with mean 1.0. + - The threshold on p-values for significance is 0.05. + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng(2549598345528) + >>> + >>> test = stats.ttest_ind + >>> n_observations = (10, 12) + >>> rvs1 = rng.normal + >>> rvs2 = lambda size: rng.normal(loc=1, size=size) + >>> rvs = (rvs1, rvs2) + >>> res = stats.power(test, rvs, n_observations, significance=0.05) + >>> res.power + 0.6116 + + With samples of size 10 and 12, respectively, the power of the t-test + with a significance threshold of 0.05 is approximately 60% under the chosen + alternative. We can investigate the effect of sample size on the power + by passing sample size arrays. + + >>> import matplotlib.pyplot as plt + >>> nobs_x = np.arange(5, 21) + >>> nobs_y = nobs_x + >>> n_observations = (nobs_x, nobs_y) + >>> res = stats.power(test, rvs, n_observations, significance=0.05) + >>> ax = plt.subplot() + >>> ax.plot(nobs_x, res.power) + >>> ax.set_xlabel('Sample Size') + >>> ax.set_ylabel('Simulated Power') + >>> ax.set_title('Simulated Power of `ttest_ind` with Equal Sample Sizes') + >>> plt.show() + + Alternatively, we can investigate the impact that effect size has on the power. 
In this case, the effect size is the location of the distribution underlying + the second sample. + + >>> n_observations = (10, 12) + >>> loc = np.linspace(0, 1, 20) + >>> rvs2 = lambda size, loc: rng.normal(loc=loc, size=size) + >>> rvs = (rvs1, rvs2) + >>> res = stats.power(test, rvs, n_observations, significance=0.05, + ... kwargs={'loc': loc}) + >>> ax = plt.subplot() + >>> ax.plot(loc, res.power) + >>> ax.set_xlabel('Effect Size') + >>> ax.set_ylabel('Simulated Power') + >>> ax.set_title('Simulated Power of `ttest_ind`, Varying Effect Size') + >>> plt.show() + + We can also use `power` to estimate the Type I error rate (also referred to by the + ambiguous term "size") of a test and assess whether it matches the nominal level. + For example, the null hypothesis of `jarque_bera` is that the sample was drawn from + a distribution with the same skewness and kurtosis as the normal distribution. To + estimate the Type I error rate, we can consider the null hypothesis to be a true + *alternative* hypothesis and calculate the power. + + >>> test = stats.jarque_bera + >>> n_observations = 10 + >>> rvs = rng.normal + >>> significance = np.linspace(0.0001, 0.1, 1000) + >>> res = stats.power(test, rvs, n_observations, significance=significance) + >>> size = res.power + + As shown below, the Type I error rate of the test is far below the nominal level + for such a small sample, as mentioned in its documentation. + + >>> ax = plt.subplot() + >>> ax.plot(significance, size) + >>> ax.plot([0, 0.1], [0, 0.1], '--') + >>> ax.set_xlabel('nominal significance level') + >>> ax.set_ylabel('estimated test size (Type I error rate)') + >>> ax.set_title('Estimated test size vs nominal significance level') + >>> ax.set_aspect('equal', 'box') + >>> ax.legend(('`jarque_bera`', 'ideal test')) + >>> plt.show() + + As one might expect from such a conservative test, the power is quite low with + respect to some alternatives. 
For example, the power of the test under the + alternative that the sample was drawn from the Laplace distribution may not + be much greater than the Type I error rate. + + >>> rvs = rng.laplace + >>> significance = np.linspace(0.0001, 0.1, 1000) + >>> res = stats.power(test, rvs, n_observations, significance=0.05) + >>> print(res.power) + 0.0587 + + This is not a mistake in SciPy's implementation; it is simply due to the fact + that the null distribution of the test statistic is derived under the assumption + that the sample size is large (i.e. approaches infinity), and this asymptotic + approximation is not accurate for small samples. In such cases, resampling + and Monte Carlo methods (e.g. `permutation_test`, `goodness_of_fit`, + `monte_carlo_test`) may be more appropriate. + + """ + tmp = _power_iv(rvs, test, n_observations, significance, + vectorized, n_resamples, batch, kwargs) + (rvs, test, nobs, significance, + vectorized, n_resamples, batch, args, kwds, shape)= tmp + + batch_nominal = batch or n_resamples + pvalues = [] # results of various nobs/kwargs combinations + for nobs_i, args_i in zip(nobs, args): + kwargs_i = dict(zip(kwds, args_i)) + pvalues_i = [] # results of batches; fixed nobs/kwargs combination + for k in range(0, n_resamples, batch_nominal): + batch_actual = min(batch_nominal, n_resamples - k) + resamples = [rvs_j(size=(batch_actual, nobs_ij), **kwargs_i) + for rvs_j, nobs_ij in zip(rvs, nobs_i)] + res = test(*resamples, **kwargs_i, axis=-1) + p = getattr(res, 'pvalue', res) + pvalues_i.append(p) + # Concatenate results from batches + pvalues_i = np.concatenate(pvalues_i, axis=-1) + pvalues.append(pvalues_i) + # `test` can return result with array of p-values + shape += pvalues_i.shape[:-1] + # Concatenate results from various nobs/kwargs combinations + pvalues = np.concatenate(pvalues, axis=0) + # nobs/kwargs arrays were raveled to single axis; unravel + pvalues = pvalues.reshape(shape + (-1,)) + if significance.ndim > 0: + newdims = 
tuple(range(significance.ndim, pvalues.ndim + significance.ndim)) + significance = np.expand_dims(significance, newdims) + powers = np.mean(pvalues < significance, axis=-1) + + return PowerResult(power=powers, pvalues=pvalues) + + +@dataclass +class PermutationTestResult: + """Result object returned by `scipy.stats.permutation_test`. + + Attributes + ---------- + statistic : float or ndarray + The observed test statistic of the data. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + """ + statistic: float | np.ndarray + pvalue: float | np.ndarray + null_distribution: np.ndarray + + +def _all_partitions_concatenated(ns): + """ + Generate all partitions of indices of groups of given sizes, concatenated + + `ns` is an iterable of ints. + """ + def all_partitions(z, n): + for c in combinations(z, n): + x0 = set(c) + x1 = z - x0 + yield [x0, x1] + + def all_partitions_n(z, ns): + if len(ns) == 0: + yield [z] + return + for c in all_partitions(z, ns[0]): + for d in all_partitions_n(c[1], ns[1:]): + yield c[0:1] + d + + z = set(range(np.sum(ns))) + for partitioning in all_partitions_n(z, ns[:]): + x = np.concatenate([list(partition) + for partition in partitioning]).astype(int) + yield x + + +def _batch_generator(iterable, batch): + """A generator that yields batches of elements from an iterable""" + iterator = iter(iterable) + if batch <= 0: + raise ValueError("`batch` must be positive.") + z = [item for i, item in zip(range(batch), iterator)] + while z: # we don't want StopIteration without yielding an empty list + yield z + z = [item for i, item in zip(range(batch), iterator)] + + +def _pairings_permutations_gen(n_permutations, n_samples, n_obs_sample, batch, + random_state): + # Returns a generator that yields arrays of size + # `(batch, n_samples, n_obs_sample)`. + # Each row is an independent permutation of indices 0 to `n_obs_sample`. 
+ batch = min(batch, n_permutations) + + if hasattr(random_state, 'permuted'): + def batched_perm_generator(): + indices = np.arange(n_obs_sample) + indices = np.tile(indices, (batch, n_samples, 1)) + for k in range(0, n_permutations, batch): + batch_actual = min(batch, n_permutations-k) + # Don't permute in place, otherwise results depend on `batch` + permuted_indices = random_state.permuted(indices, axis=-1) + yield permuted_indices[:batch_actual] + else: # RandomState and early Generators don't have `permuted` + def batched_perm_generator(): + for k in range(0, n_permutations, batch): + batch_actual = min(batch, n_permutations-k) + size = (batch_actual, n_samples, n_obs_sample) + x = random_state.random(size=size) + yield np.argsort(x, axis=-1)[:batch_actual] + + return batched_perm_generator() + + +def _calculate_null_both(data, statistic, n_permutations, batch, + random_state=None): + """ + Calculate null distribution for independent sample tests. + """ + n_samples = len(data) + + # compute number of permutations + # (distinct partitions of data into samples of these sizes) + n_obs_i = [sample.shape[-1] for sample in data] # observations per sample + n_obs_ic = np.cumsum(n_obs_i) + n_obs = n_obs_ic[-1] # total number of observations + n_max = np.prod([comb(n_obs_ic[i], n_obs_ic[i-1]) + for i in range(n_samples-1, 0, -1)]) + + # perm_generator is an iterator that produces permutations of indices + # from 0 to n_obs. We'll concatenate the samples, use these indices to + # permute the data, then split the samples apart again. + if n_permutations >= n_max: + exact_test = True + n_permutations = n_max + perm_generator = _all_partitions_concatenated(n_obs_i) + else: + exact_test = False + # Neither RandomState.permutation nor Generator.permutation + # can permute axis-slices independently. If this feature is + # added in the future, batches of the desired size should be + # generated in a single call. 
+ perm_generator = (random_state.permutation(n_obs) + for i in range(n_permutations)) + + batch = batch or int(n_permutations) + null_distribution = [] + + # First, concatenate all the samples. In batches, permute samples with + # indices produced by the `perm_generator`, split them into new samples of + # the original sizes, compute the statistic for each batch, and add these + # statistic values to the null distribution. + data = np.concatenate(data, axis=-1) + for indices in _batch_generator(perm_generator, batch=batch): + indices = np.array(indices) + + # `indices` is 2D: each row is a permutation of the indices. + # We use it to index `data` along its last axis, which corresponds + # with observations. + # After indexing, the second to last axis of `data_batch` corresponds + # with permutations, and the last axis corresponds with observations. + data_batch = data[..., indices] + + # Move the permutation axis to the front: we'll concatenate a list + # of batched statistic values along this zeroth axis to form the + # null distribution. + data_batch = np.moveaxis(data_batch, -2, 0) + data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1) + null_distribution.append(statistic(*data_batch, axis=-1)) + null_distribution = np.concatenate(null_distribution, axis=0) + + return null_distribution, n_permutations, exact_test + + +def _calculate_null_pairings(data, statistic, n_permutations, batch, + random_state=None): + """ + Calculate null distribution for association tests. + """ + n_samples = len(data) + + # compute number of permutations (factorial(n) permutations of each sample) + n_obs_sample = data[0].shape[-1] # observations per sample; same for each + n_max = factorial(n_obs_sample)**n_samples + + # `perm_generator` is an iterator that produces a list of permutations of + # indices from 0 to n_obs_sample, one for each sample. 
+ if n_permutations >= n_max: + exact_test = True + n_permutations = n_max + batch = batch or int(n_permutations) + # cartesian product of the sets of all permutations of indices + perm_generator = product(*(permutations(range(n_obs_sample)) + for i in range(n_samples))) + batched_perm_generator = _batch_generator(perm_generator, batch=batch) + else: + exact_test = False + batch = batch or int(n_permutations) + # Separate random permutations of indices for each sample. + # Again, it would be nice if RandomState/Generator.permutation + # could permute each axis-slice separately. + args = n_permutations, n_samples, n_obs_sample, batch, random_state + batched_perm_generator = _pairings_permutations_gen(*args) + + null_distribution = [] + + for indices in batched_perm_generator: + indices = np.array(indices) + + # `indices` is 3D: the zeroth axis is for permutations, the next is + # for samples, and the last is for observations. Swap the first two + # to make the zeroth axis correspond with samples, as it does for + # `data`. + indices = np.swapaxes(indices, 0, 1) + + # When we're done, `data_batch` will be a list of length `n_samples`. + # Each element will be a batch of random permutations of one sample. + # The zeroth axis of each batch will correspond with permutations, + # and the last will correspond with observations. (This makes it + # easy to pass into `statistic`.) + data_batch = [None]*n_samples + for i in range(n_samples): + data_batch[i] = data[i][..., indices[i]] + data_batch[i] = np.moveaxis(data_batch[i], -2, 0) + + null_distribution.append(statistic(*data_batch, axis=-1)) + null_distribution = np.concatenate(null_distribution, axis=0) + + return null_distribution, n_permutations, exact_test + + +def _calculate_null_samples(data, statistic, n_permutations, batch, + random_state=None): + """ + Calculate null distribution for paired-sample tests. 
+ """ + n_samples = len(data) + + # By convention, the meaning of the "samples" permutations type for + # data with only one sample is to flip the sign of the observations. + # Achieve this by adding a second sample - the negative of the original. + if n_samples == 1: + data = [data[0], -data[0]] + + # The "samples" permutation strategy is the same as the "pairings" + # strategy except the roles of samples and observations are flipped. + # So swap these axes, then we'll use the function for the "pairings" + # strategy to do all the work! + data = np.swapaxes(data, 0, -1) + + # (Of course, the user's statistic doesn't know what we've done here, + # so we need to pass it what it's expecting.) + def statistic_wrapped(*data, axis): + data = np.swapaxes(data, 0, -1) + if n_samples == 1: + data = data[0:1] + return statistic(*data, axis=axis) + + return _calculate_null_pairings(data, statistic_wrapped, n_permutations, + batch, random_state) + + +def _permutation_test_iv(data, statistic, permutation_type, vectorized, + n_resamples, batch, alternative, axis, random_state): + """Input validation for `permutation_test`.""" + + axis_int = int(axis) + if axis != axis_int: + raise ValueError("`axis` must be an integer.") + + permutation_types = {'samples', 'pairings', 'independent'} + permutation_type = permutation_type.lower() + if permutation_type not in permutation_types: + raise ValueError(f"`permutation_type` must be in {permutation_types}.") + + if vectorized not in {True, False, None}: + raise ValueError("`vectorized` must be `True`, `False`, or `None`.") + + if vectorized is None: + vectorized = 'axis' in inspect.signature(statistic).parameters + + if not vectorized: + statistic = _vectorize_statistic(statistic) + + message = "`data` must be a tuple containing at least two samples" + try: + if len(data) < 2 and permutation_type == 'independent': + raise ValueError(message) + except TypeError: + raise TypeError(message) + + data = _broadcast_arrays(data, axis) + data_iv 
= [] + for sample in data: + sample = np.atleast_1d(sample) + if sample.shape[axis] <= 1: + raise ValueError("each sample in `data` must contain two or more " + "observations along `axis`.") + sample = np.moveaxis(sample, axis_int, -1) + data_iv.append(sample) + + n_resamples_int = (int(n_resamples) if not np.isinf(n_resamples) + else np.inf) + if n_resamples != n_resamples_int or n_resamples_int <= 0: + raise ValueError("`n_resamples` must be a positive integer.") + + if batch is None: + batch_iv = batch + else: + batch_iv = int(batch) + if batch != batch_iv or batch_iv <= 0: + raise ValueError("`batch` must be a positive integer or None.") + + alternatives = {'two-sided', 'greater', 'less'} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f"`alternative` must be in {alternatives}") + + random_state = check_random_state(random_state) + + return (data_iv, statistic, permutation_type, vectorized, n_resamples_int, + batch_iv, alternative, axis_int, random_state) + + +def permutation_test(data, statistic, *, permutation_type='independent', + vectorized=None, n_resamples=9999, batch=None, + alternative="two-sided", axis=0, random_state=None): + r""" + Performs a permutation test of a given statistic on provided data. + + For independent sample statistics, the null hypothesis is that the data are + randomly sampled from the same distribution. + For paired sample statistics, two null hypothesis can be tested: + that the data are paired at random or that the data are assigned to samples + at random. + + Parameters + ---------- + data : iterable of array-like + Contains the samples, each of which is an array of observations. + Dimensions of sample arrays must be compatible for broadcasting except + along `axis`. + statistic : callable + Statistic for which the p-value of the hypothesis test is to be + calculated. `statistic` must be a callable that accepts samples + as separate arguments (e.g. 
``statistic(*data)``) and returns the + resulting statistic. + If `vectorized` is set ``True``, `statistic` must also accept a keyword + argument `axis` and be vectorized to compute the statistic along the + provided `axis` of the sample arrays. + permutation_type : {'independent', 'samples', 'pairings'}, optional + The type of permutations to be performed, in accordance with the + null hypothesis. The first two permutation types are for paired sample + statistics, in which all samples contain the same number of + observations and observations with corresponding indices along `axis` + are considered to be paired; the third is for independent sample + statistics. + + - ``'samples'`` : observations are assigned to different samples + but remain paired with the same observations from other samples. + This permutation type is appropriate for paired sample hypothesis + tests such as the Wilcoxon signed-rank test and the paired t-test. + - ``'pairings'`` : observations are paired with different observations, + but they remain within the same sample. This permutation type is + appropriate for association/correlation tests with statistics such + as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's + :math:`r`. + - ``'independent'`` (default) : observations are assigned to different + samples. Samples may contain different numbers of observations. This + permutation type is appropriate for independent sample hypothesis + tests such as the Mann-Whitney :math:`U` test and the independent + sample t-test. + + Please see the Notes section below for more detailed descriptions + of the permutation types. + + vectorized : bool, optional + If `vectorized` is set ``False``, `statistic` will not be passed + keyword argument `axis` and is expected to calculate the statistic + only for 1D samples. If ``True``, `statistic` will be passed keyword + argument `axis` and is expected to calculate the statistic along `axis` + when passed an ND sample array. 
If ``None`` (default), `vectorized` + will be set ``True`` if ``axis`` is a parameter of `statistic`. Use + of a vectorized statistic typically reduces computation time. + n_resamples : int or np.inf, default: 9999 + Number of random permutations (resamples) used to approximate the null + distribution. If greater than or equal to the number of distinct + permutations, the exact null distribution will be computed. + Note that the number of distinct permutations grows very rapidly with + the sizes of samples, so exact tests are feasible only for very small + data sets. + batch : int, optional + The number of permutations to process in each call to `statistic`. + Memory usage is O( `batch` * ``n`` ), where ``n`` is the total size + of all samples, regardless of the value of `vectorized`. Default is + ``None``, in which case ``batch`` is the number of permutations. + alternative : {'two-sided', 'less', 'greater'}, optional + The alternative hypothesis for which the p-value is calculated. + For each alternative, the p-value is defined for exact tests as + follows. + + - ``'greater'`` : the percentage of the null distribution that is + greater than or equal to the observed value of the test statistic. + - ``'less'`` : the percentage of the null distribution that is + less than or equal to the observed value of the test statistic. + - ``'two-sided'`` (default) : twice the smaller of the p-values above. + + Note that p-values for randomized tests are calculated according to the + conservative (over-estimated) approximation suggested in [2]_ and [3]_ + rather than the unbiased estimator suggested in [4]_. That is, when + calculating the proportion of the randomized null distribution that is + as extreme as the observed value of the test statistic, the values in + the numerator and denominator are both increased by one. 
An + interpretation of this adjustment is that the observed value of the + test statistic is always included as an element of the randomized + null distribution. + The convention used for two-sided p-values is not universal; + the observed test statistic and null distribution are returned in + case a different definition is preferred. + + axis : int, default: 0 + The axis of the (broadcasted) samples over which to calculate the + statistic. If samples have a different number of dimensions, + singleton dimensions are prepended to samples with fewer dimensions + before `axis` is considered. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate permutations. + + If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + res : PermutationTestResult + An object with attributes: + + statistic : float or ndarray + The observed test statistic of the data. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + + Notes + ----- + + The three types of permutation tests supported by this function are + described below. + + **Unpaired statistics** (``permutation_type='independent'``): + + The null hypothesis associated with this permutation type is that all + observations are sampled from the same underlying distribution and that + they have been assigned to one of the samples at random. + + Suppose ``data`` contains two samples; e.g. ``a, b = data``. 
+ When ``1 < n_resamples < binom(n, k)``, where + + * ``k`` is the number of observations in ``a``, + * ``n`` is the total number of observations in ``a`` and ``b``, and + * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), + + the data are pooled (concatenated), randomly assigned to either the first + or second sample, and the statistic is calculated. This process is + performed repeatedly, `permutation` times, generating a distribution of the + statistic under the null hypothesis. The statistic of the original + data is compared to this distribution to determine the p-value. + + When ``n_resamples >= binom(n, k)``, an exact test is performed: the data + are *partitioned* between the samples in each distinct way exactly once, + and the exact null distribution is formed. + Note that for a given partitioning of the data between the samples, + only one ordering/permutation of the data *within* each sample is + considered. For statistics that do not depend on the order of the data + within samples, this dramatically reduces computational cost without + affecting the shape of the null distribution (because the frequency/count + of each value is affected by the same factor). + + For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this + permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``. + Because only one ordering/permutation of the data *within* each sample + is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]`` + and ``y = [a4, a3, b1]`` would *not* be considered distinct from the + example above. + + ``permutation_type='independent'`` does not support one-sample statistics, + but it can be applied to statistics with more than two samples. 
In this + case, if ``n`` is an array of the number of observations within each + sample, the number of distinct partitions is:: + + np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]) + + **Paired statistics, permute pairings** (``permutation_type='pairings'``): + + The null hypothesis associated with this permutation type is that + observations within each sample are drawn from the same underlying + distribution and that pairings with elements of other samples are + assigned at random. + + Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we + wish to consider all possible pairings of elements of ``a`` with elements + of a second sample, ``b``. Let ``n`` be the number of observations in + ``a``, which must also equal the number of observations in ``b``. + + When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are + randomly permuted. The user-supplied statistic accepts one data argument, + say ``a_perm``, and calculates the statistic considering ``a_perm`` and + ``b``. This process is performed repeatedly, `permutation` times, + generating a distribution of the statistic under the null hypothesis. + The statistic of the original data is compared to this distribution to + determine the p-value. + + When ``n_resamples >= factorial(n)``, an exact test is performed: + ``a`` is permuted in each distinct way exactly once. Therefore, the + `statistic` is computed for each unique pairing of samples between ``a`` + and ``b`` exactly once. + + For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this + permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left + in its original order. + + ``permutation_type='pairings'`` supports ``data`` containing any number + of samples, each of which must contain the same number of observations. + All samples provided in ``data`` are permuted *independently*. 
Therefore, + if ``m`` is the number of samples and ``n`` is the number of observations + within each sample, then the number of permutations in an exact test is:: + + factorial(n)**m + + Note that if a two-sample statistic, for example, does not inherently + depend on the order in which observations are provided - only on the + *pairings* of observations - then only one of the two samples should be + provided in ``data``. This dramatically reduces computational cost without + affecting the shape of the null distribution (because the frequency/count + of each value is affected by the same factor). + + **Paired statistics, permute samples** (``permutation_type='samples'``): + + The null hypothesis associated with this permutation type is that + observations within each pair are drawn from the same underlying + distribution and that the sample to which they are assigned is random. + + Suppose ``data`` contains two samples; e.g. ``a, b = data``. + Let ``n`` be the number of observations in ``a``, which must also equal + the number of observations in ``b``. + + When ``1 < n_resamples < 2**n``, the elements of ``a`` and ``b`` are + randomly swapped between samples (maintaining their pairings) and the + statistic is calculated. This process is performed repeatedly, + `n_resamples` times, generating a distribution of the statistic under the + null hypothesis. The statistic of the original data is compared to this + distribution to determine the p-value. + + When ``n_resamples >= 2**n``, an exact test is performed: the observations + are assigned to the two samples in each distinct way (while maintaining + pairings) exactly once. + + For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this + permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``. + + ``permutation_type='samples'`` supports ``data`` containing any number + of samples, each of which must contain the same number of observations. 
+ If ``data`` contains more than one sample, paired observations within + ``data`` are exchanged between samples *independently*. Therefore, if ``m`` + is the number of samples and ``n`` is the number of observations within + each sample, then the number of permutations in an exact test is:: + + factorial(m)**n + + Several paired-sample statistical tests, such as the Wilcoxon signed rank + test and paired-sample t-test, can be performed considering only the + *difference* between two paired elements. Accordingly, if ``data`` contains + only one sample, then the null distribution is formed by independently + changing the *sign* of each observation. + + .. warning:: + The p-value is calculated by counting the elements of the null + distribution that are as extreme or more extreme than the observed + value of the statistic. Due to the use of finite precision arithmetic, + some statistic functions return numerically distinct values when the + theoretical values would be exactly equal. In some cases, this could + lead to a large error in the calculated p-value. `permutation_test` + guards against this by considering elements in the null distribution + that are "close" (within a relative tolerance of 100 times the + floating point epsilon of inexact dtypes) to the observed + value of the test statistic as equal to the observed value of the + test statistic. However, the user is advised to inspect the null + distribution to assess whether this method of comparison is + appropriate, and if not, calculate the p-value manually. See example + below. + + References + ---------- + + .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951). + .. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." + Statistical Applications in Genetics and Molecular Biology 9.1 (2010). + .. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference". + Statistical Science (2004). + .. [4] B. 
Efron and R. J. Tibshirani. An Introduction to the Bootstrap + (1993). + + Examples + -------- + + Suppose we wish to test whether two samples are drawn from the same + distribution. Assume that the underlying distributions are unknown to us, + and that before observing the data, we hypothesized that the mean of the + first sample would be less than that of the second sample. We decide that + we will use the difference between the sample means as a test statistic, + and we will consider a p-value of 0.05 to be statistically significant. + + For efficiency, we write the function defining the test statistic in a + vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the + statistic will be calculated for each axis-slice along `axis`. + + >>> import numpy as np + >>> def statistic(x, y, axis): + ... return np.mean(x, axis=axis) - np.mean(y, axis=axis) + + After collecting our data, we calculate the observed value of the test + statistic. + + >>> from scipy.stats import norm + >>> rng = np.random.default_rng() + >>> x = norm.rvs(size=5, random_state=rng) + >>> y = norm.rvs(size=6, loc = 3, random_state=rng) + >>> statistic(x, y, 0) + -3.5411688580987266 + + Indeed, the test statistic is negative, suggesting that the true mean of + the distribution underlying ``x`` is less than that of the distribution + underlying ``y``. To determine the probability of this occurring by chance + if the two samples were drawn from the same distribution, we perform + a permutation test. + + >>> from scipy.stats import permutation_test + >>> # because our statistic is vectorized, we pass `vectorized=True` + >>> # `n_resamples=np.inf` indicates that an exact test is to be performed + >>> res = permutation_test((x, y), statistic, vectorized=True, + ... 
n_resamples=np.inf, alternative='less') + >>> print(res.statistic) + -3.5411688580987266 + >>> print(res.pvalue) + 0.004329004329004329 + + The probability of obtaining a test statistic less than or equal to the + observed value under the null hypothesis is 0.4329%. This is less than our + chosen threshold of 5%, so we consider this to be significant evidence + against the null hypothesis in favor of the alternative. + + Because the size of the samples above was small, `permutation_test` could + perform an exact test. For larger samples, we resort to a randomized + permutation test. + + >>> x = norm.rvs(size=100, random_state=rng) + >>> y = norm.rvs(size=120, loc=0.2, random_state=rng) + >>> res = permutation_test((x, y), statistic, n_resamples=9999, + ... vectorized=True, alternative='less', + ... random_state=rng) + >>> print(res.statistic) + -0.4230459671240913 + >>> print(res.pvalue) + 0.0015 + + The approximate probability of obtaining a test statistic less than or + equal to the observed value under the null hypothesis is 0.0225%. This is + again less than our chosen threshold of 5%, so again we have significant + evidence to reject the null hypothesis in favor of the alternative. + + For large samples and number of permutations, the result is comparable to + that of the corresponding asymptotic test, the independent sample t-test. + + >>> from scipy.stats import ttest_ind + >>> res_asymptotic = ttest_ind(x, y, alternative='less') + >>> print(res_asymptotic.pvalue) + 0.0014669545224902675 + + The permutation distribution of the test statistic is provided for + further investigation. + + >>> import matplotlib.pyplot as plt + >>> plt.hist(res.null_distribution, bins=50) + >>> plt.title("Permutation distribution of test statistic") + >>> plt.xlabel("Value of Statistic") + >>> plt.ylabel("Frequency") + >>> plt.show() + + Inspection of the null distribution is essential if the statistic suffers + from inaccuracy due to limited machine precision. 
Consider the following + case: + + >>> from scipy.stats import pearsonr + >>> x = [1, 2, 4, 3] + >>> y = [2, 4, 6, 8] + >>> def statistic(x, y, axis=-1): + ... return pearsonr(x, y, axis=axis).statistic + >>> res = permutation_test((x, y), statistic, vectorized=True, + ... permutation_type='pairings', + ... alternative='greater') + >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution + + In this case, some elements of the null distribution differ from the + observed value of the correlation coefficient ``r`` due to numerical noise. + We manually inspect the elements of the null distribution that are nearly + the same as the observed value of the test statistic. + + >>> r + 0.7999999999999999 + >>> unique = np.unique(null) + >>> unique + array([-1. , -1. , -0.8, -0.8, -0.8, -0.6, -0.4, -0.4, -0.2, -0.2, -0.2, + 0. , 0.2, 0.2, 0.2, 0.4, 0.4, 0.6, 0.8, 0.8, 0.8, 1. , + 1. ]) # may vary + >>> unique[np.isclose(r, unique)].tolist() + [0.7999999999999998, 0.7999999999999999, 0.8] # may vary + + If `permutation_test` were to perform the comparison naively, the + elements of the null distribution with value ``0.7999999999999998`` would + not be considered as extreme or more extreme as the observed value of the + statistic, so the calculated p-value would be too small. + + >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null) + >>> incorrect_pvalue + 0.14583333333333334 # may vary + + Instead, `permutation_test` treats elements of the null distribution that + are within ``max(1e-14, abs(r)*1e-14)`` of the observed value of the + statistic ``r`` to be equal to ``r``. 
+ + >>> correct_pvalue = np.count_nonzero(null >= r - 1e-14) / len(null) + >>> correct_pvalue + 0.16666666666666666 + >>> res.pvalue == correct_pvalue + True + + This method of comparison is expected to be accurate in most practical + situations, but the user is advised to assess this by inspecting the + elements of the null distribution that are close to the observed value + of the statistic. Also, consider the use of statistics that can be + calculated using exact arithmetic (e.g. integer statistics). + + """ + args = _permutation_test_iv(data, statistic, permutation_type, vectorized, + n_resamples, batch, alternative, axis, + random_state) + (data, statistic, permutation_type, vectorized, n_resamples, batch, + alternative, axis, random_state) = args + + observed = statistic(*data, axis=-1) + + null_calculators = {"pairings": _calculate_null_pairings, + "samples": _calculate_null_samples, + "independent": _calculate_null_both} + null_calculator_args = (data, statistic, n_resamples, + batch, random_state) + calculate_null = null_calculators[permutation_type] + null_distribution, n_resamples, exact_test = ( + calculate_null(*null_calculator_args)) + + # See References [2] and [3] + adjustment = 0 if exact_test else 1 + + # relative tolerance for detecting numerically distinct but + # theoretically equal values in the null distribution + eps = (0 if not np.issubdtype(observed.dtype, np.inexact) + else np.finfo(observed.dtype).eps*100) + gamma = np.abs(eps * observed) + + def less(null_distribution, observed): + cmps = null_distribution <= observed + gamma + pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) + return pvalues + + def greater(null_distribution, observed): + cmps = null_distribution >= observed - gamma + pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) + return pvalues + + def two_sided(null_distribution, observed): + pvalues_less = less(null_distribution, observed) + pvalues_greater = greater(null_distribution, 
observed) + pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 + return pvalues + + compare = {"less": less, + "greater": greater, + "two-sided": two_sided} + + pvalues = compare[alternative](null_distribution, observed) + pvalues = np.clip(pvalues, 0, 1) + + return PermutationTestResult(observed, pvalues, null_distribution) + + +@dataclass +class ResamplingMethod: + """Configuration information for a statistical resampling method. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a resampling or Monte Carlo version + of the hypothesis test. + + Attributes + ---------- + n_resamples : int + The number of resamples to perform or Monte Carlo samples to draw. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + """ + n_resamples: int = 9999 + batch: int = None # type: ignore[assignment] + + +@dataclass +class MonteCarloMethod(ResamplingMethod): + """Configuration information for a Monte Carlo hypothesis test. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a Monte Carlo version of the + hypothesis tests. + + Attributes + ---------- + n_resamples : int, optional + The number of Monte Carlo samples to draw. Default is 9999. + batch : int, optional + The number of Monte Carlo samples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all samples in a single batch. + rvs : callable or tuple of callables, optional + A callable or sequence of callables that generates random variates + under the null hypothesis. 
Each element of `rvs` must be a callable + that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and + returns an N-d array sample of that shape. If `rvs` is a sequence, the + number of callables in `rvs` must match the number of samples passed + to the hypothesis test in which the `MonteCarloMethod` is used. Default + is ``None``, in which case the hypothesis test function chooses values + to match the standard version of the hypothesis test. For example, + the null hypothesis of `scipy.stats.pearsonr` is typically that the + samples are drawn from the standard normal distribution, so + ``rvs = (rng.normal, rng.normal)`` where + ``rng = np.random.default_rng()``. + """ + rvs: object = None + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. + return dict(n_resamples=self.n_resamples, batch=self.batch, + rvs=self.rvs) + + +@dataclass +class PermutationMethod(ResamplingMethod): + """Configuration information for a permutation hypothesis test. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a permutation version of the + hypothesis tests. + + Attributes + ---------- + n_resamples : int, optional + The number of resamples to perform. Default is 9999. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, then that instance is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. 
+ If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + """ + random_state: object = None + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. + return dict(n_resamples=self.n_resamples, batch=self.batch, + random_state=self.random_state) + + +@dataclass +class BootstrapMethod(ResamplingMethod): + """Configuration information for a bootstrap confidence interval. + + Instances of this class can be passed into the `method` parameter of some + confidence interval methods to generate a bootstrap confidence interval. + + Attributes + ---------- + n_resamples : int, optional + The number of resamples to perform. Default is 9999. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, then that instance is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + + method : {'bca', 'percentile', 'basic'} + Whether to use the 'percentile' bootstrap ('percentile'), the 'basic' + (AKA 'reverse') bootstrap ('basic'), or the bias-corrected and + accelerated bootstrap ('BCa', default). + """ + random_state: object = None + method: str = 'BCa' + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. 
+ return dict(n_resamples=self.n_resamples, batch=self.batch, + random_state=self.random_state, method=self.method) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_result_classes.py b/parrot/lib/python3.10/site-packages/scipy/stats/_result_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..975af9310efb0c9a414439fd8d531fb95c988951 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_result_classes.py @@ -0,0 +1,40 @@ +# This module exists only to allow Sphinx to generate docs +# for the result objects returned by some functions in stats +# _without_ adding them to the main stats documentation page. + +""" +Result classes +-------------- + +.. currentmodule:: scipy.stats._result_classes + +.. autosummary:: + :toctree: generated/ + + RelativeRiskResult + BinomTestResult + TukeyHSDResult + DunnettResult + PearsonRResult + FitResult + OddsRatioResult + TtestResult + ECDFResult + EmpiricalDistributionFunction + +""" + +__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult', + 'PearsonRResult', 'FitResult', 'OddsRatioResult', + 'TtestResult', 'DunnettResult', 'ECDFResult', + 'EmpiricalDistributionFunction'] + + +from ._binomtest import BinomTestResult +from ._odds_ratio import OddsRatioResult +from ._relative_risk import RelativeRiskResult +from ._hypotests import TukeyHSDResult +from ._multicomp import DunnettResult +from ._stats_py import PearsonRResult, TtestResult +from ._fit import FitResult +from ._survival import ECDFResult, EmpiricalDistributionFunction diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_sampling.py b/parrot/lib/python3.10/site-packages/scipy/stats/_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..44143985a88738c43984347b7787279348fac7f4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_sampling.py @@ -0,0 +1,1314 @@ +import math +import numbers +import numpy as np +from scipy import stats +from scipy import special 
as sc +from ._qmc import (check_random_state as check_random_state_qmc, + Halton, QMCEngine) +from ._unuran.unuran_wrapper import NumericalInversePolynomial +from scipy._lib._util import check_random_state + + +__all__ = ['FastGeneratorInversion', 'RatioUniforms'] + + +# define pdfs and other helper functions to create the generators + +def argus_pdf(x, chi): + # approach follows Baumgarten/Hoermann: Generating ARGUS random variates + # for chi > 5, use relationship of the ARGUS distribution to Gamma(1.5) + if chi <= 5: + y = 1 - x * x + return x * math.sqrt(y) * math.exp(-0.5 * chi**2 * y) + return math.sqrt(x) * math.exp(-x) + + +def argus_gamma_trf(x, chi): + if chi <= 5: + return x + return np.sqrt(1.0 - 2 * x / chi**2) + + +def argus_gamma_inv_trf(x, chi): + if chi <= 5: + return x + return 0.5 * chi**2 * (1 - x**2) + + +def betaprime_pdf(x, a, b): + if x > 0: + logf = (a - 1) * math.log(x) - (a + b) * math.log1p(x) - sc.betaln(a, b) + return math.exp(logf) + else: + # return pdf at x == 0 separately to avoid runtime warnings + if a > 1: + return 0 + elif a < 1: + return np.inf + else: + return 1 / sc.beta(a, b) + + +def beta_valid_params(a, b): + return (min(a, b) >= 0.1) and (max(a, b) <= 700) + + +def gamma_pdf(x, a): + if x > 0: + return math.exp(-math.lgamma(a) + (a - 1.0) * math.log(x) - x) + else: + return 0 if a >= 1 else np.inf + + +def invgamma_pdf(x, a): + if x > 0: + return math.exp(-(a + 1.0) * math.log(x) - math.lgamma(a) - 1 / x) + else: + return 0 if a >= 1 else np.inf + + +def burr_pdf(x, cc, dd): + # note: we use np.exp instead of math.exp, otherwise an overflow + # error can occur in the setup, e.g., for parameters + # 1.89128135, 0.30195177, see test test_burr_overflow + if x > 0: + lx = math.log(x) + return np.exp(-(cc + 1) * lx - (dd + 1) * math.log1p(np.exp(-cc * lx))) + else: + return 0 + + +def burr12_pdf(x, cc, dd): + if x > 0: + lx = math.log(x) + logterm = math.log1p(math.exp(cc * lx)) + return math.exp((cc - 1) * lx - (dd + 1) * 
logterm + math.log(cc * dd)) + else: + return 0 + + +def chi_pdf(x, a): + if x > 0: + return math.exp( + (a - 1) * math.log(x) + - 0.5 * (x * x) + - (a / 2 - 1) * math.log(2) + - math.lgamma(0.5 * a) + ) + else: + return 0 if a >= 1 else np.inf + + +def chi2_pdf(x, df): + if x > 0: + return math.exp( + (df / 2 - 1) * math.log(x) + - 0.5 * x + - (df / 2) * math.log(2) + - math.lgamma(0.5 * df) + ) + else: + return 0 if df >= 1 else np.inf + + +def alpha_pdf(x, a): + if x > 0: + return math.exp(-2.0 * math.log(x) - 0.5 * (a - 1.0 / x) ** 2) + return 0.0 + + +def bradford_pdf(x, c): + if 0 <= x <= 1: + return 1.0 / (1.0 + c * x) + return 0.0 + + +def crystalball_pdf(x, b, m): + if x > -b: + return math.exp(-0.5 * x * x) + return math.exp(m * math.log(m / b) - 0.5 * b * b - m * math.log(m / b - b - x)) + + +def weibull_min_pdf(x, c): + if x > 0: + return c * math.exp((c - 1) * math.log(x) - x**c) + return 0.0 + + +def weibull_max_pdf(x, c): + if x < 0: + return c * math.exp((c - 1) * math.log(-x) - ((-x) ** c)) + return 0.0 + + +def invweibull_pdf(x, c): + if x > 0: + return c * math.exp(-(c + 1) * math.log(x) - x ** (-c)) + return 0.0 + + +def wald_pdf(x): + if x > 0: + return math.exp(-((x - 1) ** 2) / (2 * x)) / math.sqrt(x**3) + return 0.0 + + +def geninvgauss_mode(p, b): + if p > 1: # equivalent mode formulas numerical more stable versions + return (math.sqrt((1 - p) ** 2 + b**2) - (1 - p)) / b + return b / (math.sqrt((1 - p) ** 2 + b**2) + (1 - p)) + + +def geninvgauss_pdf(x, p, b): + m = geninvgauss_mode(p, b) + lfm = (p - 1) * math.log(m) - 0.5 * b * (m + 1 / m) + if x > 0: + return math.exp((p - 1) * math.log(x) - 0.5 * b * (x + 1 / x) - lfm) + return 0.0 + + +def invgauss_mode(mu): + return 1.0 / (math.sqrt(1.5 * 1.5 + 1 / (mu * mu)) + 1.5) + + +def invgauss_pdf(x, mu): + m = invgauss_mode(mu) + lfm = -1.5 * math.log(m) - (m - mu) ** 2 / (2 * m * mu**2) + if x > 0: + return math.exp(-1.5 * math.log(x) - (x - mu) ** 2 / (2 * x * mu**2) - lfm) + return 0.0 + + 
+def powerlaw_pdf(x, a): + if x > 0: + return x ** (a - 1) + return 0.0 + + +# Define a dictionary: for a given distribution (keys), another dictionary +# (values) specifies the parameters for NumericalInversePolynomial (PINV). +# The keys of the latter dictionary are: +# - pdf: the pdf of the distribution (callable). The signature of the pdf +# is float -> float (i.e., the function does not have to be vectorized). +# If possible, functions like log or exp from the module math should be +# preferred over functions from numpy since the PINV setup will be faster +# in that case. +# - check_pinv_params: callable f that returns true if the shape parameters +# (args) are recommended parameters for PINV (i.e., the u-error does +# not exceed the default tolerance) +# - center: scalar if the center does not depend on args, otherwise +# callable that returns the center as a function of the shape parameters +# - rvs_transform: a callable that can be used to transform the rvs that +# are distributed according to the pdf to the target distribution +# (as an example, see the entry for the beta distribution) +# - rvs_transform_inv: the inverse of rvs_transform (it is required +# for the transformed ppf) +# - mirror_uniform: boolean or a callable that returns true or false +# depending on the shape parameters. If True, the ppf is applied +# to 1-u instead of u to generate rvs, where u is a uniform rv. +# While both u and 1-u are uniform, it can be required to use 1-u +# to compute the u-error correctly. This is only relevant for the argus +# distribution. +# The only required keys are "pdf" and "check_pinv_params". +# All other keys are optional. 
+ +PINV_CONFIG = { + "alpha": { + "pdf": alpha_pdf, + "check_pinv_params": lambda a: 1.0e-11 <= a < 2.1e5, + "center": lambda a: 0.25 * (math.sqrt(a * a + 8.0) - a), + }, + "anglit": { + "pdf": lambda x: math.cos(2 * x) + 1.0e-13, + # +1.e-13 is necessary, otherwise PINV has strange problems as + # f(upper border) is very close to 0 + "center": 0, + }, + "argus": { + "pdf": argus_pdf, + "center": lambda chi: 0.7 if chi <= 5 else 0.5, + "check_pinv_params": lambda chi: 1e-20 < chi < 901, + "rvs_transform": argus_gamma_trf, + "rvs_transform_inv": argus_gamma_inv_trf, + "mirror_uniform": lambda chi: chi > 5, + }, + "beta": { + "pdf": betaprime_pdf, + "center": lambda a, b: max(0.1, (a - 1) / (b + 1)), + "check_pinv_params": beta_valid_params, + "rvs_transform": lambda x, *args: x / (1 + x), + "rvs_transform_inv": lambda x, *args: x / (1 - x) if x < 1 else np.inf, + }, + "betaprime": { + "pdf": betaprime_pdf, + "center": lambda a, b: max(0.1, (a - 1) / (b + 1)), + "check_pinv_params": beta_valid_params, + }, + "bradford": { + "pdf": bradford_pdf, + "check_pinv_params": lambda a: 1.0e-6 <= a <= 1e9, + "center": 0.5, + }, + "burr": { + "pdf": burr_pdf, + "center": lambda a, b: (2 ** (1 / b) - 1) ** (-1 / a), + "check_pinv_params": lambda a, b: (min(a, b) >= 0.3) and (max(a, b) <= 50), + }, + "burr12": { + "pdf": burr12_pdf, + "center": lambda a, b: (2 ** (1 / b) - 1) ** (1 / a), + "check_pinv_params": lambda a, b: (min(a, b) >= 0.2) and (max(a, b) <= 50), + }, + "cauchy": { + "pdf": lambda x: 1 / (1 + (x * x)), + "center": 0, + }, + "chi": { + "pdf": chi_pdf, + "check_pinv_params": lambda df: 0.05 <= df <= 1.0e6, + "center": lambda a: math.sqrt(a), + }, + "chi2": { + "pdf": chi2_pdf, + "check_pinv_params": lambda df: 0.07 <= df <= 1e6, + "center": lambda a: a, + }, + "cosine": { + "pdf": lambda x: 1 + math.cos(x), + "center": 0, + }, + "crystalball": { + "pdf": crystalball_pdf, + "check_pinv_params": lambda b, m: (0.01 <= b <= 5.5) + and (1.1 <= m <= 75.1), + "center": 
0.0, + }, + "expon": { + "pdf": lambda x: math.exp(-x), + "center": 1.0, + }, + "gamma": { + "pdf": gamma_pdf, + "check_pinv_params": lambda a: 0.04 <= a <= 1e6, + "center": lambda a: a, + }, + "gennorm": { + "pdf": lambda x, b: math.exp(-abs(x) ** b), + "check_pinv_params": lambda b: 0.081 <= b <= 45.0, + "center": 0.0, + }, + "geninvgauss": { + "pdf": geninvgauss_pdf, + "check_pinv_params": lambda p, b: (abs(p) <= 1200.0) + and (1.0e-10 <= b <= 1200.0), + "center": geninvgauss_mode, + }, + "gumbel_l": { + "pdf": lambda x: math.exp(x - math.exp(x)), + "center": -0.6, + }, + "gumbel_r": { + "pdf": lambda x: math.exp(-x - math.exp(-x)), + "center": 0.6, + }, + "hypsecant": { + "pdf": lambda x: 1.0 / (math.exp(x) + math.exp(-x)), + "center": 0.0, + }, + "invgamma": { + "pdf": invgamma_pdf, + "check_pinv_params": lambda a: 0.04 <= a <= 1e6, + "center": lambda a: 1 / a, + }, + "invgauss": { + "pdf": invgauss_pdf, + "check_pinv_params": lambda mu: 1.0e-10 <= mu <= 1.0e9, + "center": invgauss_mode, + }, + "invweibull": { + "pdf": invweibull_pdf, + "check_pinv_params": lambda a: 0.12 <= a <= 512, + "center": 1.0, + }, + "laplace": { + "pdf": lambda x: math.exp(-abs(x)), + "center": 0.0, + }, + "logistic": { + "pdf": lambda x: math.exp(-x) / (1 + math.exp(-x)) ** 2, + "center": 0.0, + }, + "maxwell": { + "pdf": lambda x: x * x * math.exp(-0.5 * x * x), + "center": 1.41421, + }, + "moyal": { + "pdf": lambda x: math.exp(-(x + math.exp(-x)) / 2), + "center": 1.2, + }, + "norm": { + "pdf": lambda x: math.exp(-x * x / 2), + "center": 0.0, + }, + "pareto": { + "pdf": lambda x, b: x ** -(b + 1), + "center": lambda b: b / (b - 1) if b > 2 else 1.5, + "check_pinv_params": lambda b: 0.08 <= b <= 400000, + }, + "powerlaw": { + "pdf": powerlaw_pdf, + "center": 1.0, + "check_pinv_params": lambda a: 0.06 <= a <= 1.0e5, + }, + "t": { + "pdf": lambda x, df: (1 + x * x / df) ** (-0.5 * (df + 1)), + "check_pinv_params": lambda a: 0.07 <= a <= 1e6, + "center": 0.0, + }, + "rayleigh": { + 
"pdf": lambda x: x * math.exp(-0.5 * (x * x)), + "center": 1.0, + }, + "semicircular": { + "pdf": lambda x: math.sqrt(1.0 - (x * x)), + "center": 0, + }, + "wald": { + "pdf": wald_pdf, + "center": 1.0, + }, + "weibull_max": { + "pdf": weibull_max_pdf, + "check_pinv_params": lambda a: 0.25 <= a <= 512, + "center": -1.0, + }, + "weibull_min": { + "pdf": weibull_min_pdf, + "check_pinv_params": lambda a: 0.25 <= a <= 512, + "center": 1.0, + }, +} + + +def _validate_qmc_input(qmc_engine, d, seed): + # Input validation for `qmc_engine` and `d` + # Error messages for invalid `d` are raised by QMCEngine + # we could probably use a stats.qmc.check_qrandom_state + if isinstance(qmc_engine, QMCEngine): + if d is not None and qmc_engine.d != d: + message = "`d` must be consistent with dimension of `qmc_engine`." + raise ValueError(message) + d = qmc_engine.d if d is None else d + elif qmc_engine is None: + d = 1 if d is None else d + qmc_engine = Halton(d, seed=seed) + else: + message = ( + "`qmc_engine` must be an instance of " + "`scipy.stats.qmc.QMCEngine` or `None`." + ) + raise ValueError(message) + + return qmc_engine, d + + +class CustomDistPINV: + def __init__(self, pdf, args): + self._pdf = lambda x: pdf(x, *args) + + def pdf(self, x): + return self._pdf(x) + + +class FastGeneratorInversion: + """ + Fast sampling by numerical inversion of the CDF for a large class of + continuous distributions in `scipy.stats`. + + Parameters + ---------- + dist : rv_frozen object + Frozen distribution object from `scipy.stats`. The list of supported + distributions can be found in the Notes section. The shape parameters, + `loc` and `scale` used to create the distributions must be scalars. + For example, for the Gamma distribution with shape parameter `p`, + `p` has to be a float, and for the beta distribution with shape + parameters (a, b), both a and b have to be floats. 
+ domain : tuple of floats, optional + If one wishes to sample from a truncated/conditional distribution, + the domain has to be specified. + The default is None. In that case, the random variates are not + truncated, and the domain is inferred from the support of the + distribution. + ignore_shape_range : boolean, optional. + If False, shape parameters that are outside of the valid range + of values to ensure that the numerical accuracy (see Notes) is + high, raise a ValueError. If True, any shape parameters that are valid + for the distribution are accepted. This can be useful for testing. + The default is False. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + A NumPy random number generator or seed for the underlying NumPy + random number generator used to generate the stream of uniform + random numbers. + If `random_state` is None, it uses ``self.random_state``. + If `random_state` is an int, + ``np.random.default_rng(random_state)`` is used. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Attributes + ---------- + loc : float + The location parameter. + random_state : {`numpy.random.Generator`, `numpy.random.RandomState`} + The random state used in relevant methods like `rvs` (unless + another `random_state` is passed as an argument to these methods). + scale : float + The scale parameter. + + Methods + ------- + cdf + evaluate_error + ppf + qrvs + rvs + support + + Notes + ----- + The class creates an object for continuous distributions specified + by `dist`. The method `rvs` uses a generator from + `scipy.stats.sampling` that is created when the object is instantiated. + In addition, the methods `qrvs` and `ppf` are added. + `qrvs` generate samples based on quasi-random numbers from + `scipy.stats.qmc`. `ppf` is the PPF based on the + numerical inversion method in [1]_ (`NumericalInversePolynomial`) that is + used to generate random variates. 
+ + Supported distributions (`distname`) are: + ``alpha``, ``anglit``, ``argus``, ``beta``, ``betaprime``, ``bradford``, + ``burr``, ``burr12``, ``cauchy``, ``chi``, ``chi2``, ``cosine``, + ``crystalball``, ``expon``, ``gamma``, ``gennorm``, ``geninvgauss``, + ``gumbel_l``, ``gumbel_r``, ``hypsecant``, ``invgamma``, ``invgauss``, + ``invweibull``, ``laplace``, ``logistic``, ``maxwell``, ``moyal``, + ``norm``, ``pareto``, ``powerlaw``, ``t``, ``rayleigh``, ``semicircular``, + ``wald``, ``weibull_max``, ``weibull_min``. + + `rvs` relies on the accuracy of the numerical inversion. If very extreme + shape parameters are used, the numerical inversion might not work. However, + for all implemented distributions, the admissible shape parameters have + been tested, and an error will be raised if the user supplies values + outside of the allowed range. The u-error should not exceed 1e-10 for all + valid parameters. Note that warnings might be raised even if parameters + are within the valid range when the object is instantiated. + To check numerical accuracy, the method `evaluate_error` can be used. + + Note that all implemented distributions are also part of `scipy.stats`, and + the object created by `FastGeneratorInversion` relies on methods like + `ppf`, `cdf` and `pdf` from `rv_frozen`. The main benefit of using this + class can be summarized as follows: Once the generator to sample random + variates is created in the setup step, sampling and evaluation of + the PPF using `ppf` are very fast, + and performance is essentially independent of the distribution. Therefore, + a substantial speed-up can be achieved for many distributions if large + numbers of random variates are required. It is important to know that this + fast sampling is achieved by inversion of the CDF. 
Thus, one uniform + random variate is transformed into a non-uniform variate, which is an + advantage for several simulation methods, e.g., when + the variance reduction methods of common random variates or + antithetic variates are be used ([2]_). + + In addition, inversion makes it possible to + - to use a QMC generator from `scipy.stats.qmc` (method `qrvs`), + - to generate random variates truncated to an interval. For example, if + one aims to sample standard normal random variates from + the interval (2, 4), this can be easily achieved by using the parameter + `domain`. + + The location and scale that are initially defined by `dist` + can be reset without having to rerun the setup + step to create the generator that is used for sampling. The relation + of the distribution `Y` with `loc` and `scale` to the standard + distribution `X` (i.e., ``loc=0`` and ``scale=1``) is given by + ``Y = loc + scale * X``. + + References + ---------- + .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold. + "Random variate generation by numerical inversion when only the + density is known." ACM Transactions on Modeling and Computer + Simulation (TOMACS) 20.4 (2010): 1-25. + .. [2] Hörmann, Wolfgang, Josef Leydold and Gerhard Derflinger. + "Automatic nonuniform random number generation." + Springer, 2004. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> from scipy.stats.sampling import FastGeneratorInversion + + Let's start with a simple example to illustrate the main features: + + >>> gamma_frozen = stats.gamma(1.5) + >>> gamma_dist = FastGeneratorInversion(gamma_frozen) + >>> r = gamma_dist.rvs(size=1000) + + The mean should be approximately equal to the shape parameter 1.5: + + >>> r.mean() + 1.52423591130436 # may vary + + Similarly, we can draw a sample based on quasi-random numbers: + + >>> r = gamma_dist.qrvs(size=1000) + >>> r.mean() + 1.4996639255942914 # may vary + + Compare the PPF against approximation `ppf`. 
+ + >>> q = [0.001, 0.2, 0.5, 0.8, 0.999] + >>> np.max(np.abs(gamma_frozen.ppf(q) - gamma_dist.ppf(q))) + 4.313394796895409e-08 + + To confirm that the numerical inversion is accurate, we evaluate the + approximation error (u-error), which should be below 1e-10 (for more + details, refer to the documentation of `evaluate_error`): + + >>> gamma_dist.evaluate_error() + (7.446320551265581e-11, nan) # may vary + + Note that the location and scale can be changed without instantiating a + new generator: + + >>> gamma_dist.loc = 2 + >>> gamma_dist.scale = 3 + >>> r = gamma_dist.rvs(size=1000) + + The mean should be approximately 2 + 3*1.5 = 6.5. + + >>> r.mean() + 6.399549295242894 # may vary + + Let us also illustrate how truncation can be applied: + + >>> trunc_norm = FastGeneratorInversion(stats.norm(), domain=(3, 4)) + >>> r = trunc_norm.rvs(size=1000) + >>> 3 < r.min() < r.max() < 4 + True + + Check the mean: + + >>> r.mean() + 3.250433367078603 # may vary + + >>> stats.norm.expect(lb=3, ub=4, conditional=True) + 3.260454285589997 + + In this particular, case, `scipy.stats.truncnorm` could also be used to + generate truncated normal random variates. + + """ + + def __init__( + self, + dist, + *, + domain=None, + ignore_shape_range=False, + random_state=None, + ): + + if isinstance(dist, stats.distributions.rv_frozen): + distname = dist.dist.name + if distname not in PINV_CONFIG.keys(): + raise ValueError( + f"Distribution '{distname}' is not supported." 
+ f"It must be one of {list(PINV_CONFIG.keys())}" + ) + else: + raise ValueError("`dist` must be a frozen distribution object") + + loc = dist.kwds.get("loc", 0) + scale = dist.kwds.get("scale", 1) + args = dist.args + if not np.isscalar(loc): + raise ValueError("loc must be scalar.") + if not np.isscalar(scale): + raise ValueError("scale must be scalar.") + + self._frozendist = getattr(stats, distname)( + *args, + loc=loc, + scale=scale, + ) + self._distname = distname + + nargs = np.broadcast_arrays(args)[0].size + nargs_expected = self._frozendist.dist.numargs + if nargs != nargs_expected: + raise ValueError( + f"Each of the {nargs_expected} shape parameters must be a " + f"scalar, but {nargs} values are provided." + ) + + self.random_state = random_state + + if domain is None: + self._domain = self._frozendist.support() + self._p_lower = 0.0 + self._p_domain = 1.0 + else: + self._domain = domain + self._p_lower = self._frozendist.cdf(self._domain[0]) + _p_domain = self._frozendist.cdf(self._domain[1]) - self._p_lower + self._p_domain = _p_domain + self._set_domain_adj() + self._ignore_shape_range = ignore_shape_range + + # the domain to be passed to NumericalInversePolynomial + # define a separate variable since in case of a transformation, + # domain_pinv will not be the same as self._domain + self._domain_pinv = self._domain + + # get information about the distribution from the config to set up + # the generator + dist = self._process_config(distname, args) + + if self._rvs_transform_inv is not None: + d0 = self._rvs_transform_inv(self._domain[0], *args) + d1 = self._rvs_transform_inv(self._domain[1], *args) + if d0 > d1: + # swap values if transformation if decreasing + d0, d1 = d1, d0 + # only update _domain_pinv and not _domain + # _domain refers to the original distribution, _domain_pinv + # to the transformed distribution + self._domain_pinv = d0, d1 + + # self._center has been set by the call self._process_config + # check if self._center is inside the 
transformed domain + # _domain_pinv, otherwise move it to the endpoint that is closer + if self._center is not None: + if self._center < self._domain_pinv[0]: + self._center = self._domain_pinv[0] + elif self._center > self._domain_pinv[1]: + self._center = self._domain_pinv[1] + + self._rng = NumericalInversePolynomial( + dist, + random_state=self.random_state, + domain=self._domain_pinv, + center=self._center, + ) + + @property + def random_state(self): + return self._random_state + + @random_state.setter + def random_state(self, random_state): + self._random_state = check_random_state_qmc(random_state) + + @property + def loc(self): + return self._frozendist.kwds.get("loc", 0) + + @loc.setter + def loc(self, loc): + if not np.isscalar(loc): + raise ValueError("loc must be scalar.") + self._frozendist.kwds["loc"] = loc + # update the adjusted domain that depends on loc and scale + self._set_domain_adj() + + @property + def scale(self): + return self._frozendist.kwds.get("scale", 0) + + @scale.setter + def scale(self, scale): + if not np.isscalar(scale): + raise ValueError("scale must be scalar.") + self._frozendist.kwds["scale"] = scale + # update the adjusted domain that depends on loc and scale + self._set_domain_adj() + + def _set_domain_adj(self): + """ Adjust the domain based on loc and scale. """ + loc = self.loc + scale = self.scale + lb = self._domain[0] * scale + loc + ub = self._domain[1] * scale + loc + self._domain_adj = (lb, ub) + + def _process_config(self, distname, args): + cfg = PINV_CONFIG[distname] + if "check_pinv_params" in cfg: + if not self._ignore_shape_range: + if not cfg["check_pinv_params"](*args): + msg = ("No generator is defined for the shape parameters " + f"{args}. 
Use ignore_shape_range to proceed " + "with the selected values.") + raise ValueError(msg) + + if "center" in cfg.keys(): + if not np.isscalar(cfg["center"]): + self._center = cfg["center"](*args) + else: + self._center = cfg["center"] + else: + self._center = None + self._rvs_transform = cfg.get("rvs_transform", None) + self._rvs_transform_inv = cfg.get("rvs_transform_inv", None) + _mirror_uniform = cfg.get("mirror_uniform", None) + if _mirror_uniform is None: + self._mirror_uniform = False + else: + self._mirror_uniform = _mirror_uniform(*args) + + return CustomDistPINV(cfg["pdf"], args) + + def rvs(self, size=None): + """ + Sample from the distribution by inversion. + + Parameters + ---------- + size : int or tuple, optional + The shape of samples. Default is ``None`` in which case a scalar + sample is returned. + + Returns + ------- + rvs : array_like + A NumPy array of random variates. + + Notes + ----- + Random variates are generated by numerical inversion of the CDF, i.e., + `ppf` computed by `NumericalInversePolynomial` when the class + is instantiated. Note that the + default ``rvs`` method of the rv_continuous class is + overwritten. Hence, a different stream of random numbers is generated + even if the same seed is used. + """ + # note: we cannot use self._rng.rvs directly in case + # self._mirror_uniform is true + u = self.random_state.uniform(size=size) + if self._mirror_uniform: + u = 1 - u + r = self._rng.ppf(u) + if self._rvs_transform is not None: + r = self._rvs_transform(r, *self._frozendist.args) + return self.loc + self.scale * r + + def ppf(self, q): + """ + Very fast PPF (inverse CDF) of the distribution which + is a very close approximation of the exact PPF values. + + Parameters + ---------- + u : array_like + Array with probabilities. + + Returns + ------- + ppf : array_like + Quantiles corresponding to the values in `u`. + + Notes + ----- + The evaluation of the PPF is very fast but it may have a large + relative error in the far tails. 
The numerical precision of the PPF + is controlled by the u-error, that is, + ``max |u - CDF(PPF(u))|`` where the max is taken over points in + the interval [0,1], see `evaluate_error`. + + Note that this PPF is designed to generate random samples. + """ + q = np.asarray(q) + if self._mirror_uniform: + x = self._rng.ppf(1 - q) + else: + x = self._rng.ppf(q) + if self._rvs_transform is not None: + x = self._rvs_transform(x, *self._frozendist.args) + return self.scale * x + self.loc + + def qrvs(self, size=None, d=None, qmc_engine=None): + """ + Quasi-random variates of the given distribution. + + The `qmc_engine` is used to draw uniform quasi-random variates, and + these are converted to quasi-random variates of the given distribution + using inverse transform sampling. + + Parameters + ---------- + size : int, tuple of ints, or None; optional + Defines shape of random variates array. Default is ``None``. + d : int or None, optional + Defines dimension of uniform quasi-random variates to be + transformed. Default is ``None``. + qmc_engine : scipy.stats.qmc.QMCEngine(d=1), optional + Defines the object to use for drawing + quasi-random variates. Default is ``None``, which uses + `scipy.stats.qmc.Halton(1)`. + + Returns + ------- + rvs : ndarray or scalar + Quasi-random variates. See Notes for shape information. + + Notes + ----- + The shape of the output array depends on `size`, `d`, and `qmc_engine`. + The intent is for the interface to be natural, but the detailed rules + to achieve this are complicated. + + - If `qmc_engine` is ``None``, a `scipy.stats.qmc.Halton` instance is + created with dimension `d`. If `d` is not provided, ``d=1``. + - If `qmc_engine` is not ``None`` and `d` is ``None``, `d` is + determined from the dimension of the `qmc_engine`. + - If `qmc_engine` is not ``None`` and `d` is not ``None`` but the + dimensions are inconsistent, a ``ValueError`` is raised. 
+ - After `d` is determined according to the rules above, the output + shape is ``tuple_shape + d_shape``, where: + + - ``tuple_shape = tuple()`` if `size` is ``None``, + - ``tuple_shape = (size,)`` if `size` is an ``int``, + - ``tuple_shape = size`` if `size` is a sequence, + - ``d_shape = tuple()`` if `d` is ``None`` or `d` is 1, and + - ``d_shape = (d,)`` if `d` is greater than 1. + + The elements of the returned array are part of a low-discrepancy + sequence. If `d` is 1, this means that none of the samples are truly + independent. If `d` > 1, each slice ``rvs[..., i]`` will be of a + quasi-independent sequence; see `scipy.stats.qmc.QMCEngine` for + details. Note that when `d` > 1, the samples returned are still those + of the provided univariate distribution, not a multivariate + generalization of that distribution. + + """ + qmc_engine, d = _validate_qmc_input(qmc_engine, d, self.random_state) + # mainly copied from unuran_wrapper.pyx.templ + # `rvs` is flexible about whether `size` is an int or tuple, so this + # should be, too. + try: + if size is None: + tuple_size = (1,) + else: + tuple_size = tuple(size) + except TypeError: + tuple_size = (size,) + # we do not use rng.qrvs directly since we need to be + # able to apply the ppf to 1 - u + N = 1 if size is None else np.prod(size) + u = qmc_engine.random(N) + if self._mirror_uniform: + u = 1 - u + qrvs = self._ppf(u) + if self._rvs_transform is not None: + qrvs = self._rvs_transform(qrvs, *self._frozendist.args) + if size is None: + qrvs = qrvs.squeeze()[()] + else: + if d == 1: + qrvs = qrvs.reshape(tuple_size) + else: + qrvs = qrvs.reshape(tuple_size + (d,)) + return self.loc + self.scale * qrvs + + def evaluate_error(self, size=100000, random_state=None, x_error=False): + """ + Evaluate the numerical accuracy of the inversion (u- and x-error). + + Parameters + ---------- + size : int, optional + The number of random points over which the error is estimated. + Default is ``100000``. 
+ random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + A NumPy random number generator or seed for the underlying NumPy + random number generator used to generate the stream of uniform + random numbers. + If `random_state` is None, use ``self.random_state``. + If `random_state` is an int, + ``np.random.default_rng(random_state)`` is used. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + u_error, x_error : tuple of floats + A NumPy array of random variates. + + Notes + ----- + The numerical precision of the inverse CDF `ppf` is controlled by + the u-error. It is computed as follows: + ``max |u - CDF(PPF(u))|`` where the max is taken `size` random + points in the interval [0,1]. `random_state` determines the random + sample. Note that if `ppf` was exact, the u-error would be zero. + + The x-error measures the direct distance between the exact PPF + and `ppf`. If ``x_error`` is set to ``True`, it is + computed as the maximum of the minimum of the relative and absolute + x-error: + ``max(min(x_error_abs[i], x_error_rel[i]))`` where + ``x_error_abs[i] = |PPF(u[i]) - PPF_fast(u[i])|``, + ``x_error_rel[i] = max |(PPF(u[i]) - PPF_fast(u[i])) / PPF(u[i])|``. + Note that it is important to consider the relative x-error in the case + that ``PPF(u)`` is close to zero or very large. + + By default, only the u-error is evaluated and the x-error is set to + ``np.nan``. Note that the evaluation of the x-error will be very slow + if the implementation of the PPF is slow. + + Further information about these error measures can be found in [1]_. + + References + ---------- + .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold. + "Random variate generation by numerical inversion when only the + density is known." ACM Transactions on Modeling and Computer + Simulation (TOMACS) 20.4 (2010): 1-25. 
+ + Examples + -------- + + >>> import numpy as np + >>> from scipy import stats + >>> from scipy.stats.sampling import FastGeneratorInversion + + Create an object for the normal distribution: + + >>> d_norm_frozen = stats.norm() + >>> d_norm = FastGeneratorInversion(d_norm_frozen) + + To confirm that the numerical inversion is accurate, we evaluate the + approximation error (u-error and x-error). + + >>> u_error, x_error = d_norm.evaluate_error(x_error=True) + + The u-error should be below 1e-10: + + >>> u_error + 8.785783212061915e-11 # may vary + + Compare the PPF against approximation `ppf`: + + >>> q = [0.001, 0.2, 0.4, 0.6, 0.8, 0.999] + >>> diff = np.abs(d_norm_frozen.ppf(q) - d_norm.ppf(q)) + >>> x_error_abs = np.max(diff) + >>> x_error_abs + 1.2937954707581412e-08 + + This is the absolute x-error evaluated at the points q. The relative + error is given by + + >>> x_error_rel = np.max(diff / np.abs(d_norm_frozen.ppf(q))) + >>> x_error_rel + 4.186725600453555e-09 + + The x_error computed above is derived in a very similar way over a + much larger set of random values q. At each value q[i], the minimum + of the relative and absolute error is taken. The final value is then + derived as the maximum of these values. In our example, we get the + following value: + + >>> x_error + 4.507068014335139e-07 # may vary + + """ + if not isinstance(size, (numbers.Integral, np.integer)): + raise ValueError("size must be an integer.") + # urng will be used to draw the samples for testing the error + # it must not interfere with self.random_state. 
therefore, do not + # call self.rvs, but draw uniform random numbers and apply + # self.ppf (note: like in rvs, consider self._mirror_uniform) + urng = check_random_state_qmc(random_state) + u = urng.uniform(size=size) + if self._mirror_uniform: + u = 1 - u + x = self.ppf(u) + uerr = np.max(np.abs(self._cdf(x) - u)) + if not x_error: + return uerr, np.nan + ppf_u = self._ppf(u) + x_error_abs = np.abs(self.ppf(u)-ppf_u) + x_error_rel = x_error_abs / np.abs(ppf_u) + x_error_combined = np.array([x_error_abs, x_error_rel]).min(axis=0) + return uerr, np.max(x_error_combined) + + def support(self): + """Support of the distribution. + + Returns + ------- + a, b : float + end-points of the distribution's support. + + Notes + ----- + + Note that the support of the distribution depends on `loc`, + `scale` and `domain`. + + Examples + -------- + + >>> from scipy import stats + >>> from scipy.stats.sampling import FastGeneratorInversion + + Define a truncated normal distribution: + + >>> d_norm = FastGeneratorInversion(stats.norm(), domain=(0, 1)) + >>> d_norm.support() + (0, 1) + + Shift the distribution: + + >>> d_norm.loc = 2.5 + >>> d_norm.support() + (2.5, 3.5) + + """ + return self._domain_adj + + def _cdf(self, x): + """Cumulative distribution function (CDF) + + Parameters + ---------- + x : array_like + The values where the CDF is evaluated + + Returns + ------- + y : ndarray + CDF evaluated at x + + """ + y = self._frozendist.cdf(x) + if self._p_domain == 1.0: + return y + return np.clip((y - self._p_lower) / self._p_domain, 0, 1) + + def _ppf(self, q): + """Percent point function (inverse of `cdf`) + + Parameters + ---------- + q : array_like + lower tail probability + + Returns + ------- + x : array_like + quantile corresponding to the lower tail probability q. 
+ + """ + if self._p_domain == 1.0: + return self._frozendist.ppf(q) + x = self._frozendist.ppf(self._p_domain * np.array(q) + self._p_lower) + return np.clip(x, self._domain_adj[0], self._domain_adj[1]) + + +class RatioUniforms: + """ + Generate random samples from a probability density function using the + ratio-of-uniforms method. + + Parameters + ---------- + pdf : callable + A function with signature `pdf(x)` that is proportional to the + probability density function of the distribution. + umax : float + The upper bound of the bounding rectangle in the u-direction. + vmin : float + The lower bound of the bounding rectangle in the v-direction. + vmax : float + The upper bound of the bounding rectangle in the v-direction. + c : float, optional. + Shift parameter of ratio-of-uniforms method, see Notes. Default is 0. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + + Notes + ----- + Given a univariate probability density function `pdf` and a constant `c`, + define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``. + If ``(U, V)`` is a random vector uniformly distributed over ``A``, + then ``V/U + c`` follows a distribution according to `pdf`. + + The above result (see [1]_, [2]_) can be used to sample random variables + using only the PDF, i.e. no inversion of the CDF is required. Typical + choices of `c` are zero or the mode of `pdf`. 
The set ``A`` is a subset of + the rectangle ``R = [0, umax] x [vmin, vmax]`` where + + - ``umax = sup sqrt(pdf(x))`` + - ``vmin = inf (x - c) sqrt(pdf(x))`` + - ``vmax = sup (x - c) sqrt(pdf(x))`` + + In particular, these values are finite if `pdf` is bounded and + ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails). + One can generate ``(U, V)`` uniformly on ``R`` and return + ``V/U + c`` if ``(U, V)`` are also in ``A`` which can be directly + verified. + + The algorithm is not changed if one replaces `pdf` by k * `pdf` for any + constant k > 0. Thus, it is often convenient to work with a function + that is proportional to the probability density function by dropping + unnecessary normalization factors. + + Intuitively, the method works well if ``A`` fills up most of the + enclosing rectangle such that the probability is high that ``(U, V)`` + lies in ``A`` whenever it lies in ``R`` as the number of required + iterations becomes too large otherwise. To be more precise, note that + the expected number of iterations to draw ``(U, V)`` uniformly + distributed on ``R`` such that ``(U, V)`` is also in ``A`` is given by + the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``, + where `area(pdf)` is the integral of `pdf` (which is equal to one if the + probability density function is used but can take on other values if a + function proportional to the density is used). The equality holds since + the area of ``A`` is equal to ``0.5 * area(pdf)`` (Theorem 7.1 in [1]_). + If the sampling fails to generate a single random variate after 50000 + iterations (i.e. not a single draw is in ``A``), an exception is raised. + + If the bounding rectangle is not correctly specified (i.e. if it does not + contain ``A``), the algorithm samples from a distribution different from + the one given by `pdf`. It is therefore recommended to perform a + test such as `~scipy.stats.kstest` as a check. + + References + ---------- + .. [1] L. 
Devroye, "Non-Uniform Random Variate Generation", + Springer-Verlag, 1986. + + .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian + random variates", Statistics and Computing, 24(4), p. 547--557, 2014. + + .. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random + Variables Using the Ratio of Uniform Deviates", + ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + + >>> from scipy.stats.sampling import RatioUniforms + >>> rng = np.random.default_rng() + + Simulate normally distributed random variables. It is easy to compute the + bounding rectangle explicitly in that case. For simplicity, we drop the + normalization factor of the density. + + >>> f = lambda x: np.exp(-x**2 / 2) + >>> v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + >>> umax = np.sqrt(f(0)) + >>> gen = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=rng) + >>> r = gen.rvs(size=2500) + + The K-S test confirms that the random variates are indeed normally + distributed (normality is not rejected at 5% significance level): + + >>> stats.kstest(r, 'norm')[1] + 0.250634764150542 + + The exponential distribution provides another example where the bounding + rectangle can be determined explicitly. + + >>> gen = RatioUniforms(lambda x: np.exp(-x), umax=1, vmin=0, + ... 
vmax=2*np.exp(-1), random_state=rng) + >>> r = gen.rvs(1000) + >>> stats.kstest(r, 'expon')[1] + 0.21121052054580314 + + """ + + def __init__(self, pdf, *, umax, vmin, vmax, c=0, random_state=None): + if vmin >= vmax: + raise ValueError("vmin must be smaller than vmax.") + + if umax <= 0: + raise ValueError("umax must be positive.") + + self._pdf = pdf + self._umax = umax + self._vmin = vmin + self._vmax = vmax + self._c = c + self._rng = check_random_state(random_state) + + def rvs(self, size=1): + """Sampling of random variates + + Parameters + ---------- + size : int or tuple of ints, optional + Number of random variates to be generated (default is 1). + + Returns + ------- + rvs : ndarray + The random variates distributed according to the probability + distribution defined by the pdf. + + """ + size1d = tuple(np.atleast_1d(size)) + N = np.prod(size1d) # number of rvs needed, reshape upon return + + # start sampling using ratio of uniforms method + x = np.zeros(N) + simulated, i = 0, 1 + + # loop until N rvs have been generated: expected runtime is finite. + # to avoid infinite loop, raise exception if not a single rv has been + # generated after 50000 tries. even if the expected number of iterations + # is 1000, the probability of this event is (1-1/1000)**50000 + # which is of order 10e-22 + while simulated < N: + k = N - simulated + # simulate uniform rvs on [0, umax] and [vmin, vmax] + u1 = self._umax * self._rng.uniform(size=k) + v1 = self._rng.uniform(self._vmin, self._vmax, size=k) + # apply rejection method + rvs = v1 / u1 + self._c + accept = (u1**2 <= self._pdf(rvs)) + num_accept = np.sum(accept) + if num_accept > 0: + x[simulated:(simulated + num_accept)] = rvs[accept] + simulated += num_accept + + if (simulated == 0) and (i*N >= 50000): + msg = ( + f"Not a single random variate could be generated in {i*N} " + "attempts. The ratio of uniforms method does not appear " + "to work for the provided parameters. Please check the " + "pdf and the bounds." 
+ ) + raise RuntimeError(msg) + i += 1 + + return np.reshape(x, size1d) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_stats.pxd b/parrot/lib/python3.10/site-packages/scipy/stats/_stats.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e01565f75fe232446e4b8b0b50fdf645c8506108 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_stats.pxd @@ -0,0 +1,10 @@ +# destined to be used in a LowLevelCallable + +cdef double _geninvgauss_pdf(double x, void *user_data) noexcept nogil +cdef double _studentized_range_cdf(int n, double[2] x, void *user_data) noexcept nogil +cdef double _studentized_range_cdf_asymptotic(double z, void *user_data) noexcept nogil +cdef double _studentized_range_pdf(int n, double[2] x, void *user_data) noexcept nogil +cdef double _studentized_range_pdf_asymptotic(double z, void *user_data) noexcept nogil +cdef double _studentized_range_moment(int n, double[3] x_arg, void *user_data) noexcept nogil +cdef double _genhyperbolic_pdf(double x, void *user_data) noexcept nogil +cdef double _genhyperbolic_logpdf(double x, void *user_data) noexcept nogil diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_stats_mstats_common.py b/parrot/lib/python3.10/site-packages/scipy/stats/_stats_mstats_common.py new file mode 100644 index 0000000000000000000000000000000000000000..6900eba1fa6157c9de956255c49f5cbce0029c11 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_stats_mstats_common.py @@ -0,0 +1,303 @@ +import warnings +import numpy as np +from . import distributions +from .._lib._bunch import _make_tuple_bunch +from ._stats_pythran import siegelslopes as siegelslopes_pythran + +__all__ = ['_find_repeats', 'theilslopes', 'siegelslopes'] + +# This is not a namedtuple for backwards compatibility. 
See PR #12983 +TheilslopesResult = _make_tuple_bunch('TheilslopesResult', + ['slope', 'intercept', + 'low_slope', 'high_slope']) +SiegelslopesResult = _make_tuple_bunch('SiegelslopesResult', + ['slope', 'intercept']) + + +def theilslopes(y, x=None, alpha=0.95, method='separate'): + r""" + Computes the Theil-Sen estimator for a set of points (x, y). + + `theilslopes` implements a method for robust linear regression. It + computes the slope as the median of all slopes between paired values. + + Parameters + ---------- + y : array_like + Dependent variable. + x : array_like or None, optional + Independent variable. If None, use ``arange(len(y))`` instead. + alpha : float, optional + Confidence degree between 0 and 1. Default is 95% confidence. + Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are + interpreted as "find the 90% confidence interval". + method : {'joint', 'separate'}, optional + Method to be used for computing estimate for intercept. + Following methods are supported, + + * 'joint': Uses np.median(y - slope * x) as intercept. + * 'separate': Uses np.median(y) - slope * np.median(x) + as intercept. + + The default is 'separate'. + + .. versionadded:: 1.8.0 + + Returns + ------- + result : ``TheilslopesResult`` instance + The return value is an object with the following attributes: + + slope : float + Theil slope. + intercept : float + Intercept of the Theil line. + low_slope : float + Lower bound of the confidence interval on `slope`. + high_slope : float + Upper bound of the confidence interval on `slope`. + + See Also + -------- + siegelslopes : a similar technique using repeated medians + + Notes + ----- + The implementation of `theilslopes` follows [1]_. The intercept is + not defined in [1]_, and here it is defined as ``median(y) - + slope*median(x)``, which is given in [3]_. Other definitions of + the intercept exist in the literature such as ``median(y - slope*x)`` + in [4]_. 
The approach to compute the intercept can be determined by the + parameter ``method``. A confidence interval for the intercept is not + given as this question is not addressed in [1]_. + + For compatibility with older versions of SciPy, the return value acts + like a ``namedtuple`` of length 4, with fields ``slope``, ``intercept``, + ``low_slope``, and ``high_slope``, so one can continue to write:: + + slope, intercept, low_slope, high_slope = theilslopes(y, x) + + References + ---------- + .. [1] P.K. Sen, "Estimates of the regression coefficient based on + Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968. + .. [2] H. Theil, "A rank-invariant method of linear and polynomial + regression analysis I, II and III", Nederl. Akad. Wetensch., Proc. + 53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950. + .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed., + John Wiley and Sons, New York, pp. 493. + .. [4] https://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-5, 5, num=150) + >>> y = x + np.random.normal(size=x.size) + >>> y[11:15] += 10 # add outliers + >>> y[-5:] -= 7 + + Compute the slope, intercept and 90% confidence interval. For comparison, + also compute the least-squares fit with `linregress`: + + >>> res = stats.theilslopes(y, x, 0.90, method='separate') + >>> lsq_res = stats.linregress(x, y) + + Plot the results. The Theil-Sen regression line is shown in red, with the + dashed red lines illustrating the confidence interval of the slope (note + that the dashed red lines are not the confidence interval of the regression + as the confidence interval of the intercept is not included). The green + line shows the least-squares fit for comparison. 
+ + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, y, 'b.') + >>> ax.plot(x, res[1] + res[0] * x, 'r-') + >>> ax.plot(x, res[1] + res[2] * x, 'r--') + >>> ax.plot(x, res[1] + res[3] * x, 'r--') + >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') + >>> plt.show() + + """ + if method not in ['joint', 'separate']: + raise ValueError("method must be either 'joint' or 'separate'." + f"'{method}' is invalid.") + # We copy both x and y so we can use _find_repeats. + y = np.array(y, dtype=float, copy=True).ravel() + if x is None: + x = np.arange(len(y), dtype=float) + else: + x = np.array(x, dtype=float, copy=True).ravel() + if len(x) != len(y): + raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})") + + # Compute sorted slopes only when deltax > 0 + deltax = x[:, np.newaxis] - x + deltay = y[:, np.newaxis] - y + slopes = deltay[deltax > 0] / deltax[deltax > 0] + if not slopes.size: + msg = "All `x` coordinates are identical." + warnings.warn(msg, RuntimeWarning, stacklevel=2) + slopes.sort() + medslope = np.median(slopes) + if method == 'joint': + medinter = np.median(y - medslope * x) + else: + medinter = np.median(y) - medslope * np.median(x) + # Now compute confidence intervals + if alpha > 0.5: + alpha = 1. - alpha + + z = distributions.norm.ppf(alpha / 2.) + # This implements (2.6) from Sen (1968) + _, nxreps = _find_repeats(x) + _, nyreps = _find_repeats(y) + nt = len(slopes) # N in Sen (1968) + ny = len(y) # n in Sen (1968) + # Equation 2.6 in Sen (1968): + sigsq = 1/18. 
* (ny * (ny-1) * (2*ny+5) - + sum(k * (k-1) * (2*k + 5) for k in nxreps) - + sum(k * (k-1) * (2*k + 5) for k in nyreps)) + # Find the confidence interval indices in `slopes` + try: + sigma = np.sqrt(sigsq) + Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1) + Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0) + delta = slopes[[Rl, Ru]] + except (ValueError, IndexError): + delta = (np.nan, np.nan) + + return TheilslopesResult(slope=medslope, intercept=medinter, + low_slope=delta[0], high_slope=delta[1]) + + +def _find_repeats(arr): + # This function assumes it may clobber its input. + if len(arr) == 0: + return np.array(0, np.float64), np.array(0, np.intp) + + # XXX This cast was previously needed for the Fortran implementation, + # should we ditch it? + arr = np.asarray(arr, np.float64).ravel() + arr.sort() + + # Taken from NumPy 1.9's np.unique. + change = np.concatenate(([True], arr[1:] != arr[:-1])) + unique = arr[change] + change_idx = np.concatenate(np.nonzero(change) + ([arr.size],)) + freq = np.diff(change_idx) + atleast2 = freq > 1 + return unique[atleast2], freq[atleast2] + + +def siegelslopes(y, x=None, method="hierarchical"): + r""" + Computes the Siegel estimator for a set of points (x, y). + + `siegelslopes` implements a method for robust linear regression + using repeated medians (see [1]_) to fit a line to the points (x, y). + The method is robust to outliers with an asymptotic breakdown point + of 50%. + + Parameters + ---------- + y : array_like + Dependent variable. + x : array_like or None, optional + Independent variable. If None, use ``arange(len(y))`` instead. + method : {'hierarchical', 'separate'} + If 'hierarchical', estimate the intercept using the estimated + slope ``slope`` (default option). + If 'separate', estimate the intercept independent of the estimated + slope. See Notes for details. 
+ + Returns + ------- + result : ``SiegelslopesResult`` instance + The return value is an object with the following attributes: + + slope : float + Estimate of the slope of the regression line. + intercept : float + Estimate of the intercept of the regression line. + + See Also + -------- + theilslopes : a similar technique without repeated medians + + Notes + ----- + With ``n = len(y)``, compute ``m_j`` as the median of + the slopes from the point ``(x[j], y[j])`` to all other `n-1` points. + ``slope`` is then the median of all slopes ``m_j``. + Two ways are given to estimate the intercept in [1]_ which can be chosen + via the parameter ``method``. + The hierarchical approach uses the estimated slope ``slope`` + and computes ``intercept`` as the median of ``y - slope*x``. + The other approach estimates the intercept separately as follows: for + each point ``(x[j], y[j])``, compute the intercepts of all the `n-1` + lines through the remaining points and take the median ``i_j``. + ``intercept`` is the median of the ``i_j``. + + The implementation computes `n` times the median of a vector of size `n` + which can be slow for large vectors. There are more efficient algorithms + (see [2]_) which are not implemented here. + + For compatibility with older versions of SciPy, the return value acts + like a ``namedtuple`` of length 2, with fields ``slope`` and + ``intercept``, so one can continue to write:: + + slope, intercept = siegelslopes(y, x) + + References + ---------- + .. [1] A. Siegel, "Robust Regression Using Repeated Medians", + Biometrika, Vol. 69, pp. 242-244, 1982. + + .. [2] A. Stein and M. Werman, "Finding the repeated median regression + line", Proceedings of the Third Annual ACM-SIAM Symposium on + Discrete Algorithms, pp. 409-413, 1992. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-5, 5, num=150) + >>> y = x + np.random.normal(size=x.size) + >>> y[11:15] += 10 # add outliers + >>> y[-5:] -= 7 + + Compute the slope and intercept. For comparison, also compute the + least-squares fit with `linregress`: + + >>> res = stats.siegelslopes(y, x) + >>> lsq_res = stats.linregress(x, y) + + Plot the results. The Siegel regression line is shown in red. The green + line shows the least-squares fit for comparison. + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, y, 'b.') + >>> ax.plot(x, res[1] + res[0] * x, 'r-') + >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') + >>> plt.show() + + """ + if method not in ['hierarchical', 'separate']: + raise ValueError("method can only be 'hierarchical' or 'separate'") + y = np.asarray(y).ravel() + if x is None: + x = np.arange(len(y), dtype=float) + else: + x = np.asarray(x, dtype=float).ravel() + if len(x) != len(y): + raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})") + dtype = np.result_type(x, y, np.float32) # use at least float32 + y, x = y.astype(dtype), x.astype(dtype) + medslope, medinter = siegelslopes_pythran(y, x, method) + return SiegelslopesResult(slope=medslope, intercept=medinter) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_tukeylambda_stats.py b/parrot/lib/python3.10/site-packages/scipy/stats/_tukeylambda_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..b77173c136d80eb57f5c993108b7408653acad13 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_tukeylambda_stats.py @@ -0,0 +1,199 @@ +import numpy as np +from numpy import poly1d +from scipy.special import beta + + +# The following code was used to generate the Pade coefficients for the +# Tukey Lambda variance function. Version 0.17 of mpmath was used. 
+#--------------------------------------------------------------------------- +# import mpmath as mp +# +# mp.mp.dps = 60 +# +# one = mp.mpf(1) +# two = mp.mpf(2) +# +# def mpvar(lam): +# if lam == 0: +# v = mp.pi**2 / three +# else: +# v = (two / lam**2) * (one / (one + two*lam) - +# mp.beta(lam + one, lam + one)) +# return v +# +# t = mp.taylor(mpvar, 0, 8) +# p, q = mp.pade(t, 4, 4) +# print("p =", [mp.fp.mpf(c) for c in p]) +# print("q =", [mp.fp.mpf(c) for c in q]) +#--------------------------------------------------------------------------- + +# Pade coefficients for the Tukey Lambda variance function. +_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127, + -0.5370742306855439, 0.17292046290190008, + -0.02371146284628187] +_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124, + 1.7660926747377275, 0.2643989311168465] + +# numpy.poly1d instances for the numerator and denominator of the +# Pade approximation to the Tukey Lambda variance. +_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1]) +_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1]) + + +def tukeylambda_variance(lam): + """Variance of the Tukey Lambda distribution. + + Parameters + ---------- + lam : array_like + The lambda values at which to compute the variance. + + Returns + ------- + v : ndarray + The variance. For lam < -0.5, the variance is not defined, so + np.nan is returned. For lam = 0.5, np.inf is returned. + + Notes + ----- + In an interval around lambda=0, this function uses the [4,4] Pade + approximation to compute the variance. Otherwise it uses the standard + formula (https://en.wikipedia.org/wiki/Tukey_lambda_distribution). The + Pade approximation is used because the standard formula has a removable + discontinuity at lambda = 0, and does not produce accurate numerical + results near lambda = 0. 
+ """ + lam = np.asarray(lam) + shp = lam.shape + lam = np.atleast_1d(lam).astype(np.float64) + + # For absolute values of lam less than threshold, use the Pade + # approximation. + threshold = 0.075 + + # Play games with masks to implement the conditional evaluation of + # the distribution. + # lambda < -0.5: var = nan + low_mask = lam < -0.5 + # lambda == -0.5: var = inf + neghalf_mask = lam == -0.5 + # abs(lambda) < threshold: use Pade approximation + small_mask = np.abs(lam) < threshold + # else the "regular" case: use the explicit formula. + reg_mask = ~(low_mask | neghalf_mask | small_mask) + + # Get the 'lam' values for the cases where they are needed. + small = lam[small_mask] + reg = lam[reg_mask] + + # Compute the function for each case. + v = np.empty_like(lam) + v[low_mask] = np.nan + v[neghalf_mask] = np.inf + if small.size > 0: + # Use the Pade approximation near lambda = 0. + v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small) + if reg.size > 0: + v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) - + beta(reg + 1, reg + 1)) + v.shape = shp + return v + + +# The following code was used to generate the Pade coefficients for the +# Tukey Lambda kurtosis function. Version 0.17 of mpmath was used. +#--------------------------------------------------------------------------- +# import mpmath as mp +# +# mp.mp.dps = 60 +# +# one = mp.mpf(1) +# two = mp.mpf(2) +# three = mp.mpf(3) +# four = mp.mpf(4) +# +# def mpkurt(lam): +# if lam == 0: +# k = mp.mpf(6)/5 +# else: +# numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) + +# three*mp.beta(two*lam+one, two*lam+one)) +# denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2 +# k = numer / denom - three +# return k +# +# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the +# # taylor function and we request a degree 9 Taylor polynomial, we actually +# # get degree 8. 
+# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01) +# t = [mp.chop(c, tol=1e-15) for c in t] +# p, q = mp.pade(t, 4, 4) +# print("p =", [mp.fp.mpf(c) for c in p]) +# print("q =", [mp.fp.mpf(c) for c in q]) +#--------------------------------------------------------------------------- + +# Pade coefficients for the Tukey Lambda kurtosis function. +_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077, + 0.20601184383406815, 4.59796302262789] +_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842, + 0.43075235247853005, -2.789746758009912] + +# numpy.poly1d instances for the numerator and denominator of the +# Pade approximation to the Tukey Lambda kurtosis. +_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1]) +_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1]) + + +def tukeylambda_kurtosis(lam): + """Kurtosis of the Tukey Lambda distribution. + + Parameters + ---------- + lam : array_like + The lambda values at which to compute the variance. + + Returns + ------- + v : ndarray + The variance. For lam < -0.25, the variance is not defined, so + np.nan is returned. For lam = 0.25, np.inf is returned. + + """ + lam = np.asarray(lam) + shp = lam.shape + lam = np.atleast_1d(lam).astype(np.float64) + + # For absolute values of lam less than threshold, use the Pade + # approximation. + threshold = 0.055 + + # Use masks to implement the conditional evaluation of the kurtosis. + # lambda < -0.25: kurtosis = nan + low_mask = lam < -0.25 + # lambda == -0.25: kurtosis = inf + negqrtr_mask = lam == -0.25 + # lambda near 0: use Pade approximation + small_mask = np.abs(lam) < threshold + # else the "regular" case: use the explicit formula. + reg_mask = ~(low_mask | negqrtr_mask | small_mask) + + # Get the 'lam' values for the cases where they are needed. + small = lam[small_mask] + reg = lam[reg_mask] + + # Compute the function for each case. 
+ k = np.empty_like(lam) + k[low_mask] = np.nan + k[negqrtr_mask] = np.inf + if small.size > 0: + k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small) + if reg.size > 0: + numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) + + 3 * beta(2 * reg + 1, 2 * reg + 1)) + denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2 + k[reg_mask] = numer / denom - 3 + + # The return value will be a numpy array; resetting the shape ensures that + # if `lam` was a scalar, the return value is a 0-d array. + k.shape = shp + return k diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_variation.py b/parrot/lib/python3.10/site-packages/scipy/stats/_variation.py new file mode 100644 index 0000000000000000000000000000000000000000..9febde9ec8f1ae967634ed799f5f7ba2d817318e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_variation.py @@ -0,0 +1,128 @@ +import numpy as np + +from scipy._lib._util import _get_nan +from scipy._lib._array_api import array_namespace, xp_copysign + +from ._axis_nan_policy import _axis_nan_policy_factory + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False): + """ + Compute the coefficient of variation. + + The coefficient of variation is the standard deviation divided by the + mean. This function is equivalent to:: + + np.std(x, axis=axis, ddof=ddof) / np.mean(x) + + The default for ``ddof`` is 0, but many definitions of the coefficient + of variation use the square root of the unbiased sample variance + for the sample standard deviation, which corresponds to ``ddof=1``. + + The function does not take the absolute value of the mean of the data, + so the return value is negative if the mean is negative. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate the coefficient of variation. + Default is 0. 
If None, compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains ``nan``. + The following options are available: + + * 'propagate': return ``nan`` + * 'raise': raise an exception + * 'omit': perform the calculation with ``nan`` values omitted + + The default is 'propagate'. + ddof : int, optional + Gives the "Delta Degrees Of Freedom" used when computing the + standard deviation. The divisor used in the calculation of the + standard deviation is ``N - ddof``, where ``N`` is the number of + elements. `ddof` must be less than ``N``; if it isn't, the result + will be ``nan`` or ``inf``, depending on ``N`` and the values in + the array. By default `ddof` is zero for backwards compatibility, + but it is recommended to use ``ddof=1`` to ensure that the sample + standard deviation is computed as the square root of the unbiased + sample variance. + + Returns + ------- + variation : ndarray + The calculated variation along the requested axis. + + Notes + ----- + There are several edge cases that are handled without generating a + warning: + + * If both the mean and the standard deviation are zero, ``nan`` + is returned. + * If the mean is zero and the standard deviation is nonzero, ``inf`` + is returned. + * If the input has length zero (either because the array has zero + length, or all the input values are ``nan`` and ``nan_policy`` is + ``'omit'``), ``nan`` is returned. + * If the input contains ``inf``, ``nan`` is returned. + + References + ---------- + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import variation + >>> variation([1, 2, 3, 4, 5], ddof=1) + 0.5270462766947299 + + Compute the variation along a given dimension of an array that contains + a few ``nan`` values: + + >>> x = np.array([[ 10.0, np.nan, 11.0, 19.0, 23.0, 29.0, 98.0], + ... [ 29.0, 30.0, 32.0, 33.0, 35.0, 56.0, 57.0], + ... [np.nan, np.nan, 12.0, 13.0, 16.0, 16.0, 17.0]]) + >>> variation(x, axis=1, ddof=1, nan_policy='omit') + array([1.05109361, 0.31428986, 0.146483 ]) + + """ + xp = array_namespace(a) + a = xp.asarray(a) + # `nan_policy` and `keepdims` are handled by `_axis_nan_policy` + # `axis=None` is only handled for NumPy backend + if axis is None: + a = xp.reshape(a, (-1,)) + axis = 0 + + n = a.shape[axis] + NaN = _get_nan(a) + + if a.size == 0 or ddof > n: + # Handle as a special case to avoid spurious warnings. + # The return values, if any, are all nan. + shp = list(a.shape) + shp.pop(axis) + result = xp.full(shp, fill_value=NaN) + return result[()] if result.ndim == 0 else result + + mean_a = xp.mean(a, axis=axis) + + if ddof == n: + # Another special case. Result is either inf or nan. 
+ std_a = xp.std(a, axis=axis, correction=0) + result = xp.where(std_a > 0, xp_copysign(xp.asarray(xp.inf), mean_a), NaN) + return result[()] if result.ndim == 0 else result + + with np.errstate(divide='ignore', invalid='ignore'): + std_a = xp.std(a, axis=axis, correction=ddof) + result = std_a / mean_a + + return result[()] if result.ndim == 0 else result diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_warnings_errors.py b/parrot/lib/python3.10/site-packages/scipy/stats/_warnings_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..38385b862c9d642b41af8d74279f98c6a427208a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_warnings_errors.py @@ -0,0 +1,38 @@ +# Warnings + + +class DegenerateDataWarning(RuntimeWarning): + """Warns when data is degenerate and results may not be reliable.""" + def __init__(self, msg=None): + if msg is None: + msg = ("Degenerate data encountered; results may not be reliable.") + self.args = (msg,) + + +class ConstantInputWarning(DegenerateDataWarning): + """Warns when all values in data are exactly equal.""" + def __init__(self, msg=None): + if msg is None: + msg = ("All values in data are exactly equal; " + "results may not be reliable.") + self.args = (msg,) + + +class NearConstantInputWarning(DegenerateDataWarning): + """Warns when all values in data are nearly equal.""" + def __init__(self, msg=None): + if msg is None: + msg = ("All values in data are nearly equal; " + "results may not be reliable.") + self.args = (msg,) + + +# Errors + + +class FitError(RuntimeError): + """Represents an error condition when fitting a distribution to data.""" + def __init__(self, msg=None): + if msg is None: + msg = ("An error occurred when fitting a distribution to data.") + self.args = (msg,) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py b/parrot/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py new file mode 100644 index 
0000000000000000000000000000000000000000..c59be3100489a7b9a8b8290f9e00616096b3c633 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py @@ -0,0 +1,246 @@ +import warnings +import numpy as np + +from scipy import stats +from ._stats_py import _get_pvalue, _rankdata, _SimpleNormal +from . import _morestats +from ._axis_nan_policy import _broadcast_arrays +from ._hypotests import _get_wilcoxon_distr +from scipy._lib._util import _lazywhere, _get_nan + + +class WilcoxonDistribution: + + def __init__(self, n): + n = np.asarray(n).astype(int, copy=False) + self.n = n + self._dists = {ni: _get_wilcoxon_distr(ni) for ni in np.unique(n)} + + def _cdf1(self, k, n): + pmfs = self._dists[n] + return pmfs[:k + 1].sum() + + def _cdf(self, k, n): + return np.vectorize(self._cdf1, otypes=[float])(k, n) + + def _sf1(self, k, n): + pmfs = self._dists[n] + return pmfs[k:].sum() + + def _sf(self, k, n): + return np.vectorize(self._sf1, otypes=[float])(k, n) + + def mean(self): + return self.n * (self.n + 1) / 4 + + def _prep(self, k): + k = np.asarray(k).astype(int, copy=False) + mn = self.mean() + out = np.empty(k.shape, dtype=np.float64) + return k, mn, out + + def cdf(self, k): + k, mn, out = self._prep(k) + return _lazywhere(k <= mn, (k, self.n), self._cdf, + f2=lambda k, n: 1 - self._sf(k+1, n))[()] + + def sf(self, k): + k, mn, out = self._prep(k) + return _lazywhere(k <= mn, (k, self.n), self._sf, + f2=lambda k, n: 1 - self._cdf(k-1, n))[()] + + +def _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis): + + axis = np.asarray(axis)[()] + message = "`axis` must be an integer." 
+ if not np.issubdtype(axis.dtype, np.integer) or axis.ndim != 0: + raise ValueError(message) + + message = '`axis` must be compatible with the shape(s) of `x` (and `y`)' + try: + if y is None: + x = np.asarray(x) + d = x + else: + x, y = _broadcast_arrays((x, y), axis=axis) + d = x - y + d = np.moveaxis(d, axis, -1) + except np.AxisError as e: + raise ValueError(message) from e + + message = "`x` and `y` must have the same length along `axis`." + if y is not None and x.shape[axis] != y.shape[axis]: + raise ValueError(message) + + message = "`x` (and `y`, if provided) must be an array of real numbers." + if np.issubdtype(d.dtype, np.integer): + d = d.astype(np.float64) + if not np.issubdtype(d.dtype, np.floating): + raise ValueError(message) + + zero_method = str(zero_method).lower() + zero_methods = {"wilcox", "pratt", "zsplit"} + message = f"`zero_method` must be one of {zero_methods}." + if zero_method not in zero_methods: + raise ValueError(message) + + corrections = {True, False} + message = f"`correction` must be one of {corrections}." + if correction not in corrections: + raise ValueError(message) + + alternative = str(alternative).lower() + alternatives = {"two-sided", "less", "greater"} + message = f"`alternative` must be one of {alternatives}." 
+ if alternative not in alternatives: + raise ValueError(message) + + if not isinstance(method, stats.PermutationMethod): + methods = {"auto", "approx", "exact"} + message = (f"`method` must be one of {methods} or " + "an instance of `stats.PermutationMethod`.") + if method not in methods: + raise ValueError(message) + output_z = True if method == 'approx' else False + + # logic unchanged here for backward compatibility + n_zero = np.sum(d == 0, axis=-1) + has_zeros = np.any(n_zero > 0) + if method == "auto": + if d.shape[-1] <= 50 and not has_zeros: + method = "exact" + else: + method = "approx" + + n_zero = np.sum(d == 0) + if n_zero > 0 and method == "exact": + method = "approx" + warnings.warn("Exact p-value calculation does not work if there are " + "zeros. Switching to normal approximation.", + stacklevel=2) + + if (method == "approx" and zero_method in ["wilcox", "pratt"] + and n_zero == d.size and d.size > 0 and d.ndim == 1): + raise ValueError("zero_method 'wilcox' and 'pratt' do not " + "work if x - y is zero for all elements.") + + if 0 < d.shape[-1] < 10 and method == "approx": + warnings.warn("Sample size too small for normal approximation.", stacklevel=2) + + return d, zero_method, correction, alternative, method, axis, output_z + + +def _wilcoxon_statistic(d, zero_method='wilcox'): + + i_zeros = (d == 0) + + if zero_method == 'wilcox': + # Wilcoxon's method for treating zeros was to remove them from + # the calculation. We do this by replacing 0s with NaNs, which + # are ignored anyway. + if not d.flags['WRITEABLE']: + d = d.copy() + d[i_zeros] = np.nan + + i_nan = np.isnan(d) + n_nan = np.sum(i_nan, axis=-1) + count = d.shape[-1] - n_nan + + r, t = _rankdata(abs(d), 'average', return_ties=True) + + r_plus = np.sum((d > 0) * r, axis=-1) + r_minus = np.sum((d < 0) * r, axis=-1) + + if zero_method == "zsplit": + # The "zero-split" method for treating zeros is to add half their contribution + # to r_plus and half to r_minus. 
+ # See gh-2263 for the origin of this method. + r_zero_2 = np.sum(i_zeros * r, axis=-1) / 2 + r_plus += r_zero_2 + r_minus += r_zero_2 + + mn = count * (count + 1.) * 0.25 + se = count * (count + 1.) * (2. * count + 1.) + + if zero_method == "pratt": + # Pratt's method for treating zeros was just to modify the z-statistic. + + # normal approximation needs to be adjusted, see Cureton (1967) + n_zero = i_zeros.sum(axis=-1) + mn -= n_zero * (n_zero + 1.) * 0.25 + se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.) + + # zeros are not to be included in tie-correction. + # any tie counts corresponding with zeros are in the 0th column + t[i_zeros.any(axis=-1), 0] = 0 + + tie_correct = (t**3 - t).sum(axis=-1) + se -= tie_correct/2 + se = np.sqrt(se / 24) + + z = (r_plus - mn) / se + + return r_plus, r_minus, se, z, count + + +def _correction_sign(z, alternative): + if alternative == 'greater': + return 1 + elif alternative == 'less': + return -1 + else: + return np.sign(z) + + +def _wilcoxon_nd(x, y=None, zero_method='wilcox', correction=True, + alternative='two-sided', method='auto', axis=0): + + temp = _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis) + d, zero_method, correction, alternative, method, axis, output_z = temp + + if d.size == 0: + NaN = _get_nan(d) + res = _morestats.WilcoxonResult(statistic=NaN, pvalue=NaN) + if method == 'approx': + res.zstatistic = NaN + return res + + r_plus, r_minus, se, z, count = _wilcoxon_statistic(d, zero_method) + + if method == 'approx': + if correction: + sign = _correction_sign(z, alternative) + z -= sign * 0.5 / se + p = _get_pvalue(z, _SimpleNormal(), alternative, xp=np) + elif method == 'exact': + dist = WilcoxonDistribution(count) + # The null distribution in `dist` is exact only if there are no ties + # or zeros. If there are ties or zeros, the statistic can be non- + # integral, but the null distribution is only defined for integral + # values of the statistic. 
Therefore, we're conservative: round + # non-integral statistic up before computing CDF and down before + # computing SF. This preserves symmetry w.r.t. alternatives and + # order of the input arguments. See gh-19872. + if alternative == 'less': + p = dist.cdf(np.ceil(r_plus)) + elif alternative == 'greater': + p = dist.sf(np.floor(r_plus)) + else: + p = 2 * np.minimum(dist.sf(np.floor(r_plus)), + dist.cdf(np.ceil(r_plus))) + p = np.clip(p, 0, 1) + else: # `PermutationMethod` instance (already validated) + p = stats.permutation_test( + (d,), lambda d: _wilcoxon_statistic(d, zero_method)[0], + permutation_type='samples', **method._asdict(), + alternative=alternative, axis=-1).pvalue + + # for backward compatibility... + statistic = np.minimum(r_plus, r_minus) if alternative=='two-sided' else r_plus + z = -np.abs(z) if (alternative == 'two-sided' and method == 'approx') else z + + res = _morestats.WilcoxonResult(statistic=statistic, pvalue=p[()]) + if output_z: + res.zstatistic = z[()] + return res diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/biasedurn.py b/parrot/lib/python3.10/site-packages/scipy/stats/biasedurn.py new file mode 100644 index 0000000000000000000000000000000000000000..2b1c9f1cf2b3b39acdcaeda97a90e8c11c589d89 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/biasedurn.py @@ -0,0 +1,16 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="biasedurn", + private_modules=["_biasedurn"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/distributions.py b/parrot/lib/python3.10/site-packages/scipy/stats/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9c37aa98c9545b2616c8d32e8f676d8d49289e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/distributions.py @@ -0,0 +1,24 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +# NOTE: To look at history using `git blame`, use `git blame -M -C -C` +# instead of `git blame -Lxxx,+x`. +# +from ._distn_infrastructure import (rv_discrete, rv_continuous, rv_frozen) # noqa: F401 + +from . import _continuous_distns +from . import _discrete_distns + +from ._continuous_distns import * # noqa: F403 +from ._levy_stable import levy_stable +from ._discrete_distns import * # noqa: F403 +from ._entropy import entropy + +# For backwards compatibility e.g. pymc expects distributions.__all__. +__all__ = ['rv_discrete', 'rv_continuous', 'rv_histogram', 'entropy'] # noqa: F405 + +# Add only the distribution names, not the *_gen names. +__all__ += _continuous_distns._distn_names +__all__ += ['levy_stable'] +__all__ += _discrete_distns._distn_names diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/kde.py b/parrot/lib/python3.10/site-packages/scipy/stats/kde.py new file mode 100644 index 0000000000000000000000000000000000000000..4401da5a30f4452ab394232d3928493d0e3b77ec --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/kde.py @@ -0,0 +1,18 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. 
+# Use the `scipy.stats` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = ["gaussian_kde"] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="kde", + private_modules=["_kde"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/morestats.py b/parrot/lib/python3.10/site-packages/scipy/stats/morestats.py new file mode 100644 index 0000000000000000000000000000000000000000..ee8e6f43b7aaf7cb9af0fc1b37fdcee3a63277a3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/morestats.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'mvsdist', + 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', + 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', + 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', + 'fligner', 'mood', 'wilcoxon', 'median_test', + 'circmean', 'circvar', 'circstd', 'anderson_ksamp', + 'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax', + 'yeojohnson_normplot', 'find_repeats', 'chi2_contingency', 'distributions', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="morestats", + private_modules=["_morestats"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/mstats.py b/parrot/lib/python3.10/site-packages/scipy/stats/mstats.py new file mode 100644 index 0000000000000000000000000000000000000000..88016af71803dc5c4ebadba168f22cdcd8273dbb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/mstats.py @@ -0,0 +1,140 @@ +""" 
+=================================================================== +Statistical functions for masked arrays (:mod:`scipy.stats.mstats`) +=================================================================== + +.. currentmodule:: scipy.stats.mstats + +This module contains a large number of statistical functions that can +be used with masked arrays. + +Most of these functions are similar to those in `scipy.stats` but might +have small differences in the API or in the algorithm used. Since this +is a relatively new package, some API changes are still possible. + +Summary statistics +================== + +.. autosummary:: + :toctree: generated/ + + describe + gmean + hmean + kurtosis + mode + mquantiles + hdmedian + hdquantiles + hdquantiles_sd + idealfourths + plotting_positions + meppf + moment + skew + tmean + tvar + tmin + tmax + tsem + variation + find_repeats + sem + trimmed_mean + trimmed_mean_ci + trimmed_std + trimmed_var + +Frequency statistics +==================== + +.. autosummary:: + :toctree: generated/ + + scoreatpercentile + +Correlation functions +===================== + +.. autosummary:: + :toctree: generated/ + + f_oneway + pearsonr + spearmanr + pointbiserialr + kendalltau + kendalltau_seasonal + linregress + siegelslopes + theilslopes + sen_seasonal_slopes + +Statistical tests +================= + +.. autosummary:: + :toctree: generated/ + + ttest_1samp + ttest_onesamp + ttest_ind + ttest_rel + chisquare + kstest + ks_2samp + ks_1samp + ks_twosamp + mannwhitneyu + rankdata + kruskal + kruskalwallis + friedmanchisquare + brunnermunzel + skewtest + kurtosistest + normaltest + +Transformations +=============== + +.. autosummary:: + :toctree: generated/ + + obrientransform + trim + trima + trimmed_stde + trimr + trimtail + trimboth + winsorize + zmap + zscore + +Other +===== + +.. autosummary:: + :toctree: generated/ + + argstoarray + count_tied_groups + msign + compare_medians_ms + median_cihs + mjci + mquantiles_cimj + rsh + +""" +from . 
import _mstats_basic +from . import _mstats_extras +from ._mstats_basic import * # noqa: F403 +from ._mstats_extras import * # noqa: F403 +# Functions that support masked array input in stats but need to be kept in the +# mstats namespace for backwards compatibility: +from scipy.stats import gmean, hmean, zmap, zscore, chisquare + +__all__ = _mstats_basic.__all__ + _mstats_extras.__all__ +__all__ += ['gmean', 'hmean', 'zmap', 'zscore', 'chisquare'] diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/mstats_basic.py b/parrot/lib/python3.10/site-packages/scipy/stats/mstats_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..19cc67a6acdfa054ffa2b29b6e774dd7aafda263 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/mstats_basic.py @@ -0,0 +1,42 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'argstoarray', + 'count_tied_groups', + 'describe', + 'f_oneway', 'find_repeats','friedmanchisquare', + 'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis', + 'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest', + 'ks_1samp', 'kstest', + 'linregress', + 'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign', + 'normaltest', + 'obrientransform', + 'pearsonr','plotting_positions','pointbiserialr', + 'rankdata', + 'scoreatpercentile','sem', + 'sen_seasonal_slopes','skew','skewtest','spearmanr', + 'siegelslopes', 'theilslopes', + 'tmax','tmean','tmin','trim','trimboth', + 'trimtail','trima','trimr','trimmed_mean','trimmed_std', + 'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp', + 'ttest_ind','ttest_rel','tvar', + 'variation', + 'winsorize', + 'brunnermunzel', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", 
module="mstats_basic", + private_modules=["_mstats_basic"], all=__all__, + attribute=name, correct_module="mstats") diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/mstats_extras.py b/parrot/lib/python3.10/site-packages/scipy/stats/mstats_extras.py new file mode 100644 index 0000000000000000000000000000000000000000..fec695329cf2c2d58a4918cc99e209c0650c3ea6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/mstats_extras.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'compare_medians_ms', + 'hdquantiles', 'hdmedian', 'hdquantiles_sd', + 'idealfourths', + 'median_cihs','mjci','mquantiles_cimj', + 'rsh', + 'trimmed_mean_ci', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="mstats_extras", + private_modules=["_mstats_extras"], all=__all__, + attribute=name, correct_module="mstats") diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/mvn.py b/parrot/lib/python3.10/site-packages/scipy/stats/mvn.py new file mode 100644 index 0000000000000000000000000000000000000000..65da9e20f6a4e6d24c1cb206c59821730fb6ab83 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/mvn.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="mvn", + private_modules=["_mvn"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/qmc.py b/parrot/lib/python3.10/site-packages/scipy/stats/qmc.py new file mode 100644 index 0000000000000000000000000000000000000000..a8a08343cf4c759938b31c29e32aaa644bf6e0fd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/qmc.py @@ -0,0 +1,236 @@ +r""" +==================================================== +Quasi-Monte Carlo submodule (:mod:`scipy.stats.qmc`) +==================================================== + +.. currentmodule:: scipy.stats.qmc + +This module provides Quasi-Monte Carlo generators and associated helper +functions. + + +Quasi-Monte Carlo +================= + +Engines +------- + +.. autosummary:: + :toctree: generated/ + + QMCEngine + Sobol + Halton + LatinHypercube + PoissonDisk + MultinomialQMC + MultivariateNormalQMC + +Helpers +------- + +.. autosummary:: + :toctree: generated/ + + discrepancy + geometric_discrepancy + update_discrepancy + scale + + +Introduction to Quasi-Monte Carlo +================================= + +Quasi-Monte Carlo (QMC) methods [1]_, [2]_, [3]_ provide an +:math:`n \times d` array of numbers in :math:`[0,1]`. They can be used in +place of :math:`n` points from the :math:`U[0,1]^{d}` distribution. Compared to +random points, QMC points are designed to have fewer gaps and clumps. This is +quantified by discrepancy measures [4]_. From the Koksma-Hlawka +inequality [5]_ we know that low discrepancy reduces a bound on +integration error. Averaging a function :math:`f` over :math:`n` QMC points +can achieve an integration error close to :math:`O(n^{-1})` for well +behaved functions [2]_. 
+ +Most QMC constructions are designed for special values of :math:`n` +such as powers of 2 or large primes. Changing the sample +size by even one can degrade their performance, even their +rate of convergence [6]_. For instance :math:`n=100` points may give less +accuracy than :math:`n=64` if the method was designed for :math:`n=2^m`. + +Some QMC constructions are extensible in :math:`n`: we can find +another special sample size :math:`n' > n` and often an infinite +sequence of increasing special sample sizes. Some QMC +constructions are extensible in :math:`d`: we can increase the dimension, +possibly to some upper bound, and typically without requiring +special values of :math:`d`. Some QMC methods are extensible in +both :math:`n` and :math:`d`. + +QMC points are deterministic. That makes it hard to estimate the accuracy of +integrals estimated by averages over QMC points. Randomized QMC (RQMC) [7]_ +points are constructed so that each point is individually :math:`U[0,1]^{d}` +while collectively the :math:`n` points retain their low discrepancy. +One can make :math:`R` independent replications of RQMC points to +see how stable a computation is. From :math:`R` independent values, +a t-test (or bootstrap t-test [8]_) then gives approximate confidence +intervals on the mean value. Some RQMC methods produce a +root mean squared error that is actually :math:`o(1/n)` and smaller than +the rate seen in unrandomized QMC. An intuitive explanation is +that the error is a sum of many small ones and random errors +cancel in a way that deterministic ones do not. RQMC also +has advantages on integrands that are singular or, for other +reasons, fail to be Riemann integrable. + +(R)QMC cannot beat Bahkvalov's curse of dimension (see [9]_). For +any random or deterministic method, there are worst case functions +that will give it poor performance in high dimensions. A worst +case function for QMC might be 0 at all n points but very +large elsewhere. 
Worst case analyses get very pessimistic +in high dimensions. (R)QMC can bring a great improvement over +MC when the functions on which it is used are not worst case. +For instance (R)QMC can be especially effective on integrands +that are well approximated by sums of functions of +some small number of their input variables at a time [10]_, [11]_. +That property is often a surprising finding about those functions. + +Also, to see an improvement over IID MC, (R)QMC requires a bit of smoothness of +the integrand, roughly the mixed first order derivative in each direction, +:math:`\partial^d f/\partial x_1 \cdots \partial x_d`, must be integral. +For instance, a function that is 1 inside the hypersphere and 0 outside of it +has infinite variation in the sense of Hardy and Krause for any dimension +:math:`d = 2`. + +Scrambled nets are a kind of RQMC that have some valuable robustness +properties [12]_. If the integrand is square integrable, they give variance +:math:`var_{SNET} = o(1/n)`. There is a finite upper bound on +:math:`var_{SNET} / var_{MC}` that holds simultaneously for every square +integrable integrand. Scrambled nets satisfy a strong law of large numbers +for :math:`f` in :math:`L^p` when :math:`p>1`. In some +special cases there is a central limit theorem [13]_. For smooth enough +integrands they can achieve RMSE nearly :math:`O(n^{-3})`. See [12]_ +for references about these properties. + +The main kinds of QMC methods are lattice rules [14]_ and digital +nets and sequences [2]_, [15]_. The theories meet up in polynomial +lattice rules [16]_ which can produce digital nets. Lattice rules +require some form of search for good constructions. For digital +nets there are widely used default constructions. + +The most widely used QMC methods are Sobol' sequences [17]_. +These are digital nets. They are extensible in both :math:`n` and :math:`d`. +They can be scrambled. The special sample sizes are powers +of 2. 
Another popular method are Halton sequences [18]_. +The constructions resemble those of digital nets. The earlier +dimensions have much better equidistribution properties than +later ones. There are essentially no special sample sizes. +They are not thought to be as accurate as Sobol' sequences. +They can be scrambled. The nets of Faure [19]_ are also widely +used. All dimensions are equally good, but the special sample +sizes grow rapidly with dimension :math:`d`. They can be scrambled. +The nets of Niederreiter and Xing [20]_ have the best asymptotic +properties but have not shown good empirical performance [21]_. + +Higher order digital nets are formed by a digit interleaving process +in the digits of the constructed points. They can achieve higher +levels of asymptotic accuracy given higher smoothness conditions on :math:`f` +and they can be scrambled [22]_. There is little or no empirical work +showing the improved rate to be attained. + +Using QMC is like using the entire period of a small random +number generator. The constructions are similar and so +therefore are the computational costs [23]_. + +(R)QMC is sometimes improved by passing the points through +a baker's transformation (tent function) prior to using them. +That function has the form :math:`1-2|x-1/2|`. As :math:`x` goes from 0 to +1, this function goes from 0 to 1 and then back. It is very +useful to produce a periodic function for lattice rules [14]_, +and sometimes it improves the convergence rate [24]_. + +It is not straightforward to apply QMC methods to Markov +chain Monte Carlo (MCMC). We can think of MCMC as using +:math:`n=1` point in :math:`[0,1]^{d}` for very large :math:`d`, with +ergodic results corresponding to :math:`d \to \infty`. One proposal is +in [25]_ and under strong conditions an improved rate of convergence +has been shown [26]_. + +Returning to Sobol' points: there are many versions depending +on what are called direction numbers. 
Those are the result of +searches and are tabulated. A very widely used set of direction +numbers come from [27]_. It is extensible in dimension up to +:math:`d=21201`. + +References +---------- +.. [1] Owen, Art B. "Monte Carlo Book: the Quasi-Monte Carlo parts." 2019. +.. [2] Niederreiter, Harald. "Random number generation and quasi-Monte Carlo + methods." Society for Industrial and Applied Mathematics, 1992. +.. [3] Dick, Josef, Frances Y. Kuo, and Ian H. Sloan. "High-dimensional + integration: the quasi-Monte Carlo way." Acta Numerica no. 22: 133, 2013. +.. [4] Aho, A. V., C. Aistleitner, T. Anderson, K. Appel, V. Arnol'd, N. + Aronszajn, D. Asotsky et al. "W. Chen et al.(eds.), "A Panorama of + Discrepancy Theory", Sringer International Publishing, + Switzerland: 679, 2014. +.. [5] Hickernell, Fred J. "Koksma-Hlawka Inequality." Wiley StatsRef: + Statistics Reference Online, 2014. +.. [6] Owen, Art B. "On dropping the first Sobol' point." :arxiv:`2008.08051`, + 2020. +.. [7] L'Ecuyer, Pierre, and Christiane Lemieux. "Recent advances in randomized + quasi-Monte Carlo methods." In Modeling uncertainty, pp. 419-474. Springer, + New York, NY, 2002. +.. [8] DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence + intervals." Statistical science: 189-212, 1996. +.. [9] Dimov, Ivan T. "Monte Carlo methods for applied scientists." World + Scientific, 2008. +.. [10] Caflisch, Russel E., William J. Morokoff, and Art B. Owen. "Valuation + of mortgage backed securities using Brownian bridges to reduce effective + dimension." Journal of Computational Finance: no. 1 27-46, 1997. +.. [11] Sloan, Ian H., and Henryk Wozniakowski. "When are quasi-Monte Carlo + algorithms efficient for high dimensional integrals?." Journal of Complexity + 14, no. 1 (1998): 1-33. +.. [12] Owen, Art B., and Daniel Rudolf, "A strong law of large numbers for + scrambled net integration." SIAM Review, to appear. +.. [13] Loh, Wei-Liem. 
"On the asymptotic distribution of scrambled net + quadrature." The Annals of Statistics 31, no. 4: 1282-1324, 2003. +.. [14] Sloan, Ian H. and S. Joe. "Lattice methods for multiple integration." + Oxford University Press, 1994. +.. [15] Dick, Josef, and Friedrich Pillichshammer. "Digital nets and sequences: + discrepancy theory and quasi-Monte Carlo integration." Cambridge University + Press, 2010. +.. [16] Dick, Josef, F. Kuo, Friedrich Pillichshammer, and I. Sloan. + "Construction algorithms for polynomial lattice rules for multivariate + integration." Mathematics of computation 74, no. 252: 1895-1921, 2005. +.. [17] Sobol', Il'ya Meerovich. "On the distribution of points in a cube and + the approximate evaluation of integrals." Zhurnal Vychislitel'noi Matematiki + i Matematicheskoi Fiziki 7, no. 4: 784-802, 1967. +.. [18] Halton, John H. "On the efficiency of certain quasi-random sequences of + points in evaluating multi-dimensional integrals." Numerische Mathematik 2, + no. 1: 84-90, 1960. +.. [19] Faure, Henri. "Discrepance de suites associees a un systeme de + numeration (en dimension s)." Acta arithmetica 41, no. 4: 337-351, 1982. +.. [20] Niederreiter, Harold, and Chaoping Xing. "Low-discrepancy sequences and + global function fields with many rational places." Finite Fields and their + applications 2, no. 3: 241-273, 1996. +.. [21] Hong, Hee Sun, and Fred J. Hickernell. "Algorithm 823: Implementing + scrambled digital sequences." ACM Transactions on Mathematical Software + (TOMS) 29, no. 2: 95-109, 2003. +.. [22] Dick, Josef. "Higher order scrambled digital nets achieve the optimal + rate of the root mean square error for smooth integrands." The Annals of + Statistics 39, no. 3: 1372-1398, 2011. +.. [23] Niederreiter, Harald. "Multidimensional numerical integration using + pseudorandom numbers." In Stochastic Programming 84 Part I, pp. 17-38. + Springer, Berlin, Heidelberg, 1986. +.. [24] Hickernell, Fred J. 
"Obtaining O (N-2+e) Convergence for Lattice + Quadrature Rules." In Monte Carlo and Quasi-Monte Carlo Methods 2000, + pp. 274-289. Springer, Berlin, Heidelberg, 2002. +.. [25] Owen, Art B., and Seth D. Tribble. "A quasi-Monte Carlo Metropolis + algorithm." Proceedings of the National Academy of Sciences 102, + no. 25: 8844-8849, 2005. +.. [26] Chen, Su. "Consistency and convergence rate of Markov chain quasi Monte + Carlo with examples." PhD diss., Stanford University, 2011. +.. [27] Joe, Stephen, and Frances Y. Kuo. "Constructing Sobol sequences with + better two-dimensional projections." SIAM Journal on Scientific Computing + 30, no. 5: 2635-2654, 2008. + +""" +from ._qmc import * # noqa: F403 +from ._qmc import __all__ # noqa: F401 diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/sampling.py b/parrot/lib/python3.10/site-packages/scipy/stats/sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..12174d9dfb3cb93fa33811ed4b5d233817512e36 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/sampling.py @@ -0,0 +1,73 @@ +""" +====================================================== +Random Number Generators (:mod:`scipy.stats.sampling`) +====================================================== + +.. currentmodule:: scipy.stats.sampling + +This module contains a collection of random number generators to sample +from univariate continuous and discrete distributions. It uses the +implementation of a C library called "UNU.RAN". The only exception is +RatioUniforms, which is a pure Python implementation of the +Ratio-of-Uniforms method. + +Generators Wrapped +================== + +For continuous distributions +---------------------------- + +.. autosummary:: + :toctree: generated/ + + NumericalInverseHermite + NumericalInversePolynomial + TransformedDensityRejection + SimpleRatioUniforms + RatioUniforms + +For discrete distributions +-------------------------- + +.. 
autosummary:: + :toctree: generated/ + + DiscreteAliasUrn + DiscreteGuideTable + +Warnings / Errors used in :mod:`scipy.stats.sampling` +----------------------------------------------------- + +.. autosummary:: + :toctree: generated/ + + UNURANError + + +Generators for pre-defined distributions +======================================== + +To easily apply the above methods for some of the continuous distributions +in :mod:`scipy.stats`, the following functionality can be used: + +.. autosummary:: + :toctree: generated/ + + FastGeneratorInversion + +""" +from ._sampling import FastGeneratorInversion, RatioUniforms # noqa: F401 +from ._unuran.unuran_wrapper import ( # noqa: F401 + TransformedDensityRejection, + DiscreteAliasUrn, + DiscreteGuideTable, + NumericalInversePolynomial, + NumericalInverseHermite, + SimpleRatioUniforms, + UNURANError +) + +__all__ = ["NumericalInverseHermite", "NumericalInversePolynomial", + "TransformedDensityRejection", "SimpleRatioUniforms", + "RatioUniforms", "DiscreteAliasUrn", "DiscreteGuideTable", + "UNURANError", "FastGeneratorInversion"] diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/stats.py b/parrot/lib/python3.10/site-packages/scipy/stats/stats.py new file mode 100644 index 0000000000000000000000000000000000000000..d5d278e209ce8d487235ee281620af8520d76d87 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/stats.py @@ -0,0 +1,41 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'find_repeats', 'gmean', 'hmean', 'pmean', 'mode', 'tmean', 'tvar', + 'tmin', 'tmax', 'tstd', 'tsem', 'moment', + 'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest', + 'normaltest', 'jarque_bera', + 'scoreatpercentile', 'percentileofscore', + 'cumfreq', 'relfreq', 'obrientransform', + 'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd', + 'median_abs_deviation', + 'sigmaclip', 'trimboth', 'trim1', 'trim_mean', + 'f_oneway', + 'pearsonr', 'fisher_exact', + 'spearmanr', 'pointbiserialr', + 'kendalltau', 'weightedtau', 'multiscale_graphcorr', + 'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp', + 'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', + 'kstest', 'ks_1samp', 'ks_2samp', + 'chisquare', 'power_divergence', + 'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare', + 'rankdata', + 'combine_pvalues', 'wasserstein_distance', 'energy_distance', + 'brunnermunzel', 'alexandergovern', 'distributions', + 'mstats_basic', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="stats", + private_modules=["_stats_py", "_mgc"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/tests/test_resampling.py b/parrot/lib/python3.10/site-packages/scipy/stats/tests/test_resampling.py new file mode 100644 index 0000000000000000000000000000000000000000..8972d626d71627e52fb90e7753bdc4c35a3ebc69 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/tests/test_resampling.py @@ -0,0 +1,2025 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose, assert_equal, suppress_warnings + +from scipy.conftest import array_api_compatible +from scipy._lib._util import rng_integers +from scipy._lib._array_api import (is_numpy, xp_assert_close, + xp_assert_equal, array_namespace) +from scipy import stats, special +from scipy.optimize 
import root + +from scipy.stats import bootstrap, monte_carlo_test, permutation_test, power +import scipy.stats._resampling as _resampling + + +def test_bootstrap_iv(): + + message = "`data` must be a sequence of samples." + with pytest.raises(ValueError, match=message): + bootstrap(1, np.mean) + + message = "`data` must contain at least one sample." + with pytest.raises(ValueError, match=message): + bootstrap(tuple(), np.mean) + + message = "each sample in `data` must contain two or more observations..." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3], [1]), np.mean) + + message = ("When `paired is True`, all samples must have the same length ") + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True) + + message = "`vectorized` must be `True`, `False`, or `None`." + with pytest.raises(ValueError, match=message): + bootstrap(1, np.mean, vectorized='ekki') + + message = "`axis` must be an integer." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, axis=1.5) + + message = "could not convert string to float" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, confidence_level='ni') + + message = "`n_resamples` must be a non-negative integer." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000) + + message = "`n_resamples` must be a non-negative integer." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, batch=-1000) + + message = "`batch` must be a positive integer or None." 
+ with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, batch=1000.5) + + message = "`method` must be in" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, method='ekki') + + message = "`bootstrap_result` must have attribute `bootstrap_distribution'" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, bootstrap_result=10) + + message = "Either `bootstrap_result.bootstrap_distribution.size`" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, n_resamples=0) + + message = "'herring' cannot be used to seed a" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, random_state='herring') + + +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +@pytest.mark.parametrize("axis", [0, 1, 2]) +def test_bootstrap_batch(method, axis): + # for one-sample statistics, batch size shouldn't affect the result + np.random.seed(0) + + x = np.random.rand(10, 11, 12) + res1 = bootstrap((x,), np.mean, batch=None, method=method, + random_state=0, axis=axis, n_resamples=100) + res2 = bootstrap((x,), np.mean, batch=10, method=method, + random_state=0, axis=axis, n_resamples=100) + + assert_equal(res2.confidence_interval.low, res1.confidence_interval.low) + assert_equal(res2.confidence_interval.high, res1.confidence_interval.high) + assert_equal(res2.standard_error, res1.standard_error) + + +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +def test_bootstrap_paired(method): + # test that `paired` works as expected + np.random.seed(0) + n = 100 + x = np.random.rand(n) + y = np.random.rand(n) + + def my_statistic(x, y, axis=-1): + return ((x-y)**2).mean(axis=axis) + + def my_paired_statistic(i, axis=-1): + a = x[i] + b = y[i] + res = my_statistic(a, b) + return res + + i = np.arange(len(x)) + + res1 = bootstrap((i,), my_paired_statistic, random_state=0) + res2 = bootstrap((x, y), my_statistic, paired=True, 
random_state=0) + + assert_allclose(res1.confidence_interval, res2.confidence_interval) + assert_allclose(res1.standard_error, res2.standard_error) + + +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +@pytest.mark.parametrize("axis", [0, 1, 2]) +@pytest.mark.parametrize("paired", [True, False]) +def test_bootstrap_vectorized(method, axis, paired): + # test that paired is vectorized as expected: when samples are tiled, + # CI and standard_error of each axis-slice is the same as those of the + # original 1d sample + + np.random.seed(0) + + def my_statistic(x, y, z, axis=-1): + return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis) + + shape = 10, 11, 12 + n_samples = shape[axis] + + x = np.random.rand(n_samples) + y = np.random.rand(n_samples) + z = np.random.rand(n_samples) + res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method, + random_state=0, axis=0, n_resamples=100) + assert (res1.bootstrap_distribution.shape + == res1.standard_error.shape + (100,)) + + reshape = [1, 1, 1] + reshape[axis] = n_samples + x = np.broadcast_to(x.reshape(reshape), shape) + y = np.broadcast_to(y.reshape(reshape), shape) + z = np.broadcast_to(z.reshape(reshape), shape) + res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method, + random_state=0, axis=axis, n_resamples=100) + + assert_allclose(res2.confidence_interval.low, + res1.confidence_interval.low) + assert_allclose(res2.confidence_interval.high, + res1.confidence_interval.high) + assert_allclose(res2.standard_error, res1.standard_error) + + result_shape = list(shape) + result_shape.pop(axis) + + assert_equal(res2.confidence_interval.low.shape, result_shape) + assert_equal(res2.confidence_interval.high.shape, result_shape) + assert_equal(res2.standard_error.shape, result_shape) + + +@pytest.mark.slow +@pytest.mark.xfail_on_32bit("MemoryError with BCa observed in CI") +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +def 
test_bootstrap_against_theory(method): + # based on https://www.statology.org/confidence-intervals-python/ + rng = np.random.default_rng(2442101192988600726) + data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=rng) + alpha = 0.95 + dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data)) + expected_interval = dist.interval(confidence=alpha) + expected_se = dist.std() + + config = dict(data=(data,), statistic=np.mean, n_resamples=5000, + method=method, random_state=rng) + res = bootstrap(**config, confidence_level=alpha) + assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4) + assert_allclose(res.standard_error, expected_se, atol=3e-4) + + config.update(dict(n_resamples=0, bootstrap_result=res)) + res = bootstrap(**config, confidence_level=alpha, alternative='less') + assert_allclose(res.confidence_interval.high, dist.ppf(alpha), rtol=5e-4) + + config.update(dict(n_resamples=0, bootstrap_result=res)) + res = bootstrap(**config, confidence_level=alpha, alternative='greater') + assert_allclose(res.confidence_interval.low, dist.ppf(1-alpha), rtol=5e-4) + + +tests_R = {"basic": (23.77, 79.12), + "percentile": (28.86, 84.21), + "BCa": (32.31, 91.43)} + + +@pytest.mark.parametrize("method, expected", tests_R.items()) +def test_bootstrap_against_R(method, expected): + # Compare against R's "boot" library + # library(boot) + + # stat <- function (x, a) { + # mean(x[a]) + # } + + # x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22, + # 23, 34, 50, 81, 89, 121, 134, 213) + + # # Use a large value so we get a few significant digits for the CI. 
+ # n = 1000000 + # bootresult = boot(x, stat, n) + # result <- boot.ci(bootresult) + # print(result) + x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22, + 23, 34, 50, 81, 89, 121, 134, 213]) + res = bootstrap((x,), np.mean, n_resamples=1000000, method=method, + random_state=0) + assert_allclose(res.confidence_interval, expected, rtol=0.005) + + +tests_against_itself_1samp = {"basic": 1780, + "percentile": 1784, + "BCa": 1784} + + +def test_multisample_BCa_against_R(): + # Because bootstrap is stochastic, it's tricky to test against reference + # behavior. Here, we show that SciPy's BCa CI matches R wboot's BCa CI + # much more closely than the other SciPy CIs do. + + # arbitrary skewed data + x = [0.75859206, 0.5910282, -0.4419409, -0.36654601, + 0.34955357, -1.38835871, 0.76735821] + y = [1.41186073, 0.49775975, 0.08275588, 0.24086388, + 0.03567057, 0.52024419, 0.31966611, 1.32067634] + + # a multi-sample statistic for which the BCa CI tends to be different + # from the other CIs + def statistic(x, y, axis): + s1 = stats.skew(x, axis=axis) + s2 = stats.skew(y, axis=axis) + return s1 - s2 + + # compute confidence intervals using each method + rng = np.random.default_rng(468865032284792692) + + res_basic = stats.bootstrap((x, y), statistic, method='basic', + batch=100, random_state=rng) + res_percent = stats.bootstrap((x, y), statistic, method='percentile', + batch=100, random_state=rng) + res_bca = stats.bootstrap((x, y), statistic, method='bca', + batch=100, random_state=rng) + + # compute midpoints so we can compare just one number for each + mid_basic = np.mean(res_basic.confidence_interval) + mid_percent = np.mean(res_percent.confidence_interval) + mid_bca = np.mean(res_bca.confidence_interval) + + # reference for BCA CI computed using R wboot package: + # library(wBoot) + # library(moments) + + # x = c(0.75859206, 0.5910282, -0.4419409, -0.36654601, + # 0.34955357, -1.38835871, 0.76735821) + # y = c(1.41186073, 0.49775975, 0.08275588, 0.24086388, + # 
0.03567057, 0.52024419, 0.31966611, 1.32067634) + + # twoskew <- function(x1, y1) {skewness(x1) - skewness(y1)} + # boot.two.bca(x, y, skewness, conf.level = 0.95, + # R = 9999, stacked = FALSE) + mid_wboot = -1.5519 + + # compute percent difference relative to wboot BCA method + diff_basic = (mid_basic - mid_wboot)/abs(mid_wboot) + diff_percent = (mid_percent - mid_wboot)/abs(mid_wboot) + diff_bca = (mid_bca - mid_wboot)/abs(mid_wboot) + + # SciPy's BCa CI midpoint is much closer than that of the other methods + assert diff_basic < -0.15 + assert diff_percent > 0.15 + assert abs(diff_bca) < 0.03 + + +def test_BCa_acceleration_against_reference(): + # Compare the (deterministic) acceleration parameter for a multi-sample + # problem against a reference value. The example is from [1], but Efron's + # value seems inaccurate. Straightorward code for computing the + # reference acceleration (0.011008228344026734) is available at: + # https://github.com/scipy/scipy/pull/16455#issuecomment-1193400981 + + y = np.array([10, 27, 31, 40, 46, 50, 52, 104, 146]) + z = np.array([16, 23, 38, 94, 99, 141, 197]) + + def statistic(z, y, axis=0): + return np.mean(z, axis=axis) - np.mean(y, axis=axis) + + data = [z, y] + res = stats.bootstrap(data, statistic) + + axis = -1 + alpha = 0.95 + theta_hat_b = res.bootstrap_distribution + batch = 100 + _, _, a_hat = _resampling._bca_interval(data, statistic, axis, alpha, + theta_hat_b, batch) + assert_allclose(a_hat, 0.011008228344026734) + + +@pytest.mark.slow +@pytest.mark.parametrize("method, expected", + tests_against_itself_1samp.items()) +def test_bootstrap_against_itself_1samp(method, expected): + # The expected values in this test were generated using bootstrap + # to check for unintended changes in behavior. The test also makes sure + # that bootstrap works with multi-sample statistics and that the + # `axis` argument works as expected / function is vectorized. 
+ np.random.seed(0) + + n = 100 # size of sample + n_resamples = 999 # number of bootstrap resamples used to form each CI + confidence_level = 0.9 + + # The true mean is 5 + dist = stats.norm(loc=5, scale=1) + stat_true = dist.mean() + + # Do the same thing 2000 times. (The code is fully vectorized.) + n_replications = 2000 + data = dist.rvs(size=(n_replications, n)) + res = bootstrap((data,), + statistic=np.mean, + confidence_level=confidence_level, + n_resamples=n_resamples, + batch=50, + method=method, + axis=-1) + ci = res.confidence_interval + + # ci contains vectors of lower and upper confidence interval bounds + ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1])) + assert ci_contains_true == expected + + # ci_contains_true is not inconsistent with confidence_level + pvalue = stats.binomtest(ci_contains_true, n_replications, + confidence_level).pvalue + assert pvalue > 0.1 + + +tests_against_itself_2samp = {"basic": 892, + "percentile": 890} + + +@pytest.mark.slow +@pytest.mark.parametrize("method, expected", + tests_against_itself_2samp.items()) +def test_bootstrap_against_itself_2samp(method, expected): + # The expected values in this test were generated using bootstrap + # to check for unintended changes in behavior. The test also makes sure + # that bootstrap works with multi-sample statistics and that the + # `axis` argument works as expected / function is vectorized. + np.random.seed(0) + + n1 = 100 # size of sample 1 + n2 = 120 # size of sample 2 + n_resamples = 999 # number of bootstrap resamples used to form each CI + confidence_level = 0.9 + + # The statistic we're interested in is the difference in means + def my_stat(data1, data2, axis=-1): + mean1 = np.mean(data1, axis=axis) + mean2 = np.mean(data2, axis=axis) + return mean1 - mean2 + + # The true difference in the means is -0.1 + dist1 = stats.norm(loc=0, scale=1) + dist2 = stats.norm(loc=0.1, scale=1) + stat_true = dist1.mean() - dist2.mean() + + # Do the same thing 1000 times. 
(The code is fully vectorized.) + n_replications = 1000 + data1 = dist1.rvs(size=(n_replications, n1)) + data2 = dist2.rvs(size=(n_replications, n2)) + res = bootstrap((data1, data2), + statistic=my_stat, + confidence_level=confidence_level, + n_resamples=n_resamples, + batch=50, + method=method, + axis=-1) + ci = res.confidence_interval + + # ci contains vectors of lower and upper confidence interval bounds + ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1])) + assert ci_contains_true == expected + + # ci_contains_true is not inconsistent with confidence_level + pvalue = stats.binomtest(ci_contains_true, n_replications, + confidence_level).pvalue + assert pvalue > 0.1 + + +@pytest.mark.parametrize("method", ["basic", "percentile"]) +@pytest.mark.parametrize("axis", [0, 1]) +def test_bootstrap_vectorized_3samp(method, axis): + def statistic(*data, axis=0): + # an arbitrary, vectorized statistic + return sum(sample.mean(axis) for sample in data) + + def statistic_1d(*data): + # the same statistic, not vectorized + for sample in data: + assert sample.ndim == 1 + return statistic(*data, axis=0) + + np.random.seed(0) + x = np.random.rand(4, 5) + y = np.random.rand(4, 5) + z = np.random.rand(4, 5) + res1 = bootstrap((x, y, z), statistic, vectorized=True, + axis=axis, n_resamples=100, method=method, random_state=0) + res2 = bootstrap((x, y, z), statistic_1d, vectorized=False, + axis=axis, n_resamples=100, method=method, random_state=0) + assert_allclose(res1.confidence_interval, res2.confidence_interval) + assert_allclose(res1.standard_error, res2.standard_error) + + +@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107") +@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"]) +@pytest.mark.parametrize("axis", [0, 1]) +def test_bootstrap_vectorized_1samp(method, axis): + def statistic(x, axis=0): + # an arbitrary, vectorized statistic + return x.mean(axis=axis) + + def statistic_1d(x): + # the same statistic, not vectorized + 
assert x.ndim == 1 + return statistic(x, axis=0) + + np.random.seed(0) + x = np.random.rand(4, 5) + res1 = bootstrap((x,), statistic, vectorized=True, axis=axis, + n_resamples=100, batch=None, method=method, + random_state=0) + res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis, + n_resamples=100, batch=10, method=method, + random_state=0) + assert_allclose(res1.confidence_interval, res2.confidence_interval) + assert_allclose(res1.standard_error, res2.standard_error) + + +@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"]) +def test_bootstrap_degenerate(method): + data = 35 * [10000.] + if method == "BCa": + with np.errstate(invalid='ignore'): + msg = "The BCa confidence interval cannot be calculated" + with pytest.warns(stats.DegenerateDataWarning, match=msg): + res = bootstrap([data, ], np.mean, method=method) + assert_equal(res.confidence_interval, (np.nan, np.nan)) + else: + res = bootstrap([data, ], np.mean, method=method) + assert_equal(res.confidence_interval, (10000., 10000.)) + assert_equal(res.standard_error, 0) + + +@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"]) +def test_bootstrap_gh15678(method): + # Check that gh-15678 is fixed: when statistic function returned a Python + # float, method="BCa" failed when trying to add a dimension to the float + rng = np.random.default_rng(354645618886684) + dist = stats.norm(loc=2, scale=4) + data = dist.rvs(size=100, random_state=rng) + data = (data,) + res = bootstrap(data, stats.skew, method=method, n_resamples=100, + random_state=np.random.default_rng(9563)) + # this always worked because np.apply_along_axis returns NumPy data type + ref = bootstrap(data, stats.skew, method=method, n_resamples=100, + random_state=np.random.default_rng(9563), vectorized=False) + assert_allclose(res.confidence_interval, ref.confidence_interval) + assert_allclose(res.standard_error, ref.standard_error) + assert isinstance(res.standard_error, np.float64) + + +def test_bootstrap_min(): 
+ # Check that gh-15883 is fixed: percentileofscore should + # behave according to the 'mean' behavior and not trigger nan for BCa + rng = np.random.default_rng(1891289180021102) + dist = stats.norm(loc=2, scale=4) + data = dist.rvs(size=100, random_state=rng) + true_min = np.min(data) + data = (data,) + res = bootstrap(data, np.min, method="BCa", n_resamples=100, + random_state=np.random.default_rng(3942)) + assert true_min == res.confidence_interval.low + res2 = bootstrap(-np.array(data), np.max, method="BCa", n_resamples=100, + random_state=np.random.default_rng(3942)) + assert_allclose(-res.confidence_interval.low, + res2.confidence_interval.high) + assert_allclose(-res.confidence_interval.high, + res2.confidence_interval.low) + + +@pytest.mark.parametrize("additional_resamples", [0, 1000]) +def test_re_bootstrap(additional_resamples): + # Test behavior of parameter `bootstrap_result` + rng = np.random.default_rng(8958153316228384) + x = rng.random(size=100) + + n1 = 1000 + n2 = additional_resamples + n3 = n1 + additional_resamples + + rng = np.random.default_rng(296689032789913033) + res = stats.bootstrap((x,), np.mean, n_resamples=n1, random_state=rng, + confidence_level=0.95, method='percentile') + res = stats.bootstrap((x,), np.mean, n_resamples=n2, random_state=rng, + confidence_level=0.90, method='BCa', + bootstrap_result=res) + + rng = np.random.default_rng(296689032789913033) + ref = stats.bootstrap((x,), np.mean, n_resamples=n3, random_state=rng, + confidence_level=0.90, method='BCa') + + assert_allclose(res.standard_error, ref.standard_error, rtol=1e-14) + assert_allclose(res.confidence_interval, ref.confidence_interval, + rtol=1e-14) + + +@pytest.mark.xfail_on_32bit("Sensible to machine precision") +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +def test_bootstrap_alternative(method): + rng = np.random.default_rng(5894822712842015040) + dist = stats.norm(loc=2, scale=4) + data = (dist.rvs(size=(100), random_state=rng),) + + 
config = dict(data=data, statistic=np.std, random_state=rng, axis=-1) + t = stats.bootstrap(**config, confidence_level=0.9) + + config.update(dict(n_resamples=0, bootstrap_result=t)) + l = stats.bootstrap(**config, confidence_level=0.95, alternative='less') + g = stats.bootstrap(**config, confidence_level=0.95, alternative='greater') + + assert_allclose(l.confidence_interval.high, t.confidence_interval.high, + rtol=1e-14) + assert_allclose(g.confidence_interval.low, t.confidence_interval.low, + rtol=1e-14) + assert np.isneginf(l.confidence_interval.low) + assert np.isposinf(g.confidence_interval.high) + + with pytest.raises(ValueError, match='`alternative` must be one of'): + stats.bootstrap(**config, alternative='ekki-ekki') + + +def test_jackknife_resample(): + shape = 3, 4, 5, 6 + np.random.seed(0) + x = np.random.rand(*shape) + y = next(_resampling._jackknife_resample(x)) + + for i in range(shape[-1]): + # each resample is indexed along second to last axis + # (last axis is the one the statistic will be taken over / consumed) + slc = y[..., i, :] + expected = np.delete(x, i, axis=-1) + + assert np.array_equal(slc, expected) + + y2 = np.concatenate(list(_resampling._jackknife_resample(x, batch=2)), + axis=-2) + assert np.array_equal(y2, y) + + +@pytest.mark.parametrize("rng_name", ["RandomState", "default_rng"]) +def test_bootstrap_resample(rng_name): + rng = getattr(np.random, rng_name, None) + if rng is None: + pytest.skip(f"{rng_name} not available.") + rng1 = rng(0) + rng2 = rng(0) + + n_resamples = 10 + shape = 3, 4, 5, 6 + + np.random.seed(0) + x = np.random.rand(*shape) + y = _resampling._bootstrap_resample(x, n_resamples, random_state=rng1) + + for i in range(n_resamples): + # each resample is indexed along second to last axis + # (last axis is the one the statistic will be taken over / consumed) + slc = y[..., i, :] + + js = rng_integers(rng2, 0, shape[-1], shape[-1]) + expected = x[..., js] + + assert np.array_equal(slc, expected) + + 
+@pytest.mark.parametrize("score", [0, 0.5, 1]) +@pytest.mark.parametrize("axis", [0, 1, 2]) +def test_percentile_of_score(score, axis): + shape = 10, 20, 30 + np.random.seed(0) + x = np.random.rand(*shape) + p = _resampling._percentile_of_score(x, score, axis=-1) + + def vectorized_pos(a, score, axis): + return np.apply_along_axis(stats.percentileofscore, axis, a, score) + + p2 = vectorized_pos(x, score, axis=-1)/100 + + assert_allclose(p, p2, 1e-15) + + +def test_percentile_along_axis(): + # the difference between _percentile_along_axis and np.percentile is that + # np.percentile gets _all_ the qs for each axis slice, whereas + # _percentile_along_axis gets the q corresponding with each axis slice + + shape = 10, 20 + np.random.seed(0) + x = np.random.rand(*shape) + q = np.random.rand(*shape[:-1]) * 100 + y = _resampling._percentile_along_axis(x, q) + + for i in range(shape[0]): + res = y[i] + expected = np.percentile(x[i], q[i], axis=-1) + assert_allclose(res, expected, 1e-15) + + +@pytest.mark.parametrize("axis", [0, 1, 2]) +def test_vectorize_statistic(axis): + # test that _vectorize_statistic vectorizes a statistic along `axis` + + def statistic(*data, axis): + # an arbitrary, vectorized statistic + return sum(sample.mean(axis) for sample in data) + + def statistic_1d(*data): + # the same statistic, not vectorized + for sample in data: + assert sample.ndim == 1 + return statistic(*data, axis=0) + + # vectorize the non-vectorized statistic + statistic2 = _resampling._vectorize_statistic(statistic_1d) + + np.random.seed(0) + x = np.random.rand(4, 5, 6) + y = np.random.rand(4, 1, 6) + z = np.random.rand(1, 5, 6) + + res1 = statistic(x, y, z, axis=axis) + res2 = statistic2(x, y, z, axis=axis) + assert_allclose(res1, res2) + + +@pytest.mark.slow +@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"]) +def test_vector_valued_statistic(method): + # Generate 95% confidence interval around MLE of normal distribution + # parameters. 
Repeat 100 times, each time on sample of size 100. + # Check that confidence interval contains true parameters ~95 times. + # Confidence intervals are estimated and stochastic; a test failure + # does not necessarily indicate that something is wrong. More important + # than values of `counts` below is that the shapes of the outputs are + # correct. + + rng = np.random.default_rng(2196847219) + params = 1, 0.5 + sample = stats.norm.rvs(*params, size=(100, 100), random_state=rng) + + def statistic(data, axis): + return np.asarray([np.mean(data, axis), + np.std(data, axis, ddof=1)]) + + res = bootstrap((sample,), statistic, method=method, axis=-1, + n_resamples=9999, batch=200) + + counts = np.sum((res.confidence_interval.low.T < params) + & (res.confidence_interval.high.T > params), + axis=0) + assert np.all(counts >= 90) + assert np.all(counts <= 100) + assert res.confidence_interval.low.shape == (2, 100) + assert res.confidence_interval.high.shape == (2, 100) + assert res.standard_error.shape == (2, 100) + assert res.bootstrap_distribution.shape == (2, 100, 9999) + + +@pytest.mark.slow +@pytest.mark.filterwarnings('ignore::RuntimeWarning') +def test_vector_valued_statistic_gh17715(): + # gh-17715 reported a mistake introduced in the extension of BCa to + # multi-sample statistics; a `len` should have been `.shape[-1]`. Check + # that this is resolved. + + rng = np.random.default_rng(141921000979291141) + + def concordance(x, y, axis): + xm = x.mean(axis) + ym = y.mean(axis) + cov = ((x - xm[..., None]) * (y - ym[..., None])).mean(axis) + return (2 * cov) / (x.var(axis) + y.var(axis) + (xm - ym) ** 2) + + def statistic(tp, tn, fp, fn, axis): + actual = tp + fp + expected = tp + fn + return np.nan_to_num(concordance(actual, expected, axis)) + + def statistic_extradim(*args, axis): + return statistic(*args, axis)[np.newaxis, ...] 
+ + data = [[4, 0, 0, 2], # (tp, tn, fp, fn) + [2, 1, 2, 1], + [0, 6, 0, 0], + [0, 6, 3, 0], + [0, 8, 1, 0]] + data = np.array(data).T + + res = bootstrap(data, statistic_extradim, random_state=rng, paired=True) + ref = bootstrap(data, statistic, random_state=rng, paired=True) + assert_allclose(res.confidence_interval.low[0], + ref.confidence_interval.low, atol=1e-15) + assert_allclose(res.confidence_interval.high[0], + ref.confidence_interval.high, atol=1e-15) + + +def test_gh_20850(): + rng = np.random.default_rng(2085020850) + x = rng.random((10, 2)) + y = rng.random((11, 2)) + def statistic(x, y, axis): + return stats.ttest_ind(x, y, axis=axis).statistic + + # The shapes do *not* need to be the same along axis + stats.bootstrap((x, y), statistic) + stats.bootstrap((x.T, y.T), statistic, axis=1) + # But even when the shapes *are* the same along axis, the lengths + # along other dimensions have to be the same (or `bootstrap` warns). + message = "Ignoring the dimension specified by `axis`..." 
+ with pytest.warns(FutureWarning, match=message): + stats.bootstrap((x, y[:10, 0]), statistic) # this won't work after 1.16 + with pytest.warns(FutureWarning, match=message): + stats.bootstrap((x, y[:10, 0:1]), statistic) # this will + with pytest.warns(FutureWarning, match=message): + stats.bootstrap((x.T, y.T[0:1, :10]), statistic, axis=1) # this will + + +# --- Test Monte Carlo Hypothesis Test --- # + +class TestMonteCarloHypothesisTest: + atol = 2.5e-2 # for comparing p-value + + def get_rvs(self, rvs_in, rs, dtype=None, xp=np): + return lambda *args, **kwds: xp.asarray(rvs_in(*args, random_state=rs, **kwds), + dtype=dtype) + + def get_statistic(self, xp): + def statistic(x, axis): + m = xp.mean(x, axis=axis) + v = xp.var(x, axis=axis, correction=1) + n = x.shape[axis] + return m / (v/n)**0.5 + # return stats.ttest_1samp(x, popmean=0., axis=axis).statistic) + return statistic + + @array_api_compatible + def test_input_validation(self, xp): + # test that the appropriate error messages are raised for invalid input + + data = xp.asarray([1., 2., 3.]) + def stat(x, axis=None): + return xp.mean(x, axis=axis) + + message = "Array shapes are incompatible for broadcasting." + temp = (xp.zeros((2, 5)), xp.zeros((3, 5))) + rvs = (stats.norm.rvs, stats.norm.rvs) + with pytest.raises(ValueError, match=message): + monte_carlo_test(temp, rvs, lambda x, y, axis: 1, axis=-1) + + message = "`axis` must be an integer." + with pytest.raises(ValueError, match=message): + monte_carlo_test(data, stats.norm.rvs, stat, axis=1.5) + + message = "`vectorized` must be `True`, `False`, or `None`." + with pytest.raises(ValueError, match=message): + monte_carlo_test(data, stats.norm.rvs, stat, vectorized=1.5) + + message = "`rvs` must be callable or sequence of callables." 
+ with pytest.raises(TypeError, match=message): + monte_carlo_test(data, None, stat) + with pytest.raises(TypeError, match=message): + temp = xp.asarray([[1., 2.], [3., 4.]]) + monte_carlo_test(temp, [lambda x: x, None], stat) + + message = "If `rvs` is a sequence..." + with pytest.raises(ValueError, match=message): + temp = xp.asarray([[1., 2., 3.]]) + monte_carlo_test(temp, [lambda x: x, lambda x: x], stat) + + message = "`statistic` must be callable." + with pytest.raises(TypeError, match=message): + monte_carlo_test(data, stats.norm.rvs, None) + + message = "`n_resamples` must be a positive integer." + with pytest.raises(ValueError, match=message): + monte_carlo_test(data, stats.norm.rvs, stat, n_resamples=-1000) + + message = "`n_resamples` must be a positive integer." + with pytest.raises(ValueError, match=message): + monte_carlo_test(data, stats.norm.rvs, stat, n_resamples=1000.5) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + monte_carlo_test(data, stats.norm.rvs, stat, batch=-1000) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + monte_carlo_test(data, stats.norm.rvs, stat, batch=1000.5) + + message = "`alternative` must be in..." + with pytest.raises(ValueError, match=message): + monte_carlo_test(data, stats.norm.rvs, stat, alternative='ekki') + + # *If* this raises a value error, make sure it has the intended message + message = "Signature inspection of statistic" + def rvs(size): + return xp.asarray(stats.norm.rvs(size=size)) + try: + monte_carlo_test(data, rvs, xp.mean) + except ValueError as e: + assert str(e).startswith(message) + + @array_api_compatible + def test_input_validation_xp(self, xp): + def non_vectorized_statistic(x): + return xp.mean(x) + + message = "`statistic` must be vectorized..." 
+ sample = xp.asarray([1., 2., 3.]) + if is_numpy(xp): + monte_carlo_test(sample, stats.norm.rvs, non_vectorized_statistic) + return + + with pytest.raises(ValueError, match=message): + monte_carlo_test(sample, stats.norm.rvs, non_vectorized_statistic) + with pytest.raises(ValueError, match=message): + monte_carlo_test(sample, stats.norm.rvs, xp.mean, vectorized=False) + + @pytest.mark.xslow + @array_api_compatible + def test_batch(self, xp): + # make sure that the `batch` parameter is respected by checking the + # maximum batch size provided in calls to `statistic` + rng = np.random.default_rng(23492340193) + x = xp.asarray(rng.standard_normal(size=10)) + + xp_test = array_namespace(x) # numpy.std doesn't have `correction` + def statistic(x, axis): + batch_size = 1 if x.ndim == 1 else x.shape[0] + statistic.batch_size = max(batch_size, statistic.batch_size) + statistic.counter += 1 + return self.get_statistic(xp_test)(x, axis=axis) + statistic.counter = 0 + statistic.batch_size = 0 + + kwds = {'sample': x, 'statistic': statistic, + 'n_resamples': 1000, 'vectorized': True} + + kwds['rvs'] = self.get_rvs(stats.norm.rvs, np.random.default_rng(328423), xp=xp) + res1 = monte_carlo_test(batch=1, **kwds) + assert_equal(statistic.counter, 1001) + assert_equal(statistic.batch_size, 1) + + kwds['rvs'] = self.get_rvs(stats.norm.rvs, np.random.default_rng(328423), xp=xp) + statistic.counter = 0 + res2 = monte_carlo_test(batch=50, **kwds) + assert_equal(statistic.counter, 21) + assert_equal(statistic.batch_size, 50) + + kwds['rvs'] = self.get_rvs(stats.norm.rvs, np.random.default_rng(328423), xp=xp) + statistic.counter = 0 + res3 = monte_carlo_test(**kwds) + assert_equal(statistic.counter, 2) + assert_equal(statistic.batch_size, 1000) + + xp_assert_equal(res1.pvalue, res3.pvalue) + xp_assert_equal(res2.pvalue, res3.pvalue) + + @array_api_compatible + @pytest.mark.parametrize('axis', range(-3, 3)) + def test_axis_dtype(self, axis, xp): + # test that Nd-array samples are handled 
correctly for valid values + # of the `axis` parameter; also make sure non-default dtype is maintained + rng = np.random.default_rng(2389234) + size = [2, 3, 4] + size[axis] = 100 + + # Determine non-default dtype + dtype_default = xp.asarray(1.).dtype + dtype_str = 'float32'if ("64" in str(dtype_default)) else 'float64' + dtype_np = getattr(np, dtype_str) + dtype = getattr(xp, dtype_str) + + # ttest_1samp is CPU array-API compatible, but it would be good to + # include CuPy in this test. We'll perform ttest_1samp with a + # NumPy array, but all the rest with be done with fully array-API + # compatible code. + x = rng.standard_normal(size=size, dtype=dtype_np) + expected = stats.ttest_1samp(x, popmean=0., axis=axis) + + x = xp.asarray(x, dtype=dtype) + xp_test = array_namespace(x) # numpy.std doesn't have `correction` + statistic = self.get_statistic(xp_test) + rvs = self.get_rvs(stats.norm.rvs, rng, dtype=dtype, xp=xp) + + res = monte_carlo_test(x, rvs, statistic, vectorized=True, + n_resamples=20000, axis=axis) + + ref_statistic = xp.asarray(expected.statistic, dtype=dtype) + ref_pvalue = xp.asarray(expected.pvalue, dtype=dtype) + xp_assert_close(res.statistic, ref_statistic) + xp_assert_close(res.pvalue, ref_pvalue, atol=self.atol) + + @array_api_compatible + @pytest.mark.parametrize('alternative', ("two-sided", "less", "greater")) + def test_alternative(self, alternative, xp): + # test that `alternative` is working as expected + rng = np.random.default_rng(65723433) + + x = rng.standard_normal(size=30) + ref = stats.ttest_1samp(x, 0., alternative=alternative) + + x = xp.asarray(x) + xp_test = array_namespace(x) # numpy.std doesn't have `correction` + statistic = self.get_statistic(xp_test) + rvs = self.get_rvs(stats.norm.rvs, rng, xp=xp) + + res = monte_carlo_test(x, rvs, statistic, alternative=alternative) + + xp_assert_close(res.statistic, xp.asarray(ref.statistic)) + xp_assert_close(res.pvalue, xp.asarray(ref.pvalue), atol=self.atol) + + + # Tests below 
involve statistics that are not yet array-API compatible. + # They can be converted when the statistics are converted. + @pytest.mark.slow + @pytest.mark.parametrize('alternative', ("less", "greater")) + @pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5)) # skewness + def test_against_ks_1samp(self, alternative, a): + # test that monte_carlo_test can reproduce pvalue of ks_1samp + rng = np.random.default_rng(65723433) + + x = stats.skewnorm.rvs(a=a, size=30, random_state=rng) + expected = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative) + + def statistic1d(x): + return stats.ks_1samp(x, stats.norm.cdf, mode='asymp', + alternative=alternative).statistic + + norm_rvs = self.get_rvs(stats.norm.rvs, rng) + res = monte_carlo_test(x, norm_rvs, statistic1d, + n_resamples=1000, vectorized=False, + alternative=alternative) + + assert_allclose(res.statistic, expected.statistic) + if alternative == 'greater': + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + elif alternative == 'less': + assert_allclose(1-res.pvalue, expected.pvalue, atol=self.atol) + + @pytest.mark.parametrize('hypotest', (stats.skewtest, stats.kurtosistest)) + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + @pytest.mark.parametrize('a', np.linspace(-2, 2, 5)) # skewness + def test_against_normality_tests(self, hypotest, alternative, a): + # test that monte_carlo_test can reproduce pvalue of normality tests + rng = np.random.default_rng(85723405) + + x = stats.skewnorm.rvs(a=a, size=150, random_state=rng) + expected = hypotest(x, alternative=alternative) + + def statistic(x, axis): + return hypotest(x, axis=axis).statistic + + norm_rvs = self.get_rvs(stats.norm.rvs, rng) + res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True, + alternative=alternative) + + assert_allclose(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + + @pytest.mark.parametrize('a', np.arange(-2, 3)) # skewness parameter + 
def test_against_normaltest(self, a): + # test that monte_carlo_test can reproduce pvalue of normaltest + rng = np.random.default_rng(12340513) + + x = stats.skewnorm.rvs(a=a, size=150, random_state=rng) + expected = stats.normaltest(x) + + def statistic(x, axis): + return stats.normaltest(x, axis=axis).statistic + + norm_rvs = self.get_rvs(stats.norm.rvs, rng) + res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True, + alternative='greater') + + assert_allclose(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + + @pytest.mark.xslow + @pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5)) # skewness + def test_against_cramervonmises(self, a): + # test that monte_carlo_test can reproduce pvalue of cramervonmises + rng = np.random.default_rng(234874135) + + x = stats.skewnorm.rvs(a=a, size=30, random_state=rng) + expected = stats.cramervonmises(x, stats.norm.cdf) + + def statistic1d(x): + return stats.cramervonmises(x, stats.norm.cdf).statistic + + norm_rvs = self.get_rvs(stats.norm.rvs, rng) + res = monte_carlo_test(x, norm_rvs, statistic1d, + n_resamples=1000, vectorized=False, + alternative='greater') + + assert_allclose(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + + @pytest.mark.slow + @pytest.mark.parametrize('dist_name', ('norm', 'logistic')) + @pytest.mark.parametrize('i', range(5)) + def test_against_anderson(self, dist_name, i): + # test that monte_carlo_test can reproduce results of `anderson`. Note: + # `anderson` does not provide a p-value; it provides a list of + # significance levels and the associated critical value of the test + # statistic. `i` used to index this list. 
+ + # find the skewness for which the sample statistic matches one of the + # critical values provided by `stats.anderson` + + def fun(a): + rng = np.random.default_rng(394295467) + x = stats.tukeylambda.rvs(a, size=100, random_state=rng) + expected = stats.anderson(x, dist_name) + return expected.statistic - expected.critical_values[i] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sol = root(fun, x0=0) + assert sol.success + + # get the significance level (p-value) associated with that critical + # value + a = sol.x[0] + rng = np.random.default_rng(394295467) + x = stats.tukeylambda.rvs(a, size=100, random_state=rng) + expected = stats.anderson(x, dist_name) + expected_stat = expected.statistic + expected_p = expected.significance_level[i]/100 + + # perform equivalent Monte Carlo test and compare results + def statistic1d(x): + return stats.anderson(x, dist_name).statistic + + dist_rvs = self.get_rvs(getattr(stats, dist_name).rvs, rng) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = monte_carlo_test(x, dist_rvs, + statistic1d, n_resamples=1000, + vectorized=False, alternative='greater') + + assert_allclose(res.statistic, expected_stat) + assert_allclose(res.pvalue, expected_p, atol=2*self.atol) + + def test_p_never_zero(self): + # Use biased estimate of p-value to ensure that p-value is never zero + # per monte_carlo_test reference [1] + rng = np.random.default_rng(2190176673029737545) + x = np.zeros(100) + res = monte_carlo_test(x, rng.random, np.mean, + vectorized=True, alternative='less') + assert res.pvalue == 0.0001 + + def test_against_ttest_ind(self): + # test that `monte_carlo_test` can reproduce results of `ttest_ind`. 
+ rng = np.random.default_rng(219017667302737545) + data = rng.random(size=(2, 5)), rng.random(size=7) # broadcastable + rvs = rng.normal, rng.normal + def statistic(x, y, axis): + return stats.ttest_ind(x, y, axis).statistic + + res = stats.monte_carlo_test(data, rvs, statistic, axis=-1) + ref = stats.ttest_ind(data[0], [data[1]], axis=-1) + assert_allclose(res.statistic, ref.statistic) + assert_allclose(res.pvalue, ref.pvalue, rtol=2e-2) + + def test_against_f_oneway(self): + # test that `monte_carlo_test` can reproduce results of `f_oneway`. + rng = np.random.default_rng(219017667302737545) + data = (rng.random(size=(2, 100)), rng.random(size=(2, 101)), + rng.random(size=(2, 102)), rng.random(size=(2, 103))) + rvs = rng.normal, rng.normal, rng.normal, rng.normal + + def statistic(*args, axis): + return stats.f_oneway(*args, axis=axis).statistic + + res = stats.monte_carlo_test(data, rvs, statistic, axis=-1, + alternative='greater') + ref = stats.f_oneway(*data, axis=-1) + + assert_allclose(res.statistic, ref.statistic) + assert_allclose(res.pvalue, ref.pvalue, atol=1e-2) + + @pytest.mark.xfail_on_32bit("Statistic may not depend on sample order on 32-bit") + def test_finite_precision_statistic(self): + # Some statistics return numerically distinct values when the values + # should be equal in theory. Test that `monte_carlo_test` accounts + # for this in some way. + rng = np.random.default_rng(2549824598234528) + n_resamples = 9999 + def rvs(size): + return 1. 
* stats.bernoulli(p=0.333).rvs(size=size, random_state=rng) + + x = rvs(100) + res = stats.monte_carlo_test(x, rvs, np.var, alternative='less', + n_resamples=n_resamples) + # show that having a tolerance matters + c0 = np.sum(res.null_distribution <= res.statistic) + c1 = np.sum(res.null_distribution <= res.statistic*(1+1e-15)) + assert c0 != c1 + assert res.pvalue == (c1 + 1)/(n_resamples + 1) + + +class TestPower: + def test_input_validation(self): + # test that the appropriate error messages are raised for invalid input + rng = np.random.default_rng(8519895914314711673) + + test = stats.ttest_ind + rvs = (rng.normal, rng.normal) + n_observations = (10, 12) + + message = "`vectorized` must be `True`, `False`, or `None`." + with pytest.raises(ValueError, match=message): + power(test, rvs, n_observations, vectorized=1.5) + + message = "`rvs` must be callable or sequence of callables." + with pytest.raises(TypeError, match=message): + power(test, None, n_observations) + with pytest.raises(TypeError, match=message): + power(test, (rng.normal, 'ekki'), n_observations) + + message = "If `rvs` is a sequence..." + with pytest.raises(ValueError, match=message): + power(test, (rng.normal,), n_observations) + with pytest.raises(ValueError, match=message): + power(test, rvs, (10,)) + + message = "`significance` must contain floats between 0 and 1." 
+ with pytest.raises(ValueError, match=message): + power(test, rvs, n_observations, significance=2) + with pytest.raises(ValueError, match=message): + power(test, rvs, n_observations, significance=np.linspace(-1, 1)) + + message = "`kwargs` must be a dictionary" + with pytest.raises(TypeError, match=message): + power(test, rvs, n_observations, kwargs=(1, 2, 3)) + + message = "shape mismatch: objects cannot be broadcast" + with pytest.raises(ValueError, match=message): + power(test, rvs, ([10, 11], [12, 13, 14])) + with pytest.raises(ValueError, match=message): + power(test, rvs, ([10, 11], [12, 13]), kwargs={'x': [1, 2, 3]}) + + message = "`test` must be callable" + with pytest.raises(TypeError, match=message): + power(None, rvs, n_observations) + + message = "`n_resamples` must be a positive integer" + with pytest.raises(ValueError, match=message): + power(test, rvs, n_observations, n_resamples=-10) + with pytest.raises(ValueError, match=message): + power(test, rvs, n_observations, n_resamples=10.5) + + message = "`batch` must be a positive integer" + with pytest.raises(ValueError, match=message): + power(test, rvs, n_observations, batch=-10) + with pytest.raises(ValueError, match=message): + power(test, rvs, n_observations, batch=10.5) + + @pytest.mark.slow + def test_batch(self): + # make sure that the `batch` parameter is respected by checking the + # maximum batch size provided in calls to `test` + rng = np.random.default_rng(23492340193) + + def test(x, axis): + batch_size = 1 if x.ndim == 1 else len(x) + test.batch_size = max(batch_size, test.batch_size) + test.counter += 1 + return stats.ttest_1samp(x, 0, axis=axis).pvalue + test.counter = 0 + test.batch_size = 0 + + kwds = dict(test=test, n_observations=10, n_resamples=1000) + + rng = np.random.default_rng(23492340193) + res1 = power(**kwds, rvs=rng.normal, batch=1) + assert_equal(test.counter, 1000) + assert_equal(test.batch_size, 1) + + rng = np.random.default_rng(23492340193) + test.counter = 0 + res2 = 
power(**kwds, rvs=rng.normal, batch=50) + assert_equal(test.counter, 20) + assert_equal(test.batch_size, 50) + + rng = np.random.default_rng(23492340193) + test.counter = 0 + res3 = power(**kwds, rvs=rng.normal, batch=1000) + assert_equal(test.counter, 1) + assert_equal(test.batch_size, 1000) + + assert_equal(res1.power, res3.power) + assert_equal(res2.power, res3.power) + + @pytest.mark.slow + def test_vectorization(self): + # Test that `power` is vectorized as expected + rng = np.random.default_rng(25495254834552) + + # Single vectorized call + popmeans = np.array([0, 0.2]) + def test(x, alternative, axis=-1): + # ensure that popmeans axis is zeroth and orthogonal to the rest + popmeans_expanded = np.expand_dims(popmeans, tuple(range(1, x.ndim + 1))) + return stats.ttest_1samp(x, popmeans_expanded, alternative=alternative, + axis=axis) + + # nx and kwargs broadcast against one another + nx = np.asarray([10, 15, 20, 50, 100])[:, np.newaxis] + kwargs = {'alternative': ['less', 'greater', 'two-sided']} + + # This dimension is added to the beginning + significance = np.asarray([0.01, 0.025, 0.05, 0.1]) + res = stats.power(test, rng.normal, nx, significance=significance, + kwargs=kwargs) + + # Looping over all combinations + ref = [] + for significance_i in significance: + for nx_i in nx: + for alternative_i in kwargs['alternative']: + for popmean_i in popmeans: + def test2(x, axis=-1): + return stats.ttest_1samp(x, popmean_i, axis=axis, + alternative=alternative_i) + + tmp = stats.power(test2, rng.normal, nx_i, + significance=significance_i) + ref.append(tmp.power) + ref = np.reshape(ref, res.power.shape) + + # Show that results are similar + assert_allclose(res.power, ref, rtol=2e-2, atol=1e-2) + + def test_ttest_ind_null(self): + # Check that the p-values of `ttest_ind` are uniformly distributed under + # the null hypothesis + rng = np.random.default_rng(254952548345528) + + test = stats.ttest_ind + n_observations = rng.integers(10, 100, size=(2, 10)) + rvs = 
rng.normal, rng.normal + significance = np.asarray([0.01, 0.05, 0.1]) + res = stats.power(test, rvs, n_observations, significance=significance) + significance = np.broadcast_to(significance[:, np.newaxis], res.power.shape) + assert_allclose(res.power, significance, atol=1e-2) + + def test_ttest_1samp_power(self): + # Check simulated ttest_1samp power against reference + rng = np.random.default_rng(254952548345528) + + # Reference values computed with statmodels + # import numpy as np + # from statsmodels.stats.power import tt_solve_power + # tt_solve_power = np.vectorize(tt_solve_power) + # tt_solve_power([0.1, 0.5, 0.9], [[10], [20]], [[[0.01]], [[0.05]]]) + ref = [[[0.0126515 , 0.10269751, 0.40415802], + [0.01657775, 0.29734608, 0.86228288]], + [[0.0592903 , 0.29317561, 0.71718121], + [0.07094116, 0.56450441, 0.96815163]]] + + kwargs = {'popmean': [0.1, 0.5, 0.9]} + n_observations = [[10], [20]] + significance = [0.01, 0.05] + res = stats.power(stats.ttest_1samp, rng.normal, n_observations, + significance=significance, kwargs=kwargs) + assert_allclose(res.power, ref, atol=1e-2) + + +class TestPermutationTest: + + rtol = 1e-14 + + def setup_method(self): + self.rng = np.random.default_rng(7170559330470561044) + + # -- Input validation -- # + + def test_permutation_test_iv(self): + + def stat(x, y, axis): + return stats.ttest_ind((x, y), axis).statistic + + message = "each sample in `data` must contain two or more ..." + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1]), stat) + + message = "`data` must be a tuple containing at least two samples" + with pytest.raises(ValueError, match=message): + permutation_test((1,), stat) + with pytest.raises(TypeError, match=message): + permutation_test(1, stat) + + message = "`axis` must be an integer." + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, axis=1.5) + + message = "`permutation_type` must be in..." 
+ with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, + permutation_type="ekki") + + message = "`vectorized` must be `True`, `False`, or `None`." + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, vectorized=1.5) + + message = "`n_resamples` must be a positive integer." + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=-1000) + + message = "`n_resamples` must be a positive integer." + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=1000.5) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=-1000) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=1000.5) + + message = "`alternative` must be in..." 
+ with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, alternative='ekki') + + message = "'herring' cannot be used to seed a" + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, + random_state='herring') + + # -- Test Parameters -- # + @pytest.mark.parametrize('random_state', [np.random.RandomState, + np.random.default_rng]) + @pytest.mark.parametrize('permutation_type', + ['pairings', 'samples', 'independent']) + def test_batch(self, permutation_type, random_state): + # make sure that the `batch` parameter is respected by checking the + # maximum batch size provided in calls to `statistic` + x = self.rng.random(10) + y = self.rng.random(10) + + def statistic(x, y, axis): + batch_size = 1 if x.ndim == 1 else len(x) + statistic.batch_size = max(batch_size, statistic.batch_size) + statistic.counter += 1 + return np.mean(x, axis=axis) - np.mean(y, axis=axis) + statistic.counter = 0 + statistic.batch_size = 0 + + kwds = {'n_resamples': 1000, 'permutation_type': permutation_type, + 'vectorized': True} + res1 = stats.permutation_test((x, y), statistic, batch=1, + random_state=random_state(0), **kwds) + assert_equal(statistic.counter, 1001) + assert_equal(statistic.batch_size, 1) + + statistic.counter = 0 + res2 = stats.permutation_test((x, y), statistic, batch=50, + random_state=random_state(0), **kwds) + assert_equal(statistic.counter, 21) + assert_equal(statistic.batch_size, 50) + + statistic.counter = 0 + res3 = stats.permutation_test((x, y), statistic, batch=1000, + random_state=random_state(0), **kwds) + assert_equal(statistic.counter, 2) + assert_equal(statistic.batch_size, 1000) + + assert_equal(res1.pvalue, res3.pvalue) + assert_equal(res2.pvalue, res3.pvalue) + + @pytest.mark.parametrize('random_state', [np.random.RandomState, + np.random.default_rng]) + @pytest.mark.parametrize('permutation_type, exact_size', + [('pairings', special.factorial(3)**2), + ('samples', 2**3), + 
('independent', special.binom(6, 3))]) + def test_permutations(self, permutation_type, exact_size, random_state): + # make sure that the `permutations` parameter is respected by checking + # the size of the null distribution + x = self.rng.random(3) + y = self.rng.random(3) + + def statistic(x, y, axis): + return np.mean(x, axis=axis) - np.mean(y, axis=axis) + + kwds = {'permutation_type': permutation_type, + 'vectorized': True} + res = stats.permutation_test((x, y), statistic, n_resamples=3, + random_state=random_state(0), **kwds) + assert_equal(res.null_distribution.size, 3) + + res = stats.permutation_test((x, y), statistic, **kwds) + assert_equal(res.null_distribution.size, exact_size) + + # -- Randomized Permutation Tests -- # + + # To get reasonable accuracy, these next three tests are somewhat slow. + # Originally, I had them passing for all combinations of permutation type, + # alternative, and RNG, but that takes too long for CI. Instead, split + # into three tests, each testing a particular combination of the three + # parameters. 
+ + def test_randomized_test_against_exact_both(self): + # check that the randomized and exact tests agree to reasonable + # precision for permutation_type='both + + alternative, rng = 'less', 0 + + nx, ny, permutations = 8, 9, 24000 + assert special.binom(nx + ny, nx) > permutations + + x = stats.norm.rvs(size=nx) + y = stats.norm.rvs(size=ny) + data = x, y + + def statistic(x, y, axis): + return np.mean(x, axis=axis) - np.mean(y, axis=axis) + + kwds = {'vectorized': True, 'permutation_type': 'independent', + 'batch': 100, 'alternative': alternative, 'random_state': rng} + res = permutation_test(data, statistic, n_resamples=permutations, + **kwds) + res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds) + + assert res.statistic == res2.statistic + assert_allclose(res.pvalue, res2.pvalue, atol=1e-2) + + @pytest.mark.slow() + def test_randomized_test_against_exact_samples(self): + # check that the randomized and exact tests agree to reasonable + # precision for permutation_type='samples' + + alternative, rng = 'greater', None + + nx, ny, permutations = 15, 15, 32000 + assert 2**nx > permutations + + x = stats.norm.rvs(size=nx) + y = stats.norm.rvs(size=ny) + data = x, y + + def statistic(x, y, axis): + return np.mean(x - y, axis=axis) + + kwds = {'vectorized': True, 'permutation_type': 'samples', + 'batch': 100, 'alternative': alternative, 'random_state': rng} + res = permutation_test(data, statistic, n_resamples=permutations, + **kwds) + res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds) + + assert res.statistic == res2.statistic + assert_allclose(res.pvalue, res2.pvalue, atol=1e-2) + + def test_randomized_test_against_exact_pairings(self): + # check that the randomized and exact tests agree to reasonable + # precision for permutation_type='pairings' + + alternative, rng = 'two-sided', self.rng + + nx, ny, permutations = 8, 8, 40000 + assert special.factorial(nx) > permutations + + x = stats.norm.rvs(size=nx) + y = 
stats.norm.rvs(size=ny) + data = [x] + + def statistic1d(x): + return stats.pearsonr(x, y)[0] + + statistic = _resampling._vectorize_statistic(statistic1d) + + kwds = {'vectorized': True, 'permutation_type': 'samples', + 'batch': 100, 'alternative': alternative, 'random_state': rng} + res = permutation_test(data, statistic, n_resamples=permutations, + **kwds) + res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds) + + assert res.statistic == res2.statistic + assert_allclose(res.pvalue, res2.pvalue, atol=1e-2) + + @pytest.mark.parametrize('alternative', ('less', 'greater')) + # Different conventions for two-sided p-value here VS ttest_ind. + # Eventually, we can add multiple options for the two-sided alternative + # here in permutation_test. + @pytest.mark.parametrize('permutations', (30, 1e9)) + @pytest.mark.parametrize('axis', (0, 1, 2)) + def test_against_permutation_ttest(self, alternative, permutations, axis): + # check that this function and ttest_ind with permutations give + # essentially identical results. 
+ + x = np.arange(3*4*5).reshape(3, 4, 5) + y = np.moveaxis(np.arange(4)[:, None, None], 0, axis) + + rng1 = np.random.default_rng(4337234444626115331) + res1 = stats.ttest_ind(x, y, permutations=permutations, axis=axis, + random_state=rng1, alternative=alternative) + + def statistic(x, y, axis): + return stats.ttest_ind(x, y, axis=axis).statistic + + rng2 = np.random.default_rng(4337234444626115331) + res2 = permutation_test((x, y), statistic, vectorized=True, + n_resamples=permutations, + alternative=alternative, axis=axis, + random_state=rng2) + + assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol) + assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol) + + # -- Independent (Unpaired) Sample Tests -- # + + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_ks_2samp(self, alternative): + + x = self.rng.normal(size=4, scale=1) + y = self.rng.normal(size=5, loc=3, scale=3) + + expected = stats.ks_2samp(x, y, alternative=alternative, mode='exact') + + def statistic1d(x, y): + return stats.ks_2samp(x, y, mode='asymp', + alternative=alternative).statistic + + # ks_2samp is always a one-tailed 'greater' test + # it's the statistic that changes (D+ vs D- vs max(D+, D-)) + res = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative='greater', random_state=self.rng) + + assert_allclose(res.statistic, expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_ansari(self, alternative): + + x = self.rng.normal(size=4, scale=1) + y = self.rng.normal(size=5, scale=3) + + # ansari has a different convention for 'alternative' + alternative_correspondence = {"less": "greater", + "greater": "less", + "two-sided": "two-sided"} + alternative_scipy = alternative_correspondence[alternative] + expected = stats.ansari(x, y, alternative=alternative_scipy) + + def statistic1d(x, 
y): + return stats.ansari(x, y).statistic + + res = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative=alternative, random_state=self.rng) + + assert_allclose(res.statistic, expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_mannwhitneyu(self, alternative): + + x = stats.uniform.rvs(size=(3, 5, 2), loc=0, random_state=self.rng) + y = stats.uniform.rvs(size=(3, 5, 2), loc=0.05, random_state=self.rng) + + expected = stats.mannwhitneyu(x, y, axis=1, alternative=alternative) + + def statistic(x, y, axis): + return stats.mannwhitneyu(x, y, axis=axis).statistic + + res = permutation_test((x, y), statistic, vectorized=True, + n_resamples=np.inf, alternative=alternative, + axis=1, random_state=self.rng) + + assert_allclose(res.statistic, expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + def test_against_cvm(self): + + x = stats.norm.rvs(size=4, scale=1, random_state=self.rng) + y = stats.norm.rvs(size=5, loc=3, scale=3, random_state=self.rng) + + expected = stats.cramervonmises_2samp(x, y, method='exact') + + def statistic1d(x, y): + return stats.cramervonmises_2samp(x, y, + method='asymptotic').statistic + + # cramervonmises_2samp has only one alternative, greater + res = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative='greater', random_state=self.rng) + + assert_allclose(res.statistic, expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + @pytest.mark.xslow() + @pytest.mark.parametrize('axis', (-1, 2)) + def test_vectorized_nsamp_ptype_both(self, axis): + # Test that permutation_test with permutation_type='independent' works + # properly for a 3-sample statistic with nd array samples of different + # (but compatible) shapes and ndims. 
Show that exact permutation test + # and random permutation tests approximate SciPy's asymptotic pvalues + # and that exact and random permutation test results are even closer + # to one another (than they are to the asymptotic results). + + # Three samples, different (but compatible) shapes with different ndims + rng = np.random.default_rng(6709265303529651545) + x = rng.random(size=(3)) + y = rng.random(size=(1, 3, 2)) + z = rng.random(size=(2, 1, 4)) + data = (x, y, z) + + # Define the statistic (and pvalue for comparison) + def statistic1d(*data): + return stats.kruskal(*data).statistic + + def pvalue1d(*data): + return stats.kruskal(*data).pvalue + + statistic = _resampling._vectorize_statistic(statistic1d) + pvalue = _resampling._vectorize_statistic(pvalue1d) + + # Calculate the expected results + x2 = np.broadcast_to(x, (2, 3, 3)) # broadcast manually because + y2 = np.broadcast_to(y, (2, 3, 2)) # _vectorize_statistic doesn't + z2 = np.broadcast_to(z, (2, 3, 4)) + expected_statistic = statistic(x2, y2, z2, axis=axis) + expected_pvalue = pvalue(x2, y2, z2, axis=axis) + + # Calculate exact and randomized permutation results + kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater', + 'permutation_type': 'independent', 'random_state': self.rng} + res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds) + res2 = permutation_test(data, statistic1d, n_resamples=1000, **kwds) + + # Check results + assert_allclose(res.statistic, expected_statistic, rtol=self.rtol) + assert_allclose(res.statistic, res2.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected_pvalue, atol=6e-2) + assert_allclose(res.pvalue, res2.pvalue, atol=3e-2) + + # -- Paired-Sample Tests -- # + + @pytest.mark.slow + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_wilcoxon(self, alternative): + + x = stats.uniform.rvs(size=(3, 6, 2), loc=0, random_state=self.rng) + y = stats.uniform.rvs(size=(3, 6, 2), loc=0.05, 
random_state=self.rng) + + # We'll check both 1- and 2-sample versions of the same test; + # we expect identical results to wilcoxon in all cases. + def statistic_1samp_1d(z): + # 'less' ensures we get the same of two statistics every time + return stats.wilcoxon(z, alternative='less').statistic + + def statistic_2samp_1d(x, y): + return stats.wilcoxon(x, y, alternative='less').statistic + + def test_1d(x, y): + return stats.wilcoxon(x, y, alternative=alternative) + + test = _resampling._vectorize_statistic(test_1d) + + expected = test(x, y, axis=1) + expected_stat = expected[0] + expected_p = expected[1] + + kwds = {'vectorized': False, 'axis': 1, 'alternative': alternative, + 'permutation_type': 'samples', 'random_state': self.rng, + 'n_resamples': np.inf} + res1 = permutation_test((x-y,), statistic_1samp_1d, **kwds) + res2 = permutation_test((x, y), statistic_2samp_1d, **kwds) + + # `wilcoxon` returns a different statistic with 'two-sided' + assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol) + if alternative != 'two-sided': + assert_allclose(res2.statistic, expected_stat, rtol=self.rtol) + + assert_allclose(res2.pvalue, expected_p, rtol=self.rtol) + assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol) + + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_binomtest(self, alternative): + + x = self.rng.integers(0, 2, size=10) + x[x == 0] = -1 + # More naturally, the test would flip elements between 0 and one. + # However, permutation_test will flip the _signs_ of the elements. + # So we have to work with +1/-1 instead of 1/0. 
+ + def statistic(x, axis=0): + return np.sum(x > 0, axis=axis) + + k, n, p = statistic(x), 10, 0.5 + expected = stats.binomtest(k, n, p, alternative=alternative) + + res = stats.permutation_test((x,), statistic, vectorized=True, + permutation_type='samples', + n_resamples=np.inf, random_state=self.rng, + alternative=alternative) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + # -- Exact Association Tests -- # + + def test_against_kendalltau(self): + + x = self.rng.normal(size=6) + y = x + self.rng.normal(size=6) + + expected = stats.kendalltau(x, y, method='exact') + + def statistic1d(x): + return stats.kendalltau(x, y, method='asymptotic').statistic + + # kendalltau currently has only one alternative, two-sided + res = permutation_test((x,), statistic1d, permutation_type='pairings', + n_resamples=np.inf, random_state=self.rng) + + assert_allclose(res.statistic, expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + @pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided')) + def test_against_fisher_exact(self, alternative): + + def statistic(x,): + return np.sum((x == 1) & (y == 1)) + + # x and y are binary random variables with some dependence + rng = np.random.default_rng(6235696159000529929) + x = (rng.random(7) > 0.6).astype(float) + y = (rng.random(7) + 0.25*x > 0.6).astype(float) + tab = stats.contingency.crosstab(x, y)[1] + + res = permutation_test((x,), statistic, permutation_type='pairings', + n_resamples=np.inf, alternative=alternative, + random_state=rng) + res2 = stats.fisher_exact(tab, alternative=alternative) + + assert_allclose(res.pvalue, res2[1]) + + @pytest.mark.xslow() + @pytest.mark.parametrize('axis', (-2, 1)) + def test_vectorized_nsamp_ptype_samples(self, axis): + # Test that permutation_test with permutation_type='samples' works + # properly for a 3-sample statistic with nd array samples of different + # (but compatible) shapes and ndims. 
Show that exact permutation test + # reproduces SciPy's exact pvalue and that random permutation test + # approximates it. + + x = self.rng.random(size=(2, 4, 3)) + y = self.rng.random(size=(1, 4, 3)) + z = self.rng.random(size=(2, 4, 1)) + x = stats.rankdata(x, axis=axis) + y = stats.rankdata(y, axis=axis) + z = stats.rankdata(z, axis=axis) + y = y[0] # to check broadcast with different ndim + data = (x, y, z) + + def statistic1d(*data): + return stats.page_trend_test(data, ranked=True, + method='asymptotic').statistic + + def pvalue1d(*data): + return stats.page_trend_test(data, ranked=True, + method='exact').pvalue + + statistic = _resampling._vectorize_statistic(statistic1d) + pvalue = _resampling._vectorize_statistic(pvalue1d) + + expected_statistic = statistic(*np.broadcast_arrays(*data), axis=axis) + expected_pvalue = pvalue(*np.broadcast_arrays(*data), axis=axis) + + # Let's forgive this use of an integer seed, please. + kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater', + 'permutation_type': 'pairings', 'random_state': 0} + res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds) + res2 = permutation_test(data, statistic1d, n_resamples=5000, **kwds) + + assert_allclose(res.statistic, expected_statistic, rtol=self.rtol) + assert_allclose(res.statistic, res2.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected_pvalue, rtol=self.rtol) + assert_allclose(res.pvalue, res2.pvalue, atol=3e-2) + + # -- Test Against External References -- # + + tie_case_1 = {'x': [1, 2, 3, 4], 'y': [1.5, 2, 2.5], + 'expected_less': 0.2000000000, + 'expected_2sided': 0.4, # 2*expected_less + 'expected_Pr_gte_S_mean': 0.3428571429, # see note below + 'expected_statistic': 7.5, + 'expected_avg': 9.142857, 'expected_std': 1.40698} + tie_case_2 = {'x': [111, 107, 100, 99, 102, 106, 109, 108], + 'y': [107, 108, 106, 98, 105, 103, 110, 105, 104], + 'expected_less': 0.1555738379, + 'expected_2sided': 0.3111476758, + 'expected_Pr_gte_S_mean': 
0.2969971205, # see note below + 'expected_statistic': 32.5, + 'expected_avg': 38.117647, 'expected_std': 5.172124} + + @pytest.mark.xslow() # only the second case is slow, really + @pytest.mark.parametrize('case', (tie_case_1, tie_case_2)) + def test_with_ties(self, case): + """ + Results above from SAS PROC NPAR1WAY, e.g. + + DATA myData; + INPUT X Y; + CARDS; + 1 1 + 1 2 + 1 3 + 1 4 + 2 1.5 + 2 2 + 2 2.5 + ods graphics on; + proc npar1way AB data=myData; + class X; + EXACT; + run; + ods graphics off; + + Note: SAS provides Pr >= |S-Mean|, which is different from our + definition of a two-sided p-value. + + """ + + x = case['x'] + y = case['y'] + + expected_statistic = case['expected_statistic'] + expected_less = case['expected_less'] + expected_2sided = case['expected_2sided'] + expected_Pr_gte_S_mean = case['expected_Pr_gte_S_mean'] + expected_avg = case['expected_avg'] + expected_std = case['expected_std'] + + def statistic1d(x, y): + return stats.ansari(x, y).statistic + + with np.testing.suppress_warnings() as sup: + sup.filter(UserWarning, "Ties preclude use of exact statistic") + res = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative='less') + res2 = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative='two-sided') + + assert_allclose(res.statistic, expected_statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected_less, atol=1e-10) + assert_allclose(res2.pvalue, expected_2sided, atol=1e-10) + assert_allclose(res2.null_distribution.mean(), expected_avg, rtol=1e-6) + assert_allclose(res2.null_distribution.std(), expected_std, rtol=1e-6) + + # SAS provides Pr >= |S-Mean|; might as well check against that, too + S = res.statistic + mean = res.null_distribution.mean() + n = len(res.null_distribution) + Pr_gte_S_mean = np.sum(np.abs(res.null_distribution-mean) + >= np.abs(S-mean))/n + assert_allclose(expected_Pr_gte_S_mean, Pr_gte_S_mean) + + @pytest.mark.slow + @pytest.mark.parametrize('alternative, 
expected_pvalue', + (('less', 0.9708333333333), + ('greater', 0.05138888888889), + ('two-sided', 0.1027777777778))) + def test_against_spearmanr_in_R(self, alternative, expected_pvalue): + """ + Results above from R cor.test, e.g. + + options(digits=16) + x <- c(1.76405235, 0.40015721, 0.97873798, + 2.2408932, 1.86755799, -0.97727788) + y <- c(2.71414076, 0.2488, 0.87551913, + 2.6514917, 2.01160156, 0.47699563) + cor.test(x, y, method = "spearm", alternative = "t") + """ + # data comes from + # np.random.seed(0) + # x = stats.norm.rvs(size=6) + # y = x + stats.norm.rvs(size=6) + x = [1.76405235, 0.40015721, 0.97873798, + 2.2408932, 1.86755799, -0.97727788] + y = [2.71414076, 0.2488, 0.87551913, + 2.6514917, 2.01160156, 0.47699563] + expected_statistic = 0.7714285714285715 + + def statistic1d(x): + return stats.spearmanr(x, y).statistic + + res = permutation_test((x,), statistic1d, permutation_type='pairings', + n_resamples=np.inf, alternative=alternative) + + assert_allclose(res.statistic, expected_statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected_pvalue, atol=1e-13) + + @pytest.mark.parametrize("batch", (-1, 0)) + def test_batch_generator_iv(self, batch): + with pytest.raises(ValueError, match="`batch` must be positive."): + list(_resampling._batch_generator([1, 2, 3], batch)) + + batch_generator_cases = [(range(0), 3, []), + (range(6), 3, [[0, 1, 2], [3, 4, 5]]), + (range(8), 3, [[0, 1, 2], [3, 4, 5], [6, 7]])] + + @pytest.mark.parametrize("iterable, batch, expected", + batch_generator_cases) + def test_batch_generator(self, iterable, batch, expected): + got = list(_resampling._batch_generator(iterable, batch)) + assert got == expected + + def test_finite_precision_statistic(self): + # Some statistics return numerically distinct values when the values + # should be equal in theory. Test that `permutation_test` accounts + # for this in some way. 
+ x = [1, 2, 4, 3] + y = [2, 4, 6, 8] + + def statistic(x, y): + return stats.pearsonr(x, y)[0] + + res = stats.permutation_test((x, y), statistic, vectorized=False, + permutation_type='pairings') + r, pvalue, null = res.statistic, res.pvalue, res.null_distribution + + correct_p = 2 * np.sum(null >= r - 1e-14) / len(null) + assert pvalue == correct_p == 1/3 + # Compare against other exact correlation tests using R corr.test + # options(digits=16) + # x = c(1, 2, 4, 3) + # y = c(2, 4, 6, 8) + # cor.test(x, y, alternative = "t", method = "spearman") # 0.333333333 + # cor.test(x, y, alternative = "t", method = "kendall") # 0.333333333 + + +def test_all_partitions_concatenated(): + # make sure that _all_paritions_concatenated produces the correct number + # of partitions of the data into samples of the given sizes and that + # all are unique + n = np.array([3, 2, 4], dtype=int) + nc = np.cumsum(n) + + all_partitions = set() + counter = 0 + for partition_concatenated in _resampling._all_partitions_concatenated(n): + counter += 1 + partitioning = np.split(partition_concatenated, nc[:-1]) + all_partitions.add(tuple([frozenset(i) for i in partitioning])) + + expected = np.prod([special.binom(sum(n[i:]), sum(n[i+1:])) + for i in range(len(n)-1)]) + + assert_equal(counter, expected) + assert_equal(len(all_partitions), expected) + + +@pytest.mark.parametrize('fun_name', + ['bootstrap', 'permutation_test', 'monte_carlo_test']) +def test_parameter_vectorized(fun_name): + # Check that parameter `vectorized` is working as desired for all + # resampling functions. Results don't matter; just don't fail asserts. 
+ rng = np.random.default_rng(75245098234592) + sample = rng.random(size=10) + + def rvs(size): # needed by `monte_carlo_test` + return stats.norm.rvs(size=size, random_state=rng) + + fun_options = {'bootstrap': {'data': (sample,), 'random_state': rng, + 'method': 'percentile'}, + 'permutation_test': {'data': (sample,), 'random_state': rng, + 'permutation_type': 'samples'}, + 'monte_carlo_test': {'sample': sample, 'rvs': rvs}} + common_options = {'n_resamples': 100} + + fun = getattr(stats, fun_name) + options = fun_options[fun_name] + options.update(common_options) + + def statistic(x, axis): + assert x.ndim > 1 or np.array_equal(x, sample) + return np.mean(x, axis=axis) + fun(statistic=statistic, vectorized=None, **options) + fun(statistic=statistic, vectorized=True, **options) + + def statistic(x): + assert x.ndim == 1 + return np.mean(x) + fun(statistic=statistic, vectorized=None, **options) + fun(statistic=statistic, vectorized=False, **options) diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..675d333f23d5f1fb7d213119eecea4f3dbc8859e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor slow_conv3d_forward_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output); +TORCH_API at::Tensor & slow_conv3d_forward_symint_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & slow_conv3d_forward_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_construct.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_construct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a3128625629d29594c86a67e9fa461b70e329d1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_construct.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_csc.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_csc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7da2eaba841fe229f90ef7157f469edf35e8624 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_csc.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_util.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe4c8f883156f4617d360e3902a5b29f206152d3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_util.cpython-310.pyc differ