Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc +3 -0
- parrot/lib/python3.10/site-packages/numpy/lib/tests/data/win64python2.npy +3 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE +29 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py +116 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py +704 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/stats/__init__.py +649 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py +686 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd +27 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_binomtest.py +375 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_bws_test.py +177 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_censored_data.py +459 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_crosstab.py +204 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_discrete_distns.py +1922 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_distr_params.py +292 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_entropy.py +426 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_fit.py +1354 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_hypotests.py +2027 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_kde.py +725 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_ksstats.py +600 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py +494 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_morestats.py +0 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_mstats_basic.py +0 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_multicomp.py +459 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_multivariate.py +0 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py +482 -0
.gitattributes
CHANGED
|
@@ -1440,3 +1440,4 @@ vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache
|
|
| 1440 |
vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1441 |
vglm/bin/python filter=lfs diff=lfs merge=lfs -text
|
| 1442 |
vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1440 |
vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1441 |
vglm/bin/python filter=lfs diff=lfs merge=lfs -text
|
| 1442 |
vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1443 |
+
parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:82269618ca3158bed8098a2e1a4fdd2725dfa6cdf520df1c090d03d95c82debd
|
| 3 |
+
size 166337
|
parrot/lib/python3.10/site-packages/numpy/lib/tests/data/win64python2.npy
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6a039c807558149ad5fa7ad12436c69d49c5e194cf617b92785f8cb60ec63297
|
| 3 |
+
size 96
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (519 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc
ADDED
|
Binary file (13.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc
ADDED
|
Binary file (7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc
ADDED
|
Binary file (6.41 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc
ADDED
|
Binary file (18.9 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc
ADDED
|
Binary file (3.04 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc
ADDED
|
Binary file (12.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc
ADDED
|
Binary file (9.88 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc
ADDED
|
Binary file (2.28 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc
ADDED
|
Binary file (2.74 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc
ADDED
|
Binary file (30.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc
ADDED
|
Binary file (11.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc
ADDED
|
Binary file (7.38 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
BSD 3-Clause License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2018, Quansight-Labs
|
| 4 |
+
All rights reserved.
|
| 5 |
+
|
| 6 |
+
Redistribution and use in source and binary forms, with or without
|
| 7 |
+
modification, are permitted provided that the following conditions are met:
|
| 8 |
+
|
| 9 |
+
* Redistributions of source code must retain the above copyright notice, this
|
| 10 |
+
list of conditions and the following disclaimer.
|
| 11 |
+
|
| 12 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
| 13 |
+
this list of conditions and the following disclaimer in the documentation
|
| 14 |
+
and/or other materials provided with the distribution.
|
| 15 |
+
|
| 16 |
+
* Neither the name of the copyright holder nor the names of its
|
| 17 |
+
contributors may be used to endorse or promote products derived from
|
| 18 |
+
this software without specific prior written permission.
|
| 19 |
+
|
| 20 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 21 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 22 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 23 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 24 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 25 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 26 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 27 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 28 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 29 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
.. note:
|
| 3 |
+
If you are looking for overrides for NumPy-specific methods, see the
|
| 4 |
+
documentation for :obj:`unumpy`. This page explains how to write
|
| 5 |
+
back-ends and multimethods.
|
| 6 |
+
|
| 7 |
+
``uarray`` is built around a back-end protocol, and overridable multimethods.
|
| 8 |
+
It is necessary to define multimethods for back-ends to be able to override them.
|
| 9 |
+
See the documentation of :obj:`generate_multimethod` on how to write multimethods.
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
Let's start with the simplest:
|
| 14 |
+
|
| 15 |
+
``__ua_domain__`` defines the back-end *domain*. The domain consists of period-
|
| 16 |
+
separated string consisting of the modules you extend plus the submodule. For
|
| 17 |
+
example, if a submodule ``module2.submodule`` extends ``module1``
|
| 18 |
+
(i.e., it exposes dispatchables marked as types available in ``module1``),
|
| 19 |
+
then the domain string should be ``"module1.module2.submodule"``.
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
For the purpose of this demonstration, we'll be creating an object and setting
|
| 23 |
+
its attributes directly. However, note that you can use a module or your own type
|
| 24 |
+
as a backend as well.
|
| 25 |
+
|
| 26 |
+
>>> class Backend: pass
|
| 27 |
+
>>> be = Backend()
|
| 28 |
+
>>> be.__ua_domain__ = "ua_examples"
|
| 29 |
+
|
| 30 |
+
It might be useful at this point to sidetrack to the documentation of
|
| 31 |
+
:obj:`generate_multimethod` to find out how to generate a multimethod
|
| 32 |
+
overridable by :obj:`uarray`. Needless to say, writing a backend and
|
| 33 |
+
creating multimethods are mostly orthogonal activities, and knowing
|
| 34 |
+
one doesn't necessarily require knowledge of the other, although it
|
| 35 |
+
is certainly helpful. We expect core API designers/specifiers to write the
|
| 36 |
+
multimethods, and implementors to override them. But, as is often the case,
|
| 37 |
+
similar people write both.
|
| 38 |
+
|
| 39 |
+
Without further ado, here's an example multimethod:
|
| 40 |
+
|
| 41 |
+
>>> import uarray as ua
|
| 42 |
+
>>> from uarray import Dispatchable
|
| 43 |
+
>>> def override_me(a, b):
|
| 44 |
+
... return Dispatchable(a, int),
|
| 45 |
+
>>> def override_replacer(args, kwargs, dispatchables):
|
| 46 |
+
... return (dispatchables[0], args[1]), {}
|
| 47 |
+
>>> overridden_me = ua.generate_multimethod(
|
| 48 |
+
... override_me, override_replacer, "ua_examples"
|
| 49 |
+
... )
|
| 50 |
+
|
| 51 |
+
Next comes the part about overriding the multimethod. This requires
|
| 52 |
+
the ``__ua_function__`` protocol, and the ``__ua_convert__``
|
| 53 |
+
protocol. The ``__ua_function__`` protocol has the signature
|
| 54 |
+
``(method, args, kwargs)`` where ``method`` is the passed
|
| 55 |
+
multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables``
|
| 56 |
+
is the list of converted dispatchables passed in.
|
| 57 |
+
|
| 58 |
+
>>> def __ua_function__(method, args, kwargs):
|
| 59 |
+
... return method.__name__, args, kwargs
|
| 60 |
+
>>> be.__ua_function__ = __ua_function__
|
| 61 |
+
|
| 62 |
+
The other protocol of interest is the ``__ua_convert__`` protocol. It has the
|
| 63 |
+
signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion
|
| 64 |
+
between the formats should ideally be an ``O(1)`` operation, but it means that
|
| 65 |
+
no memory copying should be involved, only views of the existing data.
|
| 66 |
+
|
| 67 |
+
>>> def __ua_convert__(dispatchables, coerce):
|
| 68 |
+
... for d in dispatchables:
|
| 69 |
+
... if d.type is int:
|
| 70 |
+
... if coerce and d.coercible:
|
| 71 |
+
... yield str(d.value)
|
| 72 |
+
... else:
|
| 73 |
+
... yield d.value
|
| 74 |
+
>>> be.__ua_convert__ = __ua_convert__
|
| 75 |
+
|
| 76 |
+
Now that we have defined the backend, the next thing to do is to call the multimethod.
|
| 77 |
+
|
| 78 |
+
>>> with ua.set_backend(be):
|
| 79 |
+
... overridden_me(1, "2")
|
| 80 |
+
('override_me', (1, '2'), {})
|
| 81 |
+
|
| 82 |
+
Note that the marked type has no effect on the actual type of the passed object.
|
| 83 |
+
We can also coerce the type of the input.
|
| 84 |
+
|
| 85 |
+
>>> with ua.set_backend(be, coerce=True):
|
| 86 |
+
... overridden_me(1, "2")
|
| 87 |
+
... overridden_me(1.0, "2")
|
| 88 |
+
('override_me', ('1', '2'), {})
|
| 89 |
+
('override_me', ('1.0', '2'), {})
|
| 90 |
+
|
| 91 |
+
Another feature is that if you remove ``__ua_convert__``, the arguments are not
|
| 92 |
+
converted at all and it's up to the backend to handle that.
|
| 93 |
+
|
| 94 |
+
>>> del be.__ua_convert__
|
| 95 |
+
>>> with ua.set_backend(be):
|
| 96 |
+
... overridden_me(1, "2")
|
| 97 |
+
('override_me', (1, '2'), {})
|
| 98 |
+
|
| 99 |
+
You also have the option to return ``NotImplemented``, in which case processing moves on
|
| 100 |
+
to the next back-end, which in this case, doesn't exist. The same applies to
|
| 101 |
+
``__ua_convert__``.
|
| 102 |
+
|
| 103 |
+
>>> be.__ua_function__ = lambda *a, **kw: NotImplemented
|
| 104 |
+
>>> with ua.set_backend(be):
|
| 105 |
+
... overridden_me(1, "2")
|
| 106 |
+
Traceback (most recent call last):
|
| 107 |
+
...
|
| 108 |
+
uarray.BackendNotImplementedError: ...
|
| 109 |
+
|
| 110 |
+
The last possibility is if we don't have ``__ua_convert__``, in which case the job is
|
| 111 |
+
left up to ``__ua_function__``, but putting things back into arrays after conversion
|
| 112 |
+
will not be possible.
|
| 113 |
+
"""
|
| 114 |
+
|
| 115 |
+
from ._backend import *
|
| 116 |
+
__version__ = '0.8.8.dev0+aa94c5a4.scipy'
|
parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (4.68 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc
ADDED
|
Binary file (20.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py
ADDED
|
@@ -0,0 +1,704 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import typing
|
| 2 |
+
import types
|
| 3 |
+
import inspect
|
| 4 |
+
import functools
|
| 5 |
+
from . import _uarray
|
| 6 |
+
import copyreg
|
| 7 |
+
import pickle
|
| 8 |
+
import contextlib
|
| 9 |
+
|
| 10 |
+
from ._uarray import ( # type: ignore
|
| 11 |
+
BackendNotImplementedError,
|
| 12 |
+
_Function,
|
| 13 |
+
_SkipBackendContext,
|
| 14 |
+
_SetBackendContext,
|
| 15 |
+
_BackendState,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
__all__ = [
|
| 19 |
+
"set_backend",
|
| 20 |
+
"set_global_backend",
|
| 21 |
+
"skip_backend",
|
| 22 |
+
"register_backend",
|
| 23 |
+
"determine_backend",
|
| 24 |
+
"determine_backend_multi",
|
| 25 |
+
"clear_backends",
|
| 26 |
+
"create_multimethod",
|
| 27 |
+
"generate_multimethod",
|
| 28 |
+
"_Function",
|
| 29 |
+
"BackendNotImplementedError",
|
| 30 |
+
"Dispatchable",
|
| 31 |
+
"wrap_single_convertor",
|
| 32 |
+
"wrap_single_convertor_instance",
|
| 33 |
+
"all_of_type",
|
| 34 |
+
"mark_as",
|
| 35 |
+
"set_state",
|
| 36 |
+
"get_state",
|
| 37 |
+
"reset_state",
|
| 38 |
+
"_BackendState",
|
| 39 |
+
"_SkipBackendContext",
|
| 40 |
+
"_SetBackendContext",
|
| 41 |
+
]
|
| 42 |
+
|
| 43 |
+
ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]]
|
| 44 |
+
ArgumentReplacerType = typing.Callable[
|
| 45 |
+
[tuple, dict, tuple], tuple[tuple, dict]
|
| 46 |
+
]
|
| 47 |
+
|
| 48 |
+
def unpickle_function(mod_name, qname, self_):
|
| 49 |
+
import importlib
|
| 50 |
+
|
| 51 |
+
try:
|
| 52 |
+
module = importlib.import_module(mod_name)
|
| 53 |
+
qname = qname.split(".")
|
| 54 |
+
func = module
|
| 55 |
+
for q in qname:
|
| 56 |
+
func = getattr(func, q)
|
| 57 |
+
|
| 58 |
+
if self_ is not None:
|
| 59 |
+
func = types.MethodType(func, self_)
|
| 60 |
+
|
| 61 |
+
return func
|
| 62 |
+
except (ImportError, AttributeError) as e:
|
| 63 |
+
from pickle import UnpicklingError
|
| 64 |
+
|
| 65 |
+
raise UnpicklingError from e
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def pickle_function(func):
|
| 69 |
+
mod_name = getattr(func, "__module__", None)
|
| 70 |
+
qname = getattr(func, "__qualname__", None)
|
| 71 |
+
self_ = getattr(func, "__self__", None)
|
| 72 |
+
|
| 73 |
+
try:
|
| 74 |
+
test = unpickle_function(mod_name, qname, self_)
|
| 75 |
+
except pickle.UnpicklingError:
|
| 76 |
+
test = None
|
| 77 |
+
|
| 78 |
+
if test is not func:
|
| 79 |
+
raise pickle.PicklingError(
|
| 80 |
+
f"Can't pickle {func}: it's not the same object as {test}"
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
return unpickle_function, (mod_name, qname, self_)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def pickle_state(state):
|
| 87 |
+
return _uarray._BackendState._unpickle, state._pickle()
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def pickle_set_backend_context(ctx):
|
| 91 |
+
return _SetBackendContext, ctx._pickle()
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def pickle_skip_backend_context(ctx):
|
| 95 |
+
return _SkipBackendContext, ctx._pickle()
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
copyreg.pickle(_Function, pickle_function)
|
| 99 |
+
copyreg.pickle(_uarray._BackendState, pickle_state)
|
| 100 |
+
copyreg.pickle(_SetBackendContext, pickle_set_backend_context)
|
| 101 |
+
copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def get_state():
|
| 105 |
+
"""
|
| 106 |
+
Returns an opaque object containing the current state of all the backends.
|
| 107 |
+
|
| 108 |
+
Can be used for synchronization between threads/processes.
|
| 109 |
+
|
| 110 |
+
See Also
|
| 111 |
+
--------
|
| 112 |
+
set_state
|
| 113 |
+
Sets the state returned by this function.
|
| 114 |
+
"""
|
| 115 |
+
return _uarray.get_state()
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@contextlib.contextmanager
|
| 119 |
+
def reset_state():
|
| 120 |
+
"""
|
| 121 |
+
Returns a context manager that resets all state once exited.
|
| 122 |
+
|
| 123 |
+
See Also
|
| 124 |
+
--------
|
| 125 |
+
set_state
|
| 126 |
+
Context manager that sets the backend state.
|
| 127 |
+
get_state
|
| 128 |
+
Gets a state to be set by this context manager.
|
| 129 |
+
"""
|
| 130 |
+
with set_state(get_state()):
|
| 131 |
+
yield
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@contextlib.contextmanager
|
| 135 |
+
def set_state(state):
|
| 136 |
+
"""
|
| 137 |
+
A context manager that sets the state of the backends to one returned by :obj:`get_state`.
|
| 138 |
+
|
| 139 |
+
See Also
|
| 140 |
+
--------
|
| 141 |
+
get_state
|
| 142 |
+
Gets a state to be set by this context manager.
|
| 143 |
+
""" # noqa: E501
|
| 144 |
+
old_state = get_state()
|
| 145 |
+
_uarray.set_state(state)
|
| 146 |
+
try:
|
| 147 |
+
yield
|
| 148 |
+
finally:
|
| 149 |
+
_uarray.set_state(old_state, True)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def create_multimethod(*args, **kwargs):
|
| 153 |
+
"""
|
| 154 |
+
Creates a decorator for generating multimethods.
|
| 155 |
+
|
| 156 |
+
This function creates a decorator that can be used with an argument
|
| 157 |
+
extractor in order to generate a multimethod. Other than for the
|
| 158 |
+
argument extractor, all arguments are passed on to
|
| 159 |
+
:obj:`generate_multimethod`.
|
| 160 |
+
|
| 161 |
+
See Also
|
| 162 |
+
--------
|
| 163 |
+
generate_multimethod
|
| 164 |
+
Generates a multimethod.
|
| 165 |
+
"""
|
| 166 |
+
|
| 167 |
+
def wrapper(a):
|
| 168 |
+
return generate_multimethod(a, *args, **kwargs)
|
| 169 |
+
|
| 170 |
+
return wrapper
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def generate_multimethod(
|
| 174 |
+
argument_extractor: ArgumentExtractorType,
|
| 175 |
+
argument_replacer: ArgumentReplacerType,
|
| 176 |
+
domain: str,
|
| 177 |
+
default: typing.Optional[typing.Callable] = None,
|
| 178 |
+
):
|
| 179 |
+
"""
|
| 180 |
+
Generates a multimethod.
|
| 181 |
+
|
| 182 |
+
Parameters
|
| 183 |
+
----------
|
| 184 |
+
argument_extractor : ArgumentExtractorType
|
| 185 |
+
A callable which extracts the dispatchable arguments. Extracted arguments
|
| 186 |
+
should be marked by the :obj:`Dispatchable` class. It has the same signature
|
| 187 |
+
as the desired multimethod.
|
| 188 |
+
argument_replacer : ArgumentReplacerType
|
| 189 |
+
A callable with the signature (args, kwargs, dispatchables), which should also
|
| 190 |
+
return an (args, kwargs) pair with the dispatchables replaced inside the
|
| 191 |
+
args/kwargs.
|
| 192 |
+
domain : str
|
| 193 |
+
A string value indicating the domain of this multimethod.
|
| 194 |
+
default: Optional[Callable], optional
|
| 195 |
+
The default implementation of this multimethod, where ``None`` (the default)
|
| 196 |
+
specifies there is no default implementation.
|
| 197 |
+
|
| 198 |
+
Examples
|
| 199 |
+
--------
|
| 200 |
+
In this example, ``a`` is to be dispatched over, so we return it, while marking it
|
| 201 |
+
as an ``int``.
|
| 202 |
+
The trailing comma is needed because the args have to be returned as an iterable.
|
| 203 |
+
|
| 204 |
+
>>> def override_me(a, b):
|
| 205 |
+
... return Dispatchable(a, int),
|
| 206 |
+
|
| 207 |
+
Next, we define the argument replacer that replaces the dispatchables inside
|
| 208 |
+
args/kwargs with the supplied ones.
|
| 209 |
+
|
| 210 |
+
>>> def override_replacer(args, kwargs, dispatchables):
|
| 211 |
+
... return (dispatchables[0], args[1]), {}
|
| 212 |
+
|
| 213 |
+
Next, we define the multimethod.
|
| 214 |
+
|
| 215 |
+
>>> overridden_me = generate_multimethod(
|
| 216 |
+
... override_me, override_replacer, "ua_examples"
|
| 217 |
+
... )
|
| 218 |
+
|
| 219 |
+
Notice that there's no default implementation, unless you supply one.
|
| 220 |
+
|
| 221 |
+
>>> overridden_me(1, "a")
|
| 222 |
+
Traceback (most recent call last):
|
| 223 |
+
...
|
| 224 |
+
uarray.BackendNotImplementedError: ...
|
| 225 |
+
|
| 226 |
+
>>> overridden_me2 = generate_multimethod(
|
| 227 |
+
... override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y)
|
| 228 |
+
... )
|
| 229 |
+
>>> overridden_me2(1, "a")
|
| 230 |
+
(1, 'a')
|
| 231 |
+
|
| 232 |
+
See Also
|
| 233 |
+
--------
|
| 234 |
+
uarray
|
| 235 |
+
See the module documentation for how to override the method by creating
|
| 236 |
+
backends.
|
| 237 |
+
"""
|
| 238 |
+
kw_defaults, arg_defaults, opts = get_defaults(argument_extractor)
|
| 239 |
+
ua_func = _Function(
|
| 240 |
+
argument_extractor,
|
| 241 |
+
argument_replacer,
|
| 242 |
+
domain,
|
| 243 |
+
arg_defaults,
|
| 244 |
+
kw_defaults,
|
| 245 |
+
default,
|
| 246 |
+
)
|
| 247 |
+
|
| 248 |
+
return functools.update_wrapper(ua_func, argument_extractor)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def set_backend(backend, coerce=False, only=False):
|
| 252 |
+
"""
|
| 253 |
+
A context manager that sets the preferred backend.
|
| 254 |
+
|
| 255 |
+
Parameters
|
| 256 |
+
----------
|
| 257 |
+
backend
|
| 258 |
+
The backend to set.
|
| 259 |
+
coerce
|
| 260 |
+
Whether or not to coerce to a specific backend's types. Implies ``only``.
|
| 261 |
+
only
|
| 262 |
+
Whether or not this should be the last backend to try.
|
| 263 |
+
|
| 264 |
+
See Also
|
| 265 |
+
--------
|
| 266 |
+
skip_backend: A context manager that allows skipping of backends.
|
| 267 |
+
set_global_backend: Set a single, global backend for a domain.
|
| 268 |
+
"""
|
| 269 |
+
try:
|
| 270 |
+
return backend.__ua_cache__["set", coerce, only]
|
| 271 |
+
except AttributeError:
|
| 272 |
+
backend.__ua_cache__ = {}
|
| 273 |
+
except KeyError:
|
| 274 |
+
pass
|
| 275 |
+
|
| 276 |
+
ctx = _SetBackendContext(backend, coerce, only)
|
| 277 |
+
backend.__ua_cache__["set", coerce, only] = ctx
|
| 278 |
+
return ctx
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def skip_backend(backend):
|
| 282 |
+
"""
|
| 283 |
+
A context manager that allows one to skip a given backend from processing
|
| 284 |
+
entirely. This allows one to use another backend's code in a library that
|
| 285 |
+
is also a consumer of the same backend.
|
| 286 |
+
|
| 287 |
+
Parameters
|
| 288 |
+
----------
|
| 289 |
+
backend
|
| 290 |
+
The backend to skip.
|
| 291 |
+
|
| 292 |
+
See Also
|
| 293 |
+
--------
|
| 294 |
+
set_backend: A context manager that allows setting of backends.
|
| 295 |
+
set_global_backend: Set a single, global backend for a domain.
|
| 296 |
+
"""
|
| 297 |
+
try:
|
| 298 |
+
return backend.__ua_cache__["skip"]
|
| 299 |
+
except AttributeError:
|
| 300 |
+
backend.__ua_cache__ = {}
|
| 301 |
+
except KeyError:
|
| 302 |
+
pass
|
| 303 |
+
|
| 304 |
+
ctx = _SkipBackendContext(backend)
|
| 305 |
+
backend.__ua_cache__["skip"] = ctx
|
| 306 |
+
return ctx
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
def get_defaults(f):
|
| 310 |
+
sig = inspect.signature(f)
|
| 311 |
+
kw_defaults = {}
|
| 312 |
+
arg_defaults = []
|
| 313 |
+
opts = set()
|
| 314 |
+
for k, v in sig.parameters.items():
|
| 315 |
+
if v.default is not inspect.Parameter.empty:
|
| 316 |
+
kw_defaults[k] = v.default
|
| 317 |
+
if v.kind in (
|
| 318 |
+
inspect.Parameter.POSITIONAL_ONLY,
|
| 319 |
+
inspect.Parameter.POSITIONAL_OR_KEYWORD,
|
| 320 |
+
):
|
| 321 |
+
arg_defaults.append(v.default)
|
| 322 |
+
opts.add(k)
|
| 323 |
+
|
| 324 |
+
return kw_defaults, tuple(arg_defaults), opts
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
def set_global_backend(backend, coerce=False, only=False, *, try_last=False):
    """
    This utility method replaces the default backend for permanent use. It
    will be tried in the list of backends automatically, unless the
    ``only`` flag is set on a backend. This will be the first tried
    backend outside the :obj:`set_backend` context manager.

    Note that this method is not thread-safe.

    .. warning::
        We caution library authors against using this function in
        their code. We do *not* support this use-case. This function
        is meant to be used only by users themselves, or by a reference
        implementation, if one exists.

    Parameters
    ----------
    backend
        The backend to register.
    coerce : bool
        Whether to coerce input types when trying this backend.
    only : bool
        If ``True``, no more backends will be tried if this fails.
        Implied by ``coerce=True``.
    try_last : bool
        If ``True``, the global backend is tried after registered backends.

    See Also
    --------
    set_backend: A context manager that allows setting of backends.
    skip_backend: A context manager that allows skipping of backends.
    """
    # All bookkeeping lives in the C extension; this is a thin wrapper.
    _uarray.set_global_backend(backend, coerce, only, try_last)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def register_backend(backend):
    """
    This utility method registers a backend for permanent use. It
    will be tried in the list of backends automatically, unless the
    ``only`` flag is set on a backend.

    Note that this method is not thread-safe.

    Parameters
    ----------
    backend
        The backend to register.
    """
    # All bookkeeping lives in the C extension; this is a thin wrapper.
    _uarray.register_backend(backend)
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def clear_backends(domain, registered=True, globals=False):
    """
    This utility method clears registered backends.

    .. warning::
        We caution library authors against using this function in
        their code. We do *not* support this use-case. This function
        is meant to be used only by users themselves.

    .. warning::
        Do NOT use this method inside a multimethod call, or the
        program is likely to crash.

    Parameters
    ----------
    domain : Optional[str]
        The domain for which to de-register backends. ``None`` means
        de-register for all domains.
    registered : bool
        Whether or not to clear registered backends. See :obj:`register_backend`.
    globals : bool
        Whether or not to clear global backends. See :obj:`set_global_backend`.

    See Also
    --------
    register_backend : Register a backend globally.
    set_global_backend : Set a global backend.
    """
    # All bookkeeping lives in the C extension; this is a thin wrapper.
    _uarray.clear_backends(domain, registered, globals)
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
class Dispatchable:
    """
    A utility class which marks an argument with a specific dispatch type.


    Attributes
    ----------
    value
        The value of the Dispatchable.

    type
        The type of the Dispatchable.

    Examples
    --------
    >>> x = Dispatchable(1, str)
    >>> x
    <Dispatchable: type=<class 'str'>, value=1>

    See Also
    --------
    all_of_type
        Marks all unmarked parameters of a function.

    mark_as
        Allows one to create a utility function to mark as a given type.
    """

    def __init__(self, value, dispatch_type, coercible=True):
        # ``coercible`` records whether a backend is allowed to coerce this
        # value to its own types during ``__ua_convert__``.
        self.value = value
        self.type = dispatch_type
        self.coercible = coercible

    def __getitem__(self, index):
        # Tuple-like access: index 0 yields the type, index 1 the value.
        return (self.type, self.value)[index]

    def __repr__(self):
        return "<{}: type={!r}, value={!r}>".format(
            type(self).__name__, self.type, self.value
        )

    __str__ = __repr__
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def mark_as(dispatch_type):
    """
    Creates a utility function to mark something as a specific type.

    Examples
    --------
    >>> mark_int = mark_as(int)
    >>> mark_int(1)
    <Dispatchable: type=<class 'int'>, value=1>
    """
    # ``functools.partial`` keeps the exact call semantics of the
    # ``Dispatchable`` constructor with ``dispatch_type`` pre-bound.
    marker = functools.partial(Dispatchable, dispatch_type=dispatch_type)
    return marker
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def all_of_type(arg_type):
    """
    Marks all unmarked arguments as a given type.

    Examples
    --------
    >>> @all_of_type(str)
    ... def f(a, b):
    ...     return a, Dispatchable(b, int)
    >>> f('a', 1)
    (<Dispatchable: type=<class 'str'>, value='a'>,
     <Dispatchable: type=<class 'int'>, value=1>)
    """

    def outer(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # Wrap every bare return value in a Dispatchable of ``arg_type``;
            # values that are already Dispatchable pass through untouched.
            marked = []
            for extracted in func(*args, **kwargs):
                if isinstance(extracted, Dispatchable):
                    marked.append(extracted)
                else:
                    marked.append(Dispatchable(extracted, arg_type))
            return tuple(marked)

        return inner

    return outer
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def wrap_single_convertor(convert_single):
    """
    Wraps a ``__ua_convert__`` defined for a single element to all elements.
    If any of them return ``NotImplemented``, the operation is assumed to be
    undefined.

    Accepts a signature of (value, type, coerce).
    """

    @functools.wraps(convert_single)
    def __ua_convert__(dispatchables, coerce):
        results = []
        for dispatchable in dispatchables:
            # Coercion is only attempted when both the caller requests it
            # and the dispatchable itself permits it.
            result = convert_single(
                dispatchable.value,
                dispatchable.type,
                coerce and dispatchable.coercible,
            )
            if result is NotImplemented:
                return NotImplemented
            results.append(result)

        return results

    return __ua_convert__
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
def wrap_single_convertor_instance(convert_single):
    """
    Wraps a ``__ua_convert__`` defined for a single element to all elements.
    If any of them return ``NotImplemented``, the operation is assumed to be
    undefined.

    Accepts a signature of (value, type, coerce).
    """

    @functools.wraps(convert_single)
    def __ua_convert__(self, dispatchables, coerce):
        results = []
        for dispatchable in dispatchables:
            # Coercion is only attempted when both the caller requests it
            # and the dispatchable itself permits it.
            result = convert_single(
                self,
                dispatchable.value,
                dispatchable.type,
                coerce and dispatchable.coercible,
            )
            if result is NotImplemented:
                return NotImplemented
            results.append(result)

        return results

    return __ua_convert__
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False):
    """Set the backend to the first active backend that supports ``value``

    This is useful for functions that call multimethods without any dispatchable
    arguments. You can use :func:`determine_backend` to ensure the same backend
    is used everywhere in a block of multimethod calls.

    Parameters
    ----------
    value
        The value being tested
    dispatch_type
        The dispatch type associated with ``value``, aka
        ":ref:`marking <MarkingGlossary>`".
    domain: string
        The domain to query for backends and set.
    coerce: bool
        Whether or not to allow coercion to the backend's types. Implies ``only``.
    only: bool
        Whether or not this should be the last backend to try.

    See Also
    --------
    set_backend: For when you know which backend to set

    Notes
    -----

    Support is determined by the ``__ua_convert__`` protocol. Backends not
    supporting the type must return ``NotImplemented`` from their
    ``__ua_convert__`` if they don't support input of that type.

    Examples
    --------

    Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting
    different types, ``TypeA`` and ``TypeB``. Neither supporting the other type:

    >>> with ua.set_backend(ex.BackendA):
    ...     ex.call_multimethod(ex.TypeB(), ex.TypeB())
    Traceback (most recent call last):
        ...
    uarray.BackendNotImplementedError: ...

    Now consider a multimethod that creates a new object of ``TypeA``, or
    ``TypeB`` depending on the active backend.

    >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
    ...     res = ex.creation_multimethod()
    ...     ex.call_multimethod(res, ex.TypeA())
    Traceback (most recent call last):
        ...
    uarray.BackendNotImplementedError: ...

    ``res`` is an object of ``TypeB`` because ``BackendB`` is set in the
    innermost with statement. So, ``call_multimethod`` fails since the types
    don't match.

    Instead, we need to first find a backend suitable for all of our objects.

    >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
    ...     x = ex.TypeA()
    ...     with ua.determine_backend(x, "mark", domain="ua_examples"):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, x)
    TypeA

    """
    # Wrap the single value and let the C extension pick the first active
    # backend whose ``__ua_convert__`` accepts it.
    marked = Dispatchable(value, dispatch_type, coerce)
    chosen = _uarray.determine_backend(domain, (marked,), coerce)

    return set_backend(chosen, coerce=coerce, only=only)
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
def determine_backend_multi(
    dispatchables, *, domain, only=True, coerce=False, **kwargs
):
    """Set a backend supporting all ``dispatchables``

    This is useful for functions that call multimethods without any dispatchable
    arguments. You can use :func:`determine_backend_multi` to ensure the same
    backend is used everywhere in a block of multimethod calls involving
    multiple arrays.

    Parameters
    ----------
    dispatchables: Sequence[Union[uarray.Dispatchable, Any]]
        The dispatchables that must be supported
    domain: string
        The domain to query for backends and set.
    coerce: bool
        Whether or not to allow coercion to the backend's types. Implies ``only``.
    only: bool
        Whether or not this should be the last backend to try.
    dispatch_type: Optional[Any]
        The default dispatch type associated with ``dispatchables``, aka
        ":ref:`marking <MarkingGlossary>`".

    See Also
    --------
    determine_backend: For a single dispatch value
    set_backend: For when you know which backend to set

    Notes
    -----

    Support is determined by the ``__ua_convert__`` protocol. Backends not
    supporting the type must return ``NotImplemented`` from their
    ``__ua_convert__`` if they don't support input of that type.

    Examples
    --------

    :func:`determine_backend` allows the backend to be set from a single
    object. :func:`determine_backend_multi` allows multiple objects to be
    checked simultaneously for support in the backend. Suppose we have a
    ``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call,
    and a ``BackendBC`` that doesn't support ``TypeA``.

    >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
    ...     a, b = ex.TypeA(), ex.TypeB()
    ...     with ua.determine_backend_multi(
    ...         [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")],
    ...         domain="ua_examples"
    ...     ):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, a, b)
    TypeA

    This won't call ``BackendBC`` because it doesn't support ``TypeA``.

    We can also leave out the ``ua.Dispatchable`` if we specify the
    default ``dispatch_type`` for the ``dispatchables`` argument.

    >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
    ...     a, b = ex.TypeA(), ex.TypeB()
    ...     with ua.determine_backend_multi(
    ...         [a, b], dispatch_type="mark", domain="ua_examples"
    ...     ):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, a, b)
    TypeA

    """
    if "dispatch_type" in kwargs:
        # Bare values get wrapped with the default dispatch type; values
        # already wrapped in a Dispatchable keep their own marking.
        disp_type = kwargs.pop("dispatch_type")
        dispatchables = tuple(
            d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type)
            for d in dispatchables
        )
    else:
        dispatchables = tuple(dispatchables)
        if not all(isinstance(d, Dispatchable) for d in dispatchables):
            raise TypeError("dispatchables must be instances of uarray.Dispatchable")

    if len(kwargs) != 0:
        raise TypeError(f"Received unexpected keyword arguments: {kwargs}")

    backend = _uarray.determine_backend(domain, dispatchables, coerce)

    return set_backend(backend, coerce=coerce, only=only)
|
parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc
ADDED
|
Binary file (3.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc
ADDED
|
Binary file (1.75 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc
ADDED
|
Binary file (14.1 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc
ADDED
|
Binary file (4.25 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc
ADDED
|
Binary file (615 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc
ADDED
|
Binary file (9.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc
ADDED
|
Binary file (1.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/stats/__init__.py
ADDED
|
@@ -0,0 +1,649 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
.. _statsrefmanual:
|
| 3 |
+
|
| 4 |
+
==========================================
|
| 5 |
+
Statistical functions (:mod:`scipy.stats`)
|
| 6 |
+
==========================================
|
| 7 |
+
|
| 8 |
+
.. currentmodule:: scipy.stats
|
| 9 |
+
|
| 10 |
+
This module contains a large number of probability distributions,
|
| 11 |
+
summary and frequency statistics, correlation functions and statistical
|
| 12 |
+
tests, masked statistics, kernel density estimation, quasi-Monte Carlo
|
| 13 |
+
functionality, and more.
|
| 14 |
+
|
| 15 |
+
Statistics is a very large area, and there are topics that are out of scope
|
| 16 |
+
for SciPy and are covered by other packages. Some of the most important ones
|
| 17 |
+
are:
|
| 18 |
+
|
| 19 |
+
- `statsmodels <https://www.statsmodels.org/stable/index.html>`__:
|
| 20 |
+
regression, linear models, time series analysis, extensions to topics
|
| 21 |
+
also covered by ``scipy.stats``.
|
| 22 |
+
- `Pandas <https://pandas.pydata.org/>`__: tabular data, time series
|
| 23 |
+
functionality, interfaces to other statistical languages.
|
| 24 |
+
- `PyMC <https://docs.pymc.io/>`__: Bayesian statistical
|
| 25 |
+
modeling, probabilistic machine learning.
|
| 26 |
+
- `scikit-learn <https://scikit-learn.org/>`__: classification, regression,
|
| 27 |
+
model selection.
|
| 28 |
+
- `Seaborn <https://seaborn.pydata.org/>`__: statistical data visualization.
|
| 29 |
+
- `rpy2 <https://rpy2.github.io/>`__: Python to R bridge.
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
Probability distributions
|
| 33 |
+
=========================
|
| 34 |
+
|
| 35 |
+
Each univariate distribution is an instance of a subclass of `rv_continuous`
|
| 36 |
+
(`rv_discrete` for discrete distributions):
|
| 37 |
+
|
| 38 |
+
.. autosummary::
|
| 39 |
+
:toctree: generated/
|
| 40 |
+
|
| 41 |
+
rv_continuous
|
| 42 |
+
rv_discrete
|
| 43 |
+
rv_histogram
|
| 44 |
+
|
| 45 |
+
Continuous distributions
|
| 46 |
+
------------------------
|
| 47 |
+
|
| 48 |
+
.. autosummary::
|
| 49 |
+
:toctree: generated/
|
| 50 |
+
|
| 51 |
+
alpha -- Alpha
|
| 52 |
+
anglit -- Anglit
|
| 53 |
+
arcsine -- Arcsine
|
| 54 |
+
argus -- Argus
|
| 55 |
+
beta -- Beta
|
| 56 |
+
betaprime -- Beta Prime
|
| 57 |
+
bradford -- Bradford
|
| 58 |
+
burr -- Burr (Type III)
|
| 59 |
+
burr12 -- Burr (Type XII)
|
| 60 |
+
cauchy -- Cauchy
|
| 61 |
+
chi -- Chi
|
| 62 |
+
chi2 -- Chi-squared
|
| 63 |
+
cosine -- Cosine
|
| 64 |
+
crystalball -- Crystalball
|
| 65 |
+
dgamma -- Double Gamma
|
| 66 |
+
dweibull -- Double Weibull
|
| 67 |
+
erlang -- Erlang
|
| 68 |
+
expon -- Exponential
|
| 69 |
+
exponnorm -- Exponentially Modified Normal
|
| 70 |
+
exponweib -- Exponentiated Weibull
|
| 71 |
+
exponpow -- Exponential Power
|
| 72 |
+
    f                 -- F (Snedecor F)
|
| 73 |
+
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
|
| 74 |
+
fisk -- Fisk
|
| 75 |
+
foldcauchy -- Folded Cauchy
|
| 76 |
+
foldnorm -- Folded Normal
|
| 77 |
+
genlogistic -- Generalized Logistic
|
| 78 |
+
gennorm -- Generalized normal
|
| 79 |
+
genpareto -- Generalized Pareto
|
| 80 |
+
genexpon -- Generalized Exponential
|
| 81 |
+
genextreme -- Generalized Extreme Value
|
| 82 |
+
gausshyper -- Gauss Hypergeometric
|
| 83 |
+
gamma -- Gamma
|
| 84 |
+
gengamma -- Generalized gamma
|
| 85 |
+
genhalflogistic -- Generalized Half Logistic
|
| 86 |
+
genhyperbolic -- Generalized Hyperbolic
|
| 87 |
+
geninvgauss -- Generalized Inverse Gaussian
|
| 88 |
+
gibrat -- Gibrat
|
| 89 |
+
gompertz -- Gompertz (Truncated Gumbel)
|
| 90 |
+
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
|
| 91 |
+
gumbel_l -- Left Sided Gumbel, etc.
|
| 92 |
+
halfcauchy -- Half Cauchy
|
| 93 |
+
halflogistic -- Half Logistic
|
| 94 |
+
halfnorm -- Half Normal
|
| 95 |
+
halfgennorm -- Generalized Half Normal
|
| 96 |
+
hypsecant -- Hyperbolic Secant
|
| 97 |
+
invgamma -- Inverse Gamma
|
| 98 |
+
invgauss -- Inverse Gaussian
|
| 99 |
+
invweibull -- Inverse Weibull
|
| 100 |
+
irwinhall -- Irwin-Hall
|
| 101 |
+
jf_skew_t -- Jones and Faddy Skew-T
|
| 102 |
+
johnsonsb -- Johnson SB
|
| 103 |
+
johnsonsu -- Johnson SU
|
| 104 |
+
kappa4 -- Kappa 4 parameter
|
| 105 |
+
kappa3 -- Kappa 3 parameter
|
| 106 |
+
ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic
|
| 107 |
+
kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic
|
| 108 |
+
kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
|
| 109 |
+
laplace -- Laplace
|
| 110 |
+
laplace_asymmetric -- Asymmetric Laplace
|
| 111 |
+
levy -- Levy
|
| 112 |
+
levy_l
|
| 113 |
+
levy_stable
|
| 114 |
+
logistic -- Logistic
|
| 115 |
+
loggamma -- Log-Gamma
|
| 116 |
+
loglaplace -- Log-Laplace (Log Double Exponential)
|
| 117 |
+
lognorm -- Log-Normal
|
| 118 |
+
loguniform -- Log-Uniform
|
| 119 |
+
lomax -- Lomax (Pareto of the second kind)
|
| 120 |
+
maxwell -- Maxwell
|
| 121 |
+
mielke -- Mielke's Beta-Kappa
|
| 122 |
+
moyal -- Moyal
|
| 123 |
+
nakagami -- Nakagami
|
| 124 |
+
ncx2 -- Non-central chi-squared
|
| 125 |
+
ncf -- Non-central F
|
| 126 |
+
nct -- Non-central Student's T
|
| 127 |
+
norm -- Normal (Gaussian)
|
| 128 |
+
norminvgauss -- Normal Inverse Gaussian
|
| 129 |
+
pareto -- Pareto
|
| 130 |
+
pearson3 -- Pearson type III
|
| 131 |
+
powerlaw -- Power-function
|
| 132 |
+
powerlognorm -- Power log normal
|
| 133 |
+
powernorm -- Power normal
|
| 134 |
+
rdist -- R-distribution
|
| 135 |
+
rayleigh -- Rayleigh
|
| 136 |
+
rel_breitwigner -- Relativistic Breit-Wigner
|
| 137 |
+
rice -- Rice
|
| 138 |
+
recipinvgauss -- Reciprocal Inverse Gaussian
|
| 139 |
+
semicircular -- Semicircular
|
| 140 |
+
skewcauchy -- Skew Cauchy
|
| 141 |
+
skewnorm -- Skew normal
|
| 142 |
+
studentized_range -- Studentized Range
|
| 143 |
+
t -- Student's T
|
| 144 |
+
trapezoid -- Trapezoidal
|
| 145 |
+
triang -- Triangular
|
| 146 |
+
truncexpon -- Truncated Exponential
|
| 147 |
+
truncnorm -- Truncated Normal
|
| 148 |
+
truncpareto -- Truncated Pareto
|
| 149 |
+
truncweibull_min -- Truncated minimum Weibull distribution
|
| 150 |
+
tukeylambda -- Tukey-Lambda
|
| 151 |
+
uniform -- Uniform
|
| 152 |
+
vonmises -- Von-Mises (Circular)
|
| 153 |
+
vonmises_line -- Von-Mises (Line)
|
| 154 |
+
wald -- Wald
|
| 155 |
+
weibull_min -- Minimum Weibull (see Frechet)
|
| 156 |
+
weibull_max -- Maximum Weibull (see Frechet)
|
| 157 |
+
wrapcauchy -- Wrapped Cauchy
|
| 158 |
+
|
| 159 |
+
The ``fit`` method of the univariate continuous distributions uses
|
| 160 |
+
maximum likelihood estimation to fit the distribution to a data set.
|
| 161 |
+
The ``fit`` method can accept regular data or *censored data*.
|
| 162 |
+
Censored data is represented with instances of the `CensoredData`
|
| 163 |
+
class.
|
| 164 |
+
|
| 165 |
+
.. autosummary::
|
| 166 |
+
:toctree: generated/
|
| 167 |
+
|
| 168 |
+
CensoredData
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
Multivariate distributions
|
| 172 |
+
--------------------------
|
| 173 |
+
|
| 174 |
+
.. autosummary::
|
| 175 |
+
:toctree: generated/
|
| 176 |
+
|
| 177 |
+
multivariate_normal -- Multivariate normal distribution
|
| 178 |
+
matrix_normal -- Matrix normal distribution
|
| 179 |
+
dirichlet -- Dirichlet
|
| 180 |
+
dirichlet_multinomial -- Dirichlet multinomial distribution
|
| 181 |
+
wishart -- Wishart
|
| 182 |
+
invwishart -- Inverse Wishart
|
| 183 |
+
multinomial -- Multinomial distribution
|
| 184 |
+
special_ortho_group -- SO(N) group
|
| 185 |
+
ortho_group -- O(N) group
|
| 186 |
+
unitary_group -- U(N) group
|
| 187 |
+
random_correlation -- random correlation matrices
|
| 188 |
+
multivariate_t -- Multivariate t-distribution
|
| 189 |
+
multivariate_hypergeom -- Multivariate hypergeometric distribution
|
| 190 |
+
random_table -- Distribution of random tables with given marginals
|
| 191 |
+
uniform_direction -- Uniform distribution on S(N-1)
|
| 192 |
+
vonmises_fisher -- Von Mises-Fisher distribution
|
| 193 |
+
|
| 194 |
+
`scipy.stats.multivariate_normal` methods accept instances
|
| 195 |
+
of the following class to represent the covariance.
|
| 196 |
+
|
| 197 |
+
.. autosummary::
|
| 198 |
+
:toctree: generated/
|
| 199 |
+
|
| 200 |
+
Covariance -- Representation of a covariance matrix
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
Discrete distributions
|
| 204 |
+
----------------------
|
| 205 |
+
|
| 206 |
+
.. autosummary::
|
| 207 |
+
:toctree: generated/
|
| 208 |
+
|
| 209 |
+
bernoulli -- Bernoulli
|
| 210 |
+
betabinom -- Beta-Binomial
|
| 211 |
+
betanbinom -- Beta-Negative Binomial
|
| 212 |
+
binom -- Binomial
|
| 213 |
+
boltzmann -- Boltzmann (Truncated Discrete Exponential)
|
| 214 |
+
dlaplace -- Discrete Laplacian
|
| 215 |
+
geom -- Geometric
|
| 216 |
+
hypergeom -- Hypergeometric
|
| 217 |
+
logser -- Logarithmic (Log-Series, Series)
|
| 218 |
+
nbinom -- Negative Binomial
|
| 219 |
+
nchypergeom_fisher -- Fisher's Noncentral Hypergeometric
|
| 220 |
+
nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric
|
| 221 |
+
nhypergeom -- Negative Hypergeometric
|
| 222 |
+
planck -- Planck (Discrete Exponential)
|
| 223 |
+
poisson -- Poisson
|
| 224 |
+
randint -- Discrete Uniform
|
| 225 |
+
skellam -- Skellam
|
| 226 |
+
yulesimon -- Yule-Simon
|
| 227 |
+
zipf -- Zipf (Zeta)
|
| 228 |
+
zipfian -- Zipfian
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
An overview of statistical functions is given below. Many of these functions
|
| 232 |
+
have a similar version in `scipy.stats.mstats` which work for masked arrays.
|
| 233 |
+
|
| 234 |
+
Summary statistics
|
| 235 |
+
==================
|
| 236 |
+
|
| 237 |
+
.. autosummary::
|
| 238 |
+
:toctree: generated/
|
| 239 |
+
|
| 240 |
+
describe -- Descriptive statistics
|
| 241 |
+
gmean -- Geometric mean
|
| 242 |
+
hmean -- Harmonic mean
|
| 243 |
+
pmean -- Power mean
|
| 244 |
+
kurtosis -- Fisher or Pearson kurtosis
|
| 245 |
+
mode -- Modal value
|
| 246 |
+
moment -- Central moment
|
| 247 |
+
expectile -- Expectile
|
| 248 |
+
skew -- Skewness
|
| 249 |
+
kstat --
|
| 250 |
+
kstatvar --
|
| 251 |
+
tmean -- Truncated arithmetic mean
|
| 252 |
+
tvar -- Truncated variance
|
| 253 |
+
tmin --
|
| 254 |
+
tmax --
|
| 255 |
+
tstd --
|
| 256 |
+
tsem --
|
| 257 |
+
variation -- Coefficient of variation
|
| 258 |
+
find_repeats
|
| 259 |
+
rankdata
|
| 260 |
+
tiecorrect
|
| 261 |
+
trim_mean
|
| 262 |
+
gstd -- Geometric Standard Deviation
|
| 263 |
+
iqr
|
| 264 |
+
sem
|
| 265 |
+
bayes_mvs
|
| 266 |
+
mvsdist
|
| 267 |
+
entropy
|
| 268 |
+
differential_entropy
|
| 269 |
+
median_abs_deviation
|
| 270 |
+
|
| 271 |
+
Frequency statistics
|
| 272 |
+
====================
|
| 273 |
+
|
| 274 |
+
.. autosummary::
|
| 275 |
+
:toctree: generated/
|
| 276 |
+
|
| 277 |
+
cumfreq
|
| 278 |
+
percentileofscore
|
| 279 |
+
scoreatpercentile
|
| 280 |
+
relfreq
|
| 281 |
+
|
| 282 |
+
.. autosummary::
|
| 283 |
+
:toctree: generated/
|
| 284 |
+
|
| 285 |
+
binned_statistic -- Compute a binned statistic for a set of data.
|
| 286 |
+
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
|
| 287 |
+
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
|
| 288 |
+
|
| 289 |
+
.. _hypotests:
|
| 290 |
+
|
| 291 |
+
Hypothesis Tests and related functions
|
| 292 |
+
======================================
|
| 293 |
+
SciPy has many functions for performing hypothesis tests that return a
|
| 294 |
+
test statistic and a p-value, and several of them return confidence intervals
|
| 295 |
+
and/or other related information.
|
| 296 |
+
|
| 297 |
+
The headings below are based on common uses of the functions within, but due to
|
| 298 |
+
the wide variety of statistical procedures, any attempt at coarse-grained
|
| 299 |
+
categorization will be imperfect. Also, note that tests within the same heading
|
| 300 |
+
are not interchangeable in general (e.g. many have different distributional
|
| 301 |
+
assumptions).
|
| 302 |
+
|
| 303 |
+
One Sample Tests / Paired Sample Tests
|
| 304 |
+
--------------------------------------
|
| 305 |
+
One sample tests are typically used to assess whether a single sample was
|
| 306 |
+
drawn from a specified distribution or a distribution with specified properties
|
| 307 |
+
(e.g. zero mean).
|
| 308 |
+
|
| 309 |
+
.. autosummary::
|
| 310 |
+
:toctree: generated/
|
| 311 |
+
|
| 312 |
+
ttest_1samp
|
| 313 |
+
binomtest
|
| 314 |
+
quantile_test
|
| 315 |
+
skewtest
|
| 316 |
+
kurtosistest
|
| 317 |
+
normaltest
|
| 318 |
+
jarque_bera
|
| 319 |
+
shapiro
|
| 320 |
+
anderson
|
| 321 |
+
cramervonmises
|
| 322 |
+
ks_1samp
|
| 323 |
+
goodness_of_fit
|
| 324 |
+
chisquare
|
| 325 |
+
power_divergence
|
| 326 |
+
|
| 327 |
+
Paired sample tests are often used to assess whether two samples were drawn
|
| 328 |
+
from the same distribution; they differ from the independent sample tests below
|
| 329 |
+
in that each observation in one sample is treated as paired with a
|
| 330 |
+
closely-related observation in the other sample (e.g. when environmental
|
| 331 |
+
factors are controlled between observations within a pair but not among pairs).
|
| 332 |
+
They can also be interpreted or used as one-sample tests (e.g. tests on the
|
| 333 |
+
mean or median of *differences* between paired observations).
|
| 334 |
+
|
| 335 |
+
.. autosummary::
|
| 336 |
+
:toctree: generated/
|
| 337 |
+
|
| 338 |
+
ttest_rel
|
| 339 |
+
wilcoxon
|
| 340 |
+
|
| 341 |
+
Association/Correlation Tests
|
| 342 |
+
-----------------------------
|
| 343 |
+
|
| 344 |
+
These tests are often used to assess whether there is a relationship (e.g.
|
| 345 |
+
linear) between paired observations in multiple samples or among the
|
| 346 |
+
coordinates of multivariate observations.
|
| 347 |
+
|
| 348 |
+
.. autosummary::
|
| 349 |
+
:toctree: generated/
|
| 350 |
+
|
| 351 |
+
linregress
|
| 352 |
+
pearsonr
|
| 353 |
+
spearmanr
|
| 354 |
+
pointbiserialr
|
| 355 |
+
kendalltau
|
| 356 |
+
weightedtau
|
| 357 |
+
somersd
|
| 358 |
+
siegelslopes
|
| 359 |
+
theilslopes
|
| 360 |
+
page_trend_test
|
| 361 |
+
multiscale_graphcorr
|
| 362 |
+
|
| 363 |
+
These association tests are for working with samples in the form of contingency
|
| 364 |
+
tables. Supporting functions are available in `scipy.stats.contingency`.
|
| 365 |
+
|
| 366 |
+
.. autosummary::
|
| 367 |
+
:toctree: generated/
|
| 368 |
+
|
| 369 |
+
chi2_contingency
|
| 370 |
+
fisher_exact
|
| 371 |
+
barnard_exact
|
| 372 |
+
boschloo_exact
|
| 373 |
+
|
| 374 |
+
Independent Sample Tests
|
| 375 |
+
------------------------
|
| 376 |
+
Independent sample tests are typically used to assess whether multiple samples
|
| 377 |
+
were independently drawn from the same distribution or different distributions
|
| 378 |
+
with a shared property (e.g. equal means).
|
| 379 |
+
|
| 380 |
+
Some tests are specifically for comparing two samples.
|
| 381 |
+
|
| 382 |
+
.. autosummary::
|
| 383 |
+
:toctree: generated/
|
| 384 |
+
|
| 385 |
+
ttest_ind_from_stats
|
| 386 |
+
poisson_means_test
|
| 387 |
+
ttest_ind
|
| 388 |
+
mannwhitneyu
|
| 389 |
+
bws_test
|
| 390 |
+
ranksums
|
| 391 |
+
brunnermunzel
|
| 392 |
+
mood
|
| 393 |
+
ansari
|
| 394 |
+
cramervonmises_2samp
|
| 395 |
+
epps_singleton_2samp
|
| 396 |
+
ks_2samp
|
| 397 |
+
kstest
|
| 398 |
+
|
| 399 |
+
Others are generalized to multiple samples.
|
| 400 |
+
|
| 401 |
+
.. autosummary::
|
| 402 |
+
:toctree: generated/
|
| 403 |
+
|
| 404 |
+
f_oneway
|
| 405 |
+
tukey_hsd
|
| 406 |
+
dunnett
|
| 407 |
+
kruskal
|
| 408 |
+
alexandergovern
|
| 409 |
+
fligner
|
| 410 |
+
levene
|
| 411 |
+
bartlett
|
| 412 |
+
median_test
|
| 413 |
+
friedmanchisquare
|
| 414 |
+
anderson_ksamp
|
| 415 |
+
|
| 416 |
+
Resampling and Monte Carlo Methods
|
| 417 |
+
----------------------------------
|
| 418 |
+
The following functions can reproduce the p-value and confidence interval
|
| 419 |
+
results of most of the functions above, and often produce accurate results in a
|
| 420 |
+
wider variety of conditions. They can also be used to perform hypothesis tests
|
| 421 |
+
and generate confidence intervals for custom statistics. This flexibility comes
|
| 422 |
+
at the cost of greater computational requirements and stochastic results.
|
| 423 |
+
|
| 424 |
+
.. autosummary::
|
| 425 |
+
:toctree: generated/
|
| 426 |
+
|
| 427 |
+
monte_carlo_test
|
| 428 |
+
permutation_test
|
| 429 |
+
bootstrap
|
| 430 |
+
power
|
| 431 |
+
|
| 432 |
+
Instances of the following object can be passed into some hypothesis test
|
| 433 |
+
functions to perform a resampling or Monte Carlo version of the hypothesis
|
| 434 |
+
test.
|
| 435 |
+
|
| 436 |
+
.. autosummary::
|
| 437 |
+
:toctree: generated/
|
| 438 |
+
|
| 439 |
+
MonteCarloMethod
|
| 440 |
+
PermutationMethod
|
| 441 |
+
BootstrapMethod
|
| 442 |
+
|
| 443 |
+
Multiple Hypothesis Testing and Meta-Analysis
|
| 444 |
+
---------------------------------------------
|
| 445 |
+
These functions are for assessing the results of individual tests as a whole.
|
| 446 |
+
Functions for performing specific multiple hypothesis tests (e.g. post hoc
|
| 447 |
+
tests) are listed above.
|
| 448 |
+
|
| 449 |
+
.. autosummary::
|
| 450 |
+
:toctree: generated/
|
| 451 |
+
|
| 452 |
+
combine_pvalues
|
| 453 |
+
false_discovery_control
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
The following functions are related to the tests above but do not belong in the
|
| 457 |
+
above categories.
|
| 458 |
+
|
| 459 |
+
Quasi-Monte Carlo
|
| 460 |
+
=================
|
| 461 |
+
|
| 462 |
+
.. toctree::
|
| 463 |
+
:maxdepth: 4
|
| 464 |
+
|
| 465 |
+
stats.qmc
|
| 466 |
+
|
| 467 |
+
Contingency Tables
|
| 468 |
+
==================
|
| 469 |
+
|
| 470 |
+
.. toctree::
|
| 471 |
+
:maxdepth: 4
|
| 472 |
+
|
| 473 |
+
stats.contingency
|
| 474 |
+
|
| 475 |
+
Masked statistics functions
|
| 476 |
+
===========================
|
| 477 |
+
|
| 478 |
+
.. toctree::
|
| 479 |
+
|
| 480 |
+
stats.mstats
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
Other statistical functionality
|
| 484 |
+
===============================
|
| 485 |
+
|
| 486 |
+
Transformations
|
| 487 |
+
---------------
|
| 488 |
+
|
| 489 |
+
.. autosummary::
|
| 490 |
+
:toctree: generated/
|
| 491 |
+
|
| 492 |
+
boxcox
|
| 493 |
+
boxcox_normmax
|
| 494 |
+
boxcox_llf
|
| 495 |
+
yeojohnson
|
| 496 |
+
yeojohnson_normmax
|
| 497 |
+
yeojohnson_llf
|
| 498 |
+
obrientransform
|
| 499 |
+
sigmaclip
|
| 500 |
+
trimboth
|
| 501 |
+
trim1
|
| 502 |
+
zmap
|
| 503 |
+
zscore
|
| 504 |
+
gzscore
|
| 505 |
+
|
| 506 |
+
Statistical distances
|
| 507 |
+
---------------------
|
| 508 |
+
|
| 509 |
+
.. autosummary::
|
| 510 |
+
:toctree: generated/
|
| 511 |
+
|
| 512 |
+
wasserstein_distance
|
| 513 |
+
wasserstein_distance_nd
|
| 514 |
+
energy_distance
|
| 515 |
+
|
| 516 |
+
Sampling
|
| 517 |
+
--------
|
| 518 |
+
|
| 519 |
+
.. toctree::
|
| 520 |
+
:maxdepth: 4
|
| 521 |
+
|
| 522 |
+
stats.sampling
|
| 523 |
+
|
| 524 |
+
Random variate generation / CDF Inversion
|
| 525 |
+
-----------------------------------------
|
| 526 |
+
|
| 527 |
+
.. autosummary::
|
| 528 |
+
:toctree: generated/
|
| 529 |
+
|
| 530 |
+
rvs_ratio_uniforms
|
| 531 |
+
|
| 532 |
+
Fitting / Survival Analysis
|
| 533 |
+
---------------------------
|
| 534 |
+
|
| 535 |
+
.. autosummary::
|
| 536 |
+
:toctree: generated/
|
| 537 |
+
|
| 538 |
+
fit
|
| 539 |
+
ecdf
|
| 540 |
+
logrank
|
| 541 |
+
|
| 542 |
+
Directional statistical functions
|
| 543 |
+
---------------------------------
|
| 544 |
+
|
| 545 |
+
.. autosummary::
|
| 546 |
+
:toctree: generated/
|
| 547 |
+
|
| 548 |
+
directional_stats
|
| 549 |
+
circmean
|
| 550 |
+
circvar
|
| 551 |
+
circstd
|
| 552 |
+
|
| 553 |
+
Sensitivity Analysis
|
| 554 |
+
--------------------
|
| 555 |
+
|
| 556 |
+
.. autosummary::
|
| 557 |
+
:toctree: generated/
|
| 558 |
+
|
| 559 |
+
sobol_indices
|
| 560 |
+
|
| 561 |
+
Plot-tests
|
| 562 |
+
----------
|
| 563 |
+
|
| 564 |
+
.. autosummary::
|
| 565 |
+
:toctree: generated/
|
| 566 |
+
|
| 567 |
+
ppcc_max
|
| 568 |
+
ppcc_plot
|
| 569 |
+
probplot
|
| 570 |
+
boxcox_normplot
|
| 571 |
+
yeojohnson_normplot
|
| 572 |
+
|
| 573 |
+
Univariate and multivariate kernel density estimation
|
| 574 |
+
-----------------------------------------------------
|
| 575 |
+
|
| 576 |
+
.. autosummary::
|
| 577 |
+
:toctree: generated/
|
| 578 |
+
|
| 579 |
+
gaussian_kde
|
| 580 |
+
|
| 581 |
+
Warnings / Errors used in :mod:`scipy.stats`
|
| 582 |
+
--------------------------------------------
|
| 583 |
+
|
| 584 |
+
.. autosummary::
|
| 585 |
+
:toctree: generated/
|
| 586 |
+
|
| 587 |
+
DegenerateDataWarning
|
| 588 |
+
ConstantInputWarning
|
| 589 |
+
NearConstantInputWarning
|
| 590 |
+
FitError
|
| 591 |
+
|
| 592 |
+
Result classes used in :mod:`scipy.stats`
|
| 593 |
+
-----------------------------------------
|
| 594 |
+
|
| 595 |
+
.. warning::
|
| 596 |
+
|
| 597 |
+
These classes are private, but they are included here because instances
|
| 598 |
+
of them are returned by other statistical functions. User import and
|
| 599 |
+
instantiation is not supported.
|
| 600 |
+
|
| 601 |
+
.. toctree::
|
| 602 |
+
:maxdepth: 2
|
| 603 |
+
|
| 604 |
+
stats._result_classes
|
| 605 |
+
|
| 606 |
+
""" # noqa: E501
|
| 607 |
+
|
| 608 |
+
from ._warnings_errors import (ConstantInputWarning, NearConstantInputWarning,
|
| 609 |
+
DegenerateDataWarning, FitError)
|
| 610 |
+
from ._stats_py import *
|
| 611 |
+
from ._variation import variation
|
| 612 |
+
from .distributions import *
|
| 613 |
+
from ._morestats import *
|
| 614 |
+
from ._multicomp import *
|
| 615 |
+
from ._binomtest import binomtest
|
| 616 |
+
from ._binned_statistic import *
|
| 617 |
+
from ._kde import gaussian_kde
|
| 618 |
+
from . import mstats
|
| 619 |
+
from . import qmc
|
| 620 |
+
from ._multivariate import *
|
| 621 |
+
from . import contingency
|
| 622 |
+
from .contingency import chi2_contingency
|
| 623 |
+
from ._censored_data import CensoredData
|
| 624 |
+
from ._resampling import (bootstrap, monte_carlo_test, permutation_test, power,
|
| 625 |
+
MonteCarloMethod, PermutationMethod, BootstrapMethod)
|
| 626 |
+
from ._entropy import *
|
| 627 |
+
from ._hypotests import *
|
| 628 |
+
from ._rvs_sampling import rvs_ratio_uniforms
|
| 629 |
+
from ._page_trend_test import page_trend_test
|
| 630 |
+
from ._mannwhitneyu import mannwhitneyu
|
| 631 |
+
from ._bws_test import bws_test
|
| 632 |
+
from ._fit import fit, goodness_of_fit
|
| 633 |
+
from ._covariance import Covariance
|
| 634 |
+
from ._sensitivity_analysis import *
|
| 635 |
+
from ._survival import *
|
| 636 |
+
from ._mgc import multiscale_graphcorr
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
| 640 |
+
from . import (
|
| 641 |
+
biasedurn, kde, morestats, mstats_basic, mstats_extras, mvn, stats
|
| 642 |
+
)
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders.
|
| 646 |
+
|
| 647 |
+
from scipy._lib._testutils import PytestTester
|
| 648 |
+
test = PytestTester(__name__)
|
| 649 |
+
del PytestTester
|
parrot/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py
ADDED
|
@@ -0,0 +1,686 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Many scipy.stats functions support `axis` and `nan_policy` parameters.
|
| 2 |
+
# When the two are combined, it can be tricky to get all the behavior just
|
| 3 |
+
# right. This file contains utility functions useful for scipy.stats functions
|
| 4 |
+
# that support `axis` and `nan_policy`, including a decorator that
|
| 5 |
+
# automatically adds `axis` and `nan_policy` arguments to a function.
|
| 6 |
+
|
| 7 |
+
import warnings
|
| 8 |
+
import numpy as np
|
| 9 |
+
from functools import wraps
|
| 10 |
+
from scipy._lib._docscrape import FunctionDoc, Parameter
|
| 11 |
+
from scipy._lib._util import _contains_nan, AxisError, _get_nan
|
| 12 |
+
from scipy._lib._array_api import array_namespace, is_numpy
|
| 13 |
+
|
| 14 |
+
import inspect
|
| 15 |
+
|
| 16 |
+
# Canned warning messages for samples that are too small for the wrapped
# statistic to be computed. The "1d" variants are used when the whole result
# is NaN; the "nd" variants when only some axis-slices are affected. The
# "omit" variants apply after `nan_policy='omit'` has removed NaNs.
too_small_1d_not_omit = (
    "One or more sample arguments is too small; all "
    "returned values will be NaN. "
    "See documentation for sample size requirements.")

too_small_1d_omit = (
    "After omitting NaNs, one or more sample arguments "
    "is too small; all returned values will be NaN. "
    "See documentation for sample size requirements.")

too_small_nd_not_omit = (
    "All axis-slices of one or more sample arguments are "
    "too small; all elements of returned arrays will be NaN. "
    "See documentation for sample size requirements.")

too_small_nd_omit = (
    "After omitting NaNs, one or more axis-slices of one "
    "or more sample arguments is too small; corresponding "
    "elements of returned arrays will be NaN. "
    "See documentation for sample size requirements.")


class SmallSampleWarning(RuntimeWarning):
    """Warning emitted when a sample is too small for a reliable result."""
    pass
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _broadcast_arrays(arrays, axis=None, xp=None):
    """Broadcast arrays against one another, ignoring the specified axes.

    Shape entries at the positions in `axis` are kept as-is for each array;
    all other dimensions are broadcast to a common shape. With ``axis=None``
    all dimensions participate in broadcasting.
    """
    if not arrays:
        return arrays
    if xp is None:
        xp = array_namespace(*arrays)
    converted = [xp.asarray(arr) for arr in arrays]
    target_shapes = _broadcast_shapes([arr.shape for arr in converted], axis)
    if axis is None:
        # `_broadcast_shapes` returns a single common shape in this case;
        # every array is broadcast to it.
        target_shapes = [target_shapes] * len(converted)
    return [xp.broadcast_to(arr, shape)
            for arr, shape in zip(converted, target_shapes)]
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _broadcast_shapes(shapes, axis=None):
    """
    Broadcast shapes, ignoring incompatibility of specified axes

    Returns a single broadcast shape (tuple) when `axis` is None, or a list
    of per-array shapes (the broadcast shape with each array's own ignored
    axis sizes re-inserted) when `axis` is an int or tuple of ints.
    Raises AxisError for invalid `axis` and ValueError if the remaining
    dimensions are not broadcast-compatible.
    """
    if not shapes:
        return shapes

    # input validation
    if axis is not None:
        axis = np.atleast_1d(axis)
        axis_int = axis.astype(int)
        # reject floats etc. that would be silently truncated by `astype`
        if not np.array_equal(axis_int, axis):
            raise AxisError('`axis` must be an integer, a '
                            'tuple of integers, or `None`.')
        axis = axis_int

    # First, ensure all shapes have same number of dimensions by prepending 1s.
    n_dims = max([len(shape) for shape in shapes])
    new_shapes = np.ones((len(shapes), n_dims), dtype=int)
    for row, shape in zip(new_shapes, shapes):
        row[len(row)-len(shape):] = shape  # can't use negative indices (-0:)

    # Remove the shape elements of the axes to be ignored, but remember them.
    if axis is not None:
        # normalize negative axes, then sort so later re-insertion is stable
        axis[axis < 0] = n_dims + axis[axis < 0]
        axis = np.sort(axis)
        if axis[-1] >= n_dims or axis[0] < 0:
            message = (f"`axis` is out of bounds "
                       f"for array of dimension {n_dims}")
            raise AxisError(message)

        if len(np.unique(axis)) != len(axis):
            raise AxisError("`axis` must contain only distinct elements")

        removed_shapes = new_shapes[:, axis]
        new_shapes = np.delete(new_shapes, axis, axis=1)

    # If arrays are broadcastable, shape elements that are 1 may be replaced
    # with a corresponding non-1 shape element. Assuming arrays are
    # broadcastable, that final shape element can be found with:
    new_shape = np.max(new_shapes, axis=0)
    # except in case of an empty array: a 0 anywhere forces that dim to 0
    new_shape *= new_shapes.all(axis=0)

    # Among all arrays, there can only be one unique non-1 shape element.
    # Therefore, if any non-1 shape element does not match what we found
    # above, the arrays must not be broadcastable after all.
    if np.any(~((new_shapes == 1) | (new_shapes == new_shape))):
        raise ValueError("Array shapes are incompatible for broadcasting.")

    if axis is not None:
        # Add back the shape elements that were ignored. `new_axis` adjusts
        # the insertion positions for the entries already removed before each.
        new_axis = axis - np.arange(len(axis))
        new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape))
                      for removed_shape in removed_shapes]
        return new_shapes
    else:
        return tuple(new_shape)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _broadcast_array_shapes_remove_axis(arrays, axis=None):
    """Broadcast shapes of arrays, dropping specified axes.

    Given a sequence of arrays `arrays` and an integer or tuple `axis`,
    find the shape of the broadcast result after consuming/dropping `axis`;
    i.e., the output shape of a typical hypothesis test on `arrays`
    vectorized along `axis`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats._axis_nan_policy import _broadcast_array_shapes_remove_axis
    >>> a = np.zeros((5, 2, 1))
    >>> b = np.zeros((9, 3))
    >>> _broadcast_array_shapes_remove_axis((a, b), 1)
    (5, 3)
    """
    # Here, `axis=None` means "consume/drop no axes" -- it does NOT mean
    # the arrays are raveled before broadcasting.
    return _broadcast_shapes_remove_axis([arr.shape for arr in arrays], axis)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _broadcast_shapes_remove_axis(shapes, axis=None):
    """Broadcast `shapes`, then drop the entries at `axis`.

    Same as `_broadcast_array_shapes_remove_axis`, but given a sequence of
    array shapes `shapes` instead of the arrays themselves.
    """
    broadcast = _broadcast_shapes(shapes, axis)
    result = broadcast[0]
    if axis is not None:
        result = np.delete(result, axis)
    return tuple(result)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _broadcast_concatenate(arrays, axis, paired=False):
    """Concatenate arrays along an axis with broadcasting."""
    # For paired samples every axis (including `axis`) must match, so
    # broadcast over all axes; otherwise `axis` is exempt from broadcasting.
    broadcast_axis = None if paired else axis
    arrays = _broadcast_arrays(arrays, broadcast_axis)
    return np.concatenate(arrays, axis=axis)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
# TODO: add support for `axis` tuples
|
| 163 |
+
def _remove_nans(samples, paired):
|
| 164 |
+
"Remove nans from paired or unpaired 1D samples"
|
| 165 |
+
# potential optimization: don't copy arrays that don't contain nans
|
| 166 |
+
if not paired:
|
| 167 |
+
return [sample[~np.isnan(sample)] for sample in samples]
|
| 168 |
+
|
| 169 |
+
# for paired samples, we need to remove the whole pair when any part
|
| 170 |
+
# has a nan
|
| 171 |
+
nans = np.isnan(samples[0])
|
| 172 |
+
for sample in samples[1:]:
|
| 173 |
+
nans = nans | np.isnan(sample)
|
| 174 |
+
not_nans = ~nans
|
| 175 |
+
return [sample[not_nans] for sample in samples]
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def _remove_sentinel(samples, paired, sentinel):
|
| 179 |
+
"Remove sentinel values from paired or unpaired 1D samples"
|
| 180 |
+
# could consolidate with `_remove_nans`, but it's not quite as simple as
|
| 181 |
+
# passing `sentinel=np.nan` because `(np.nan == np.nan) is False`
|
| 182 |
+
|
| 183 |
+
# potential optimization: don't copy arrays that don't contain sentinel
|
| 184 |
+
if not paired:
|
| 185 |
+
return [sample[sample != sentinel] for sample in samples]
|
| 186 |
+
|
| 187 |
+
# for paired samples, we need to remove the whole pair when any part
|
| 188 |
+
# has a nan
|
| 189 |
+
sentinels = (samples[0] == sentinel)
|
| 190 |
+
for sample in samples[1:]:
|
| 191 |
+
sentinels = sentinels | (sample == sentinel)
|
| 192 |
+
not_sentinels = ~sentinels
|
| 193 |
+
return [sample[not_sentinels] for sample in samples]
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def _masked_arrays_2_sentinel_arrays(samples):
    """Convert masked arrays to regular arrays with a sentinel in masked slots.

    Returns ``(out_samples, sentinel)``; `sentinel` is ``None`` when no input
    had any masked element (in which case `samples` is returned unmodified).
    """
    # masked arrays in `samples` are converted to regular arrays, and values
    # corresponding with masked elements are replaced with a sentinel value

    # return without modifying arrays if none have a mask
    has_mask = False
    for sample in samples:
        mask = getattr(sample, 'mask', False)
        has_mask = has_mask or np.any(mask)
    if not has_mask:
        return samples, None  # None means there is no sentinel value

    # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked)
    # values are always omitted, but there are different nan policies.
    dtype = np.result_type(*samples)
    dtype = dtype if np.issubdtype(dtype, np.number) else np.float64
    for i in range(len(samples)):
        # Things get more complicated if the arrays are of different types.
        # We could have different sentinel values for each array, but
        # the purpose of this code is convenience, not efficiency.
        samples[i] = samples[i].astype(dtype, copy=False)

    inexact = np.issubdtype(dtype, np.inexact)
    info = np.finfo if inexact else np.iinfo
    max_possible, min_possible = info(dtype).max, info(dtype).min
    # "next smaller representable value": nextafter for floats, -1 for ints
    nextafter = np.nextafter if inexact else (lambda x, _: x - 1)

    sentinel = max_possible
    # For simplicity, min_possible/np.infs are not candidate sentinel values.
    # Walk downward from the maximum until a value absent from all samples
    # is found; the while/else triggers only if the range is exhausted.
    while sentinel > min_possible:
        for sample in samples:
            if np.any(sample == sentinel):  # choose a new sentinel value
                sentinel = nextafter(sentinel, -np.inf)
                break
        else:  # when sentinel value is OK, break the while loop
            break
    else:
        message = ("This function replaces masked elements with sentinel "
                   "values, but the data contains all distinct values of this "
                   "data type. Consider promoting the dtype to `np.float64`.")
        raise ValueError(message)

    # replace masked elements with sentinel value
    out_samples = []
    for sample in samples:
        mask = getattr(sample, 'mask', None)
        if mask is not None:  # turn all masked arrays into sentinel arrays
            mask = np.broadcast_to(mask, sample.shape)
            # only copy when there is something to overwrite
            sample = sample.data.copy() if np.any(mask) else sample.data
            sample = np.asarray(sample)  # `sample.data` could be a memoryview?
            sample[mask] = sentinel
        out_samples.append(sample)

    return out_samples, sentinel
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def _check_empty_inputs(samples, axis):
    """Check for empty samples; return appropriate output for a vectorized test.

    Returns ``None`` when no sample is empty (caller must perform the real
    computation); otherwise returns an array -- possibly empty -- of NaNs
    with the shape of a vectorized hypothesis test result.
    """
    has_empty = any(sample.size == 0 for sample in samples)
    if not has_empty:
        return None
    # The statistic and p-value will be either empty arrays or arrays of
    # NaNs with the broadcast shape after consuming `axis`.
    shape = _broadcast_array_shapes_remove_axis(samples, axis)
    # multiply by the NaN value `_get_nan` derives from the samples' dtypes
    return np.ones(shape) * _get_nan(*samples)
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _add_reduced_axes(res, reduced_axes, keepdims):
|
| 267 |
+
"""
|
| 268 |
+
Add reduced axes back to all the arrays in the result object
|
| 269 |
+
if keepdims = True.
|
| 270 |
+
"""
|
| 271 |
+
return ([np.expand_dims(output, reduced_axes)
|
| 272 |
+
if not isinstance(output, int) else output for output in res]
|
| 273 |
+
if keepdims else res)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
# Standard docstring / signature entries for `axis`, `nan_policy`, `keepdims`
# (consumed by the decorator factory below to patch wrapped functions'
# docstrings and signatures).
_name = 'axis'
_desc = (
    """If an int, the axis of the input along which to compute the statistic.
The statistic of each axis-slice (e.g. row) of the input will appear in a
corresponding element of the output.
If ``None``, the input will be raveled before computing the statistic."""
    .split('\n'))


def _get_axis_params(default_axis=0, _name=_name, _desc=_desc):  # bind NOW
    """Build the doc entry and `inspect.Parameter` for an `axis` keyword."""
    _type = f"int or None, default: {default_axis}"
    _axis_parameter_doc = Parameter(_name, _type, _desc)
    _axis_parameter = inspect.Parameter(_name,
                                        inspect.Parameter.KEYWORD_ONLY,
                                        default=default_axis)
    return _axis_parameter_doc, _axis_parameter
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
# Doc entry and signature parameter for the `nan_policy` keyword.
_name = 'nan_policy'
_type = "{'propagate', 'omit', 'raise'}"
_desc = (
    """Defines how to handle input NaNs.

- ``propagate``: if a NaN is present in the axis slice (e.g. row) along
  which the statistic is computed, the corresponding entry of the output
  will be NaN.
- ``omit``: NaNs will be omitted when performing the calculation.
  If insufficient data remains in the axis slice along which the
  statistic is computed, the corresponding entry of the output will be
  NaN.
- ``raise``: if a NaN is present, a ``ValueError`` will be raised."""
    .split('\n'))
_nan_policy_parameter_doc = Parameter(_name, _type, _desc)
_nan_policy_parameter = inspect.Parameter(_name,
                                          inspect.Parameter.KEYWORD_ONLY,
                                          default='propagate')

# Doc entry and signature parameter for the `keepdims` keyword.
_name = 'keepdims'
_type = "bool, default: False"
_desc = (
    """If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array."""
    .split('\n'))
_keepdims_parameter_doc = Parameter(_name, _type, _desc)
_keepdims_parameter = inspect.Parameter(_name,
                                        inspect.Parameter.KEYWORD_ONLY,
                                        default=False)

# Boilerplate note appended to wrapped functions' Notes sections.
_standard_note_addition = (
    """\nBeginning in SciPy 1.9, ``np.matrix`` inputs (not recommended for new
code) are converted to ``np.ndarray`` before the calculation is performed. In
this case, the output will be a scalar or ``np.ndarray`` of appropriate shape
rather than a 2D ``np.matrix``. Similarly, while masked elements of masked
arrays are ignored, the output will be a scalar or ``np.ndarray`` rather than a
masked array with ``mask=False``.""").split('\n')
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
|
| 336 |
+
n_samples=1, paired=False,
|
| 337 |
+
result_to_tuple=None, too_small=0,
|
| 338 |
+
n_outputs=2, kwd_samples=[], override=None):
|
| 339 |
+
"""Factory for a wrapper that adds axis/nan_policy params to a function.
|
| 340 |
+
|
| 341 |
+
Parameters
|
| 342 |
+
----------
|
| 343 |
+
tuple_to_result : callable
|
| 344 |
+
Callable that returns an object of the type returned by the function
|
| 345 |
+
being wrapped (e.g. the namedtuple or dataclass returned by a
|
| 346 |
+
statistical test) provided the separate components (e.g. statistic,
|
| 347 |
+
pvalue).
|
| 348 |
+
default_axis : int, default: 0
|
| 349 |
+
The default value of the axis argument. Standard is 0 except when
|
| 350 |
+
backwards compatibility demands otherwise (e.g. `None`).
|
| 351 |
+
n_samples : int or callable, default: 1
|
| 352 |
+
The number of data samples accepted by the function
|
| 353 |
+
(e.g. `mannwhitneyu`), a callable that accepts a dictionary of
|
| 354 |
+
parameters passed into the function and returns the number of data
|
| 355 |
+
samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number
|
| 356 |
+
of samples (e.g. `kruskal`).
|
| 357 |
+
paired : {False, True}
|
| 358 |
+
Whether the function being wrapped treats the samples as paired (i.e.
|
| 359 |
+
corresponding elements of each sample should be considered as different
|
| 360 |
+
components of the same sample.)
|
| 361 |
+
result_to_tuple : callable, optional
|
| 362 |
+
Function that unpacks the results of the function being wrapped into
|
| 363 |
+
a tuple. This is essentially the inverse of `tuple_to_result`. Default
|
| 364 |
+
is `None`, which is appropriate for statistical tests that return a
|
| 365 |
+
statistic, pvalue tuple (rather than, e.g., a non-iterable dataclass).
|
| 366 |
+
too_small : int or callable, default: 0
|
| 367 |
+
The largest unacceptably small sample for the function being wrapped.
|
| 368 |
+
For example, some functions require samples of size two or more or they
|
| 369 |
+
raise an error. This argument prevents the error from being raised when
|
| 370 |
+
input is not 1D and instead places a NaN in the corresponding element
|
| 371 |
+
of the result. If callable, it must accept a list of samples, axis,
|
| 372 |
+
and a dictionary of keyword arguments passed to the wrapper function as
|
| 373 |
+
arguments and return a bool indicating whether the samples passed are
|
| 374 |
+
too small.
|
| 375 |
+
n_outputs : int or callable, default: 2
|
| 376 |
+
The number of outputs produced by the function given 1d sample(s). For
|
| 377 |
+
example, hypothesis tests that return a namedtuple or result object
|
| 378 |
+
with attributes ``statistic`` and ``pvalue`` use the default
|
| 379 |
+
``n_outputs=2``; summary statistics with scalar output use
|
| 380 |
+
``n_outputs=1``. Alternatively, may be a callable that accepts a
|
| 381 |
+
dictionary of arguments passed into the wrapped function and returns
|
| 382 |
+
the number of outputs corresponding with those arguments.
|
| 383 |
+
kwd_samples : sequence, default: []
|
| 384 |
+
The names of keyword parameters that should be treated as samples. For
|
| 385 |
+
example, `gmean` accepts as its first argument a sample `a` but
|
| 386 |
+
also `weights` as a fourth, optional keyword argument. In this case, we
|
| 387 |
+
use `n_samples=1` and kwd_samples=['weights'].
|
| 388 |
+
override : dict, default: {'vectorization': False, 'nan_propagation': True}
|
| 389 |
+
Pass a dictionary with ``'vectorization': True`` to ensure that the
|
| 390 |
+
decorator overrides the function's behavior for multimensional input.
|
| 391 |
+
Use ``'nan_propagation': False`` to ensure that the decorator does not
|
| 392 |
+
override the function's behavior for ``nan_policy='propagate'``.
|
| 393 |
+
"""
|
| 394 |
+
# Specify which existing behaviors the decorator must override
|
| 395 |
+
temp = override or {}
|
| 396 |
+
override = {'vectorization': False,
|
| 397 |
+
'nan_propagation': True}
|
| 398 |
+
override.update(temp)
|
| 399 |
+
|
| 400 |
+
if result_to_tuple is None:
|
| 401 |
+
def result_to_tuple(res):
|
| 402 |
+
return res
|
| 403 |
+
|
| 404 |
+
if not callable(too_small):
|
| 405 |
+
def is_too_small(samples, *ts_args, axis=-1, **ts_kwargs):
|
| 406 |
+
for sample in samples:
|
| 407 |
+
if sample.shape[axis] <= too_small:
|
| 408 |
+
return True
|
| 409 |
+
return False
|
| 410 |
+
else:
|
| 411 |
+
is_too_small = too_small
|
| 412 |
+
|
| 413 |
+
def axis_nan_policy_decorator(hypotest_fun_in):
|
| 414 |
+
@wraps(hypotest_fun_in)
|
| 415 |
+
def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds):
|
| 416 |
+
|
| 417 |
+
if _no_deco: # for testing, decorator does nothing
|
| 418 |
+
return hypotest_fun_in(*args, **kwds)
|
| 419 |
+
|
| 420 |
+
# For now, skip the decorator entirely if using array API. In the future,
|
| 421 |
+
# we'll probably want to use it for `keepdims`, `axis` tuples, etc.
|
| 422 |
+
if len(args) == 0: # extract sample from `kwds` if there are no `args`
|
| 423 |
+
used_kwd_samples = list(set(kwds).intersection(set(kwd_samples)))
|
| 424 |
+
temp = used_kwd_samples[:1]
|
| 425 |
+
else:
|
| 426 |
+
temp = args[0]
|
| 427 |
+
|
| 428 |
+
if not is_numpy(array_namespace(temp)):
|
| 429 |
+
msg = ("Use of `nan_policy` and `keepdims` "
|
| 430 |
+
"is incompatible with non-NumPy arrays.")
|
| 431 |
+
if 'nan_policy' in kwds or 'keepdims' in kwds:
|
| 432 |
+
raise NotImplementedError(msg)
|
| 433 |
+
return hypotest_fun_in(*args, **kwds)
|
| 434 |
+
|
| 435 |
+
# We need to be flexible about whether position or keyword
|
| 436 |
+
# arguments are used, but we need to make sure users don't pass
|
| 437 |
+
# both for the same parameter. To complicate matters, some
|
| 438 |
+
# functions accept samples with *args, and some functions already
|
| 439 |
+
# accept `axis` and `nan_policy` as positional arguments.
|
| 440 |
+
# The strategy is to make sure that there is no duplication
|
| 441 |
+
# between `args` and `kwds`, combine the two into `kwds`, then
|
| 442 |
+
# the samples, `nan_policy`, and `axis` from `kwds`, as they are
|
| 443 |
+
# dealt with separately.
|
| 444 |
+
|
| 445 |
+
# Check for intersection between positional and keyword args
|
| 446 |
+
params = list(inspect.signature(hypotest_fun_in).parameters)
|
| 447 |
+
if n_samples is None:
|
| 448 |
+
# Give unique names to each positional sample argument
|
| 449 |
+
# Note that *args can't be provided as a keyword argument
|
| 450 |
+
params = [f"arg{i}" for i in range(len(args))] + params[1:]
|
| 451 |
+
|
| 452 |
+
# raise if there are too many positional args
|
| 453 |
+
maxarg = (np.inf if inspect.getfullargspec(hypotest_fun_in).varargs
|
| 454 |
+
else len(inspect.getfullargspec(hypotest_fun_in).args))
|
| 455 |
+
if len(args) > maxarg: # let the function raise the right error
|
| 456 |
+
hypotest_fun_in(*args, **kwds)
|
| 457 |
+
|
| 458 |
+
# raise if multiple values passed for same parameter
|
| 459 |
+
d_args = dict(zip(params, args))
|
| 460 |
+
intersection = set(d_args) & set(kwds)
|
| 461 |
+
if intersection: # let the function raise the right error
|
| 462 |
+
hypotest_fun_in(*args, **kwds)
|
| 463 |
+
|
| 464 |
+
# Consolidate other positional and keyword args into `kwds`
|
| 465 |
+
kwds.update(d_args)
|
| 466 |
+
|
| 467 |
+
# rename avoids UnboundLocalError
|
| 468 |
+
if callable(n_samples):
|
| 469 |
+
# Future refactoring idea: no need for callable n_samples.
|
| 470 |
+
# Just replace `n_samples` and `kwd_samples` with a single
|
| 471 |
+
# list of the names of all samples, and treat all of them
|
| 472 |
+
# as `kwd_samples` are treated below.
|
| 473 |
+
n_samp = n_samples(kwds)
|
| 474 |
+
else:
|
| 475 |
+
n_samp = n_samples or len(args)
|
| 476 |
+
|
| 477 |
+
# get the number of outputs
|
| 478 |
+
n_out = n_outputs # rename to avoid UnboundLocalError
|
| 479 |
+
if callable(n_out):
|
| 480 |
+
n_out = n_out(kwds)
|
| 481 |
+
|
| 482 |
+
# If necessary, rearrange function signature: accept other samples
|
| 483 |
+
# as positional args right after the first n_samp args
|
| 484 |
+
kwd_samp = [name for name in kwd_samples
|
| 485 |
+
if kwds.get(name, None) is not None]
|
| 486 |
+
n_kwd_samp = len(kwd_samp)
|
| 487 |
+
if not kwd_samp:
|
| 488 |
+
hypotest_fun_out = hypotest_fun_in
|
| 489 |
+
else:
|
| 490 |
+
def hypotest_fun_out(*samples, **kwds):
|
| 491 |
+
new_kwds = dict(zip(kwd_samp, samples[n_samp:]))
|
| 492 |
+
kwds.update(new_kwds)
|
| 493 |
+
return hypotest_fun_in(*samples[:n_samp], **kwds)
|
| 494 |
+
|
| 495 |
+
# Extract the things we need here
|
| 496 |
+
try: # if something is missing
|
| 497 |
+
samples = [np.atleast_1d(kwds.pop(param))
|
| 498 |
+
for param in (params[:n_samp] + kwd_samp)]
|
| 499 |
+
except KeyError: # let the function raise the right error
|
| 500 |
+
# might need to revisit this if required arg is not a "sample"
|
| 501 |
+
hypotest_fun_in(*args, **kwds)
|
| 502 |
+
vectorized = True if 'axis' in params else False
|
| 503 |
+
vectorized = vectorized and not override['vectorization']
|
| 504 |
+
axis = kwds.pop('axis', default_axis)
|
| 505 |
+
nan_policy = kwds.pop('nan_policy', 'propagate')
|
| 506 |
+
keepdims = kwds.pop("keepdims", False)
|
| 507 |
+
del args # avoid the possibility of passing both `args` and `kwds`
|
| 508 |
+
|
| 509 |
+
# convert masked arrays to regular arrays with sentinel values
|
| 510 |
+
samples, sentinel = _masked_arrays_2_sentinel_arrays(samples)
|
| 511 |
+
|
| 512 |
+
# standardize to always work along last axis
|
| 513 |
+
reduced_axes = axis
|
| 514 |
+
if axis is None:
|
| 515 |
+
if samples:
|
| 516 |
+
# when axis=None, take the maximum of all dimensions since
|
| 517 |
+
# all the dimensions are reduced.
|
| 518 |
+
n_dims = np.max([sample.ndim for sample in samples])
|
| 519 |
+
reduced_axes = tuple(range(n_dims))
|
| 520 |
+
samples = [np.asarray(sample.ravel()) for sample in samples]
|
| 521 |
+
else:
|
| 522 |
+
samples = _broadcast_arrays(samples, axis=axis)
|
| 523 |
+
axis = np.atleast_1d(axis)
|
| 524 |
+
n_axes = len(axis)
|
| 525 |
+
# move all axes in `axis` to the end to be raveled
|
| 526 |
+
samples = [np.moveaxis(sample, axis, range(-len(axis), 0))
|
| 527 |
+
for sample in samples]
|
| 528 |
+
shapes = [sample.shape for sample in samples]
|
| 529 |
+
# New shape is unchanged for all axes _not_ in `axis`
|
| 530 |
+
# At the end, we append the product of the shapes of the axes
|
| 531 |
+
# in `axis`. Appending -1 doesn't work for zero-size arrays!
|
| 532 |
+
new_shapes = [shape[:-n_axes] + (np.prod(shape[-n_axes:]),)
|
| 533 |
+
for shape in shapes]
|
| 534 |
+
samples = [sample.reshape(new_shape)
|
| 535 |
+
for sample, new_shape in zip(samples, new_shapes)]
|
| 536 |
+
axis = -1 # work over the last axis
|
| 537 |
+
NaN = _get_nan(*samples) if samples else np.nan
|
| 538 |
+
|
| 539 |
+
# if axis is not needed, just handle nan_policy and return
|
| 540 |
+
ndims = np.array([sample.ndim for sample in samples])
|
| 541 |
+
if np.all(ndims <= 1):
|
| 542 |
+
# Addresses nan_policy == "raise"
|
| 543 |
+
if nan_policy != 'propagate' or override['nan_propagation']:
|
| 544 |
+
contains_nan = [_contains_nan(sample, nan_policy)[0]
|
| 545 |
+
for sample in samples]
|
| 546 |
+
else:
|
| 547 |
+
# Behave as though there are no NaNs (even if there are)
|
| 548 |
+
contains_nan = [False]*len(samples)
|
| 549 |
+
|
| 550 |
+
# Addresses nan_policy == "propagate"
|
| 551 |
+
if any(contains_nan) and (nan_policy == 'propagate'
|
| 552 |
+
and override['nan_propagation']):
|
| 553 |
+
res = np.full(n_out, NaN)
|
| 554 |
+
res = _add_reduced_axes(res, reduced_axes, keepdims)
|
| 555 |
+
return tuple_to_result(*res)
|
| 556 |
+
|
| 557 |
+
# Addresses nan_policy == "omit"
|
| 558 |
+
too_small_msg = too_small_1d_not_omit
|
| 559 |
+
if any(contains_nan) and nan_policy == 'omit':
|
| 560 |
+
# consider passing in contains_nan
|
| 561 |
+
samples = _remove_nans(samples, paired)
|
| 562 |
+
too_small_msg = too_small_1d_omit
|
| 563 |
+
|
| 564 |
+
if sentinel:
|
| 565 |
+
samples = _remove_sentinel(samples, paired, sentinel)
|
| 566 |
+
|
| 567 |
+
if is_too_small(samples, kwds):
|
| 568 |
+
warnings.warn(too_small_msg, SmallSampleWarning, stacklevel=2)
|
| 569 |
+
res = np.full(n_out, NaN)
|
| 570 |
+
res = _add_reduced_axes(res, reduced_axes, keepdims)
|
| 571 |
+
return tuple_to_result(*res)
|
| 572 |
+
|
| 573 |
+
res = hypotest_fun_out(*samples, **kwds)
|
| 574 |
+
res = result_to_tuple(res)
|
| 575 |
+
res = _add_reduced_axes(res, reduced_axes, keepdims)
|
| 576 |
+
return tuple_to_result(*res)
|
| 577 |
+
|
| 578 |
+
# check for empty input
|
| 579 |
+
empty_output = _check_empty_inputs(samples, axis)
|
| 580 |
+
# only return empty output if zero sized input is too small.
|
| 581 |
+
if (
|
| 582 |
+
empty_output is not None
|
| 583 |
+
and (is_too_small(samples, kwds) or empty_output.size == 0)
|
| 584 |
+
):
|
| 585 |
+
if is_too_small(samples, kwds) and empty_output.size != 0:
|
| 586 |
+
warnings.warn(too_small_nd_not_omit, SmallSampleWarning,
|
| 587 |
+
stacklevel=2)
|
| 588 |
+
res = [empty_output.copy() for i in range(n_out)]
|
| 589 |
+
res = _add_reduced_axes(res, reduced_axes, keepdims)
|
| 590 |
+
return tuple_to_result(*res)
|
| 591 |
+
|
| 592 |
+
# otherwise, concatenate all samples along axis, remembering where
|
| 593 |
+
# each separate sample begins
|
| 594 |
+
lengths = np.array([sample.shape[axis] for sample in samples])
|
| 595 |
+
split_indices = np.cumsum(lengths)
|
| 596 |
+
x = _broadcast_concatenate(samples, axis)
|
| 597 |
+
|
| 598 |
+
# Addresses nan_policy == "raise"
|
| 599 |
+
if nan_policy != 'propagate' or override['nan_propagation']:
|
| 600 |
+
contains_nan, _ = _contains_nan(x, nan_policy)
|
| 601 |
+
else:
|
| 602 |
+
contains_nan = False # behave like there are no NaNs
|
| 603 |
+
|
| 604 |
+
if vectorized and not contains_nan and not sentinel:
|
| 605 |
+
res = hypotest_fun_out(*samples, axis=axis, **kwds)
|
| 606 |
+
res = result_to_tuple(res)
|
| 607 |
+
res = _add_reduced_axes(res, reduced_axes, keepdims)
|
| 608 |
+
return tuple_to_result(*res)
|
| 609 |
+
|
| 610 |
+
# Addresses nan_policy == "omit"
|
| 611 |
+
if contains_nan and nan_policy == 'omit':
|
| 612 |
+
def hypotest_fun(x):
|
| 613 |
+
samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
|
| 614 |
+
samples = _remove_nans(samples, paired)
|
| 615 |
+
if sentinel:
|
| 616 |
+
samples = _remove_sentinel(samples, paired, sentinel)
|
| 617 |
+
if is_too_small(samples, kwds):
|
| 618 |
+
warnings.warn(too_small_nd_omit, SmallSampleWarning,
|
| 619 |
+
stacklevel=4)
|
| 620 |
+
return np.full(n_out, NaN)
|
| 621 |
+
return result_to_tuple(hypotest_fun_out(*samples, **kwds))
|
| 622 |
+
|
| 623 |
+
# Addresses nan_policy == "propagate"
|
| 624 |
+
elif (contains_nan and nan_policy == 'propagate'
|
| 625 |
+
and override['nan_propagation']):
|
| 626 |
+
def hypotest_fun(x):
|
| 627 |
+
if np.isnan(x).any():
|
| 628 |
+
return np.full(n_out, NaN)
|
| 629 |
+
|
| 630 |
+
samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
|
| 631 |
+
if sentinel:
|
| 632 |
+
samples = _remove_sentinel(samples, paired, sentinel)
|
| 633 |
+
if is_too_small(samples, kwds):
|
| 634 |
+
return np.full(n_out, NaN)
|
| 635 |
+
return result_to_tuple(hypotest_fun_out(*samples, **kwds))
|
| 636 |
+
|
| 637 |
+
else:
|
| 638 |
+
def hypotest_fun(x):
|
| 639 |
+
samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
|
| 640 |
+
if sentinel:
|
| 641 |
+
samples = _remove_sentinel(samples, paired, sentinel)
|
| 642 |
+
if is_too_small(samples, kwds):
|
| 643 |
+
return np.full(n_out, NaN)
|
| 644 |
+
return result_to_tuple(hypotest_fun_out(*samples, **kwds))
|
| 645 |
+
|
| 646 |
+
x = np.moveaxis(x, axis, 0)
|
| 647 |
+
res = np.apply_along_axis(hypotest_fun, axis=0, arr=x)
|
| 648 |
+
res = _add_reduced_axes(res, reduced_axes, keepdims)
|
| 649 |
+
return tuple_to_result(*res)
|
| 650 |
+
|
| 651 |
+
_axis_parameter_doc, _axis_parameter = _get_axis_params(default_axis)
|
| 652 |
+
doc = FunctionDoc(axis_nan_policy_wrapper)
|
| 653 |
+
parameter_names = [param.name for param in doc['Parameters']]
|
| 654 |
+
if 'axis' in parameter_names:
|
| 655 |
+
doc['Parameters'][parameter_names.index('axis')] = (
|
| 656 |
+
_axis_parameter_doc)
|
| 657 |
+
else:
|
| 658 |
+
doc['Parameters'].append(_axis_parameter_doc)
|
| 659 |
+
if 'nan_policy' in parameter_names:
|
| 660 |
+
doc['Parameters'][parameter_names.index('nan_policy')] = (
|
| 661 |
+
_nan_policy_parameter_doc)
|
| 662 |
+
else:
|
| 663 |
+
doc['Parameters'].append(_nan_policy_parameter_doc)
|
| 664 |
+
if 'keepdims' in parameter_names:
|
| 665 |
+
doc['Parameters'][parameter_names.index('keepdims')] = (
|
| 666 |
+
_keepdims_parameter_doc)
|
| 667 |
+
else:
|
| 668 |
+
doc['Parameters'].append(_keepdims_parameter_doc)
|
| 669 |
+
doc['Notes'] += _standard_note_addition
|
| 670 |
+
doc = str(doc).split("\n", 1)[1] # remove signature
|
| 671 |
+
axis_nan_policy_wrapper.__doc__ = str(doc)
|
| 672 |
+
|
| 673 |
+
sig = inspect.signature(axis_nan_policy_wrapper)
|
| 674 |
+
parameters = sig.parameters
|
| 675 |
+
parameter_list = list(parameters.values())
|
| 676 |
+
if 'axis' not in parameters:
|
| 677 |
+
parameter_list.append(_axis_parameter)
|
| 678 |
+
if 'nan_policy' not in parameters:
|
| 679 |
+
parameter_list.append(_nan_policy_parameter)
|
| 680 |
+
if 'keepdims' not in parameters:
|
| 681 |
+
parameter_list.append(_keepdims_parameter)
|
| 682 |
+
sig = sig.replace(parameters=parameter_list)
|
| 683 |
+
axis_nan_policy_wrapper.__signature__ = sig
|
| 684 |
+
|
| 685 |
+
return axis_nan_policy_wrapper
|
| 686 |
+
return axis_nan_policy_decorator
|
parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Declare the class with cdef
|
| 2 |
+
cdef extern from "biasedurn/stocc.h" nogil:
|
| 3 |
+
cdef cppclass CFishersNCHypergeometric:
|
| 4 |
+
CFishersNCHypergeometric(int, int, int, double, double) except +
|
| 5 |
+
int mode()
|
| 6 |
+
double mean()
|
| 7 |
+
double variance()
|
| 8 |
+
double probability(int x)
|
| 9 |
+
double moments(double * mean, double * var)
|
| 10 |
+
|
| 11 |
+
cdef cppclass CWalleniusNCHypergeometric:
|
| 12 |
+
CWalleniusNCHypergeometric() except +
|
| 13 |
+
CWalleniusNCHypergeometric(int, int, int, double, double) except +
|
| 14 |
+
int mode()
|
| 15 |
+
double mean()
|
| 16 |
+
double variance()
|
| 17 |
+
double probability(int x)
|
| 18 |
+
double moments(double * mean, double * var)
|
| 19 |
+
|
| 20 |
+
cdef cppclass StochasticLib3:
|
| 21 |
+
StochasticLib3(int seed) except +
|
| 22 |
+
double Random() except +
|
| 23 |
+
void SetAccuracy(double accur)
|
| 24 |
+
int FishersNCHyp (int n, int m, int N, double odds) except +
|
| 25 |
+
int WalleniusNCHyp (int n, int m, int N, double odds) except +
|
| 26 |
+
double(*next_double)()
|
| 27 |
+
double(*next_normal)(const double m, const double s)
|
parrot/lib/python3.10/site-packages/scipy/stats/_binomtest.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from math import sqrt
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy._lib._util import _validate_int
|
| 4 |
+
from scipy.optimize import brentq
|
| 5 |
+
from scipy.special import ndtri
|
| 6 |
+
from ._discrete_distns import binom
|
| 7 |
+
from ._common import ConfidenceInterval
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class BinomTestResult:
|
| 11 |
+
"""
|
| 12 |
+
Result of `scipy.stats.binomtest`.
|
| 13 |
+
|
| 14 |
+
Attributes
|
| 15 |
+
----------
|
| 16 |
+
k : int
|
| 17 |
+
The number of successes (copied from `binomtest` input).
|
| 18 |
+
n : int
|
| 19 |
+
The number of trials (copied from `binomtest` input).
|
| 20 |
+
alternative : str
|
| 21 |
+
Indicates the alternative hypothesis specified in the input
|
| 22 |
+
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
|
| 23 |
+
or ``'less'``.
|
| 24 |
+
statistic: float
|
| 25 |
+
The estimate of the proportion of successes.
|
| 26 |
+
pvalue : float
|
| 27 |
+
The p-value of the hypothesis test.
|
| 28 |
+
|
| 29 |
+
"""
|
| 30 |
+
def __init__(self, k, n, alternative, statistic, pvalue):
|
| 31 |
+
self.k = k
|
| 32 |
+
self.n = n
|
| 33 |
+
self.alternative = alternative
|
| 34 |
+
self.statistic = statistic
|
| 35 |
+
self.pvalue = pvalue
|
| 36 |
+
|
| 37 |
+
# add alias for backward compatibility
|
| 38 |
+
self.proportion_estimate = statistic
|
| 39 |
+
|
| 40 |
+
def __repr__(self):
|
| 41 |
+
s = ("BinomTestResult("
|
| 42 |
+
f"k={self.k}, "
|
| 43 |
+
f"n={self.n}, "
|
| 44 |
+
f"alternative={self.alternative!r}, "
|
| 45 |
+
f"statistic={self.statistic}, "
|
| 46 |
+
f"pvalue={self.pvalue})")
|
| 47 |
+
return s
|
| 48 |
+
|
| 49 |
+
def proportion_ci(self, confidence_level=0.95, method='exact'):
|
| 50 |
+
"""
|
| 51 |
+
Compute the confidence interval for ``statistic``.
|
| 52 |
+
|
| 53 |
+
Parameters
|
| 54 |
+
----------
|
| 55 |
+
confidence_level : float, optional
|
| 56 |
+
Confidence level for the computed confidence interval
|
| 57 |
+
of the estimated proportion. Default is 0.95.
|
| 58 |
+
method : {'exact', 'wilson', 'wilsoncc'}, optional
|
| 59 |
+
Selects the method used to compute the confidence interval
|
| 60 |
+
for the estimate of the proportion:
|
| 61 |
+
|
| 62 |
+
'exact' :
|
| 63 |
+
Use the Clopper-Pearson exact method [1]_.
|
| 64 |
+
'wilson' :
|
| 65 |
+
Wilson's method, without continuity correction ([2]_, [3]_).
|
| 66 |
+
'wilsoncc' :
|
| 67 |
+
Wilson's method, with continuity correction ([2]_, [3]_).
|
| 68 |
+
|
| 69 |
+
Default is ``'exact'``.
|
| 70 |
+
|
| 71 |
+
Returns
|
| 72 |
+
-------
|
| 73 |
+
ci : ``ConfidenceInterval`` object
|
| 74 |
+
The object has attributes ``low`` and ``high`` that hold the
|
| 75 |
+
lower and upper bounds of the confidence interval.
|
| 76 |
+
|
| 77 |
+
References
|
| 78 |
+
----------
|
| 79 |
+
.. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
|
| 80 |
+
fiducial limits illustrated in the case of the binomial,
|
| 81 |
+
Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
|
| 82 |
+
.. [2] E. B. Wilson, Probable inference, the law of succession, and
|
| 83 |
+
statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
|
| 84 |
+
(1927).
|
| 85 |
+
.. [3] Robert G. Newcombe, Two-sided confidence intervals for the
|
| 86 |
+
single proportion: comparison of seven methods, Statistics
|
| 87 |
+
in Medicine, 17, pp 857-872 (1998).
|
| 88 |
+
|
| 89 |
+
Examples
|
| 90 |
+
--------
|
| 91 |
+
>>> from scipy.stats import binomtest
|
| 92 |
+
>>> result = binomtest(k=7, n=50, p=0.1)
|
| 93 |
+
>>> result.statistic
|
| 94 |
+
0.14
|
| 95 |
+
>>> result.proportion_ci()
|
| 96 |
+
ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
|
| 97 |
+
"""
|
| 98 |
+
if method not in ('exact', 'wilson', 'wilsoncc'):
|
| 99 |
+
raise ValueError(f"method ('{method}') must be one of 'exact', "
|
| 100 |
+
"'wilson' or 'wilsoncc'.")
|
| 101 |
+
if not (0 <= confidence_level <= 1):
|
| 102 |
+
raise ValueError(f'confidence_level ({confidence_level}) must be in '
|
| 103 |
+
'the interval [0, 1].')
|
| 104 |
+
if method == 'exact':
|
| 105 |
+
low, high = _binom_exact_conf_int(self.k, self.n,
|
| 106 |
+
confidence_level,
|
| 107 |
+
self.alternative)
|
| 108 |
+
else:
|
| 109 |
+
# method is 'wilson' or 'wilsoncc'
|
| 110 |
+
low, high = _binom_wilson_conf_int(self.k, self.n,
|
| 111 |
+
confidence_level,
|
| 112 |
+
self.alternative,
|
| 113 |
+
correction=method == 'wilsoncc')
|
| 114 |
+
return ConfidenceInterval(low=low, high=high)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _findp(func):
|
| 118 |
+
try:
|
| 119 |
+
p = brentq(func, 0, 1)
|
| 120 |
+
except RuntimeError:
|
| 121 |
+
raise RuntimeError('numerical solver failed to converge when '
|
| 122 |
+
'computing the confidence limits') from None
|
| 123 |
+
except ValueError as exc:
|
| 124 |
+
raise ValueError('brentq raised a ValueError; report this to the '
|
| 125 |
+
'SciPy developers') from exc
|
| 126 |
+
return p
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def _binom_exact_conf_int(k, n, confidence_level, alternative):
|
| 130 |
+
"""
|
| 131 |
+
Compute the estimate and confidence interval for the binomial test.
|
| 132 |
+
|
| 133 |
+
Returns proportion, prop_low, prop_high
|
| 134 |
+
"""
|
| 135 |
+
if alternative == 'two-sided':
|
| 136 |
+
alpha = (1 - confidence_level) / 2
|
| 137 |
+
if k == 0:
|
| 138 |
+
plow = 0.0
|
| 139 |
+
else:
|
| 140 |
+
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
|
| 141 |
+
if k == n:
|
| 142 |
+
phigh = 1.0
|
| 143 |
+
else:
|
| 144 |
+
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
|
| 145 |
+
elif alternative == 'less':
|
| 146 |
+
alpha = 1 - confidence_level
|
| 147 |
+
plow = 0.0
|
| 148 |
+
if k == n:
|
| 149 |
+
phigh = 1.0
|
| 150 |
+
else:
|
| 151 |
+
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
|
| 152 |
+
elif alternative == 'greater':
|
| 153 |
+
alpha = 1 - confidence_level
|
| 154 |
+
if k == 0:
|
| 155 |
+
plow = 0.0
|
| 156 |
+
else:
|
| 157 |
+
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
|
| 158 |
+
phigh = 1.0
|
| 159 |
+
return plow, phigh
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
|
| 163 |
+
# This function assumes that the arguments have already been validated.
|
| 164 |
+
# In particular, `alternative` must be one of 'two-sided', 'less' or
|
| 165 |
+
# 'greater'.
|
| 166 |
+
p = k / n
|
| 167 |
+
if alternative == 'two-sided':
|
| 168 |
+
z = ndtri(0.5 + 0.5*confidence_level)
|
| 169 |
+
else:
|
| 170 |
+
z = ndtri(confidence_level)
|
| 171 |
+
|
| 172 |
+
# For reference, the formulas implemented here are from
|
| 173 |
+
# Newcombe (1998) (ref. [3] in the proportion_ci docstring).
|
| 174 |
+
denom = 2*(n + z**2)
|
| 175 |
+
center = (2*n*p + z**2)/denom
|
| 176 |
+
q = 1 - p
|
| 177 |
+
if correction:
|
| 178 |
+
if alternative == 'less' or k == 0:
|
| 179 |
+
lo = 0.0
|
| 180 |
+
else:
|
| 181 |
+
dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
|
| 182 |
+
lo = center - dlo
|
| 183 |
+
if alternative == 'greater' or k == n:
|
| 184 |
+
hi = 1.0
|
| 185 |
+
else:
|
| 186 |
+
dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
|
| 187 |
+
hi = center + dhi
|
| 188 |
+
else:
|
| 189 |
+
delta = z/denom * sqrt(4*n*p*q + z**2)
|
| 190 |
+
if alternative == 'less' or k == 0:
|
| 191 |
+
lo = 0.0
|
| 192 |
+
else:
|
| 193 |
+
lo = center - delta
|
| 194 |
+
if alternative == 'greater' or k == n:
|
| 195 |
+
hi = 1.0
|
| 196 |
+
else:
|
| 197 |
+
hi = center + delta
|
| 198 |
+
|
| 199 |
+
return lo, hi
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def binomtest(k, n, p=0.5, alternative='two-sided'):
|
| 203 |
+
"""
|
| 204 |
+
Perform a test that the probability of success is p.
|
| 205 |
+
|
| 206 |
+
The binomial test [1]_ is a test of the null hypothesis that the
|
| 207 |
+
probability of success in a Bernoulli experiment is `p`.
|
| 208 |
+
|
| 209 |
+
Details of the test can be found in many texts on statistics, such
|
| 210 |
+
as section 24.5 of [2]_.
|
| 211 |
+
|
| 212 |
+
Parameters
|
| 213 |
+
----------
|
| 214 |
+
k : int
|
| 215 |
+
The number of successes.
|
| 216 |
+
n : int
|
| 217 |
+
The number of trials.
|
| 218 |
+
p : float, optional
|
| 219 |
+
The hypothesized probability of success, i.e. the expected
|
| 220 |
+
proportion of successes. The value must be in the interval
|
| 221 |
+
``0 <= p <= 1``. The default value is ``p = 0.5``.
|
| 222 |
+
alternative : {'two-sided', 'greater', 'less'}, optional
|
| 223 |
+
Indicates the alternative hypothesis. The default value is
|
| 224 |
+
'two-sided'.
|
| 225 |
+
|
| 226 |
+
Returns
|
| 227 |
+
-------
|
| 228 |
+
result : `~scipy.stats._result_classes.BinomTestResult` instance
|
| 229 |
+
The return value is an object with the following attributes:
|
| 230 |
+
|
| 231 |
+
k : int
|
| 232 |
+
The number of successes (copied from `binomtest` input).
|
| 233 |
+
n : int
|
| 234 |
+
The number of trials (copied from `binomtest` input).
|
| 235 |
+
alternative : str
|
| 236 |
+
Indicates the alternative hypothesis specified in the input
|
| 237 |
+
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
|
| 238 |
+
or ``'less'``.
|
| 239 |
+
statistic : float
|
| 240 |
+
The estimate of the proportion of successes.
|
| 241 |
+
pvalue : float
|
| 242 |
+
The p-value of the hypothesis test.
|
| 243 |
+
|
| 244 |
+
The object has the following methods:
|
| 245 |
+
|
| 246 |
+
proportion_ci(confidence_level=0.95, method='exact') :
|
| 247 |
+
Compute the confidence interval for ``statistic``.
|
| 248 |
+
|
| 249 |
+
Notes
|
| 250 |
+
-----
|
| 251 |
+
.. versionadded:: 1.7.0
|
| 252 |
+
|
| 253 |
+
References
|
| 254 |
+
----------
|
| 255 |
+
.. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
|
| 256 |
+
.. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
|
| 257 |
+
Prentice Hall, Upper Saddle River, New Jersey USA (2010)
|
| 258 |
+
|
| 259 |
+
Examples
|
| 260 |
+
--------
|
| 261 |
+
>>> from scipy.stats import binomtest
|
| 262 |
+
|
| 263 |
+
A car manufacturer claims that no more than 10% of their cars are unsafe.
|
| 264 |
+
15 cars are inspected for safety, 3 were found to be unsafe. Test the
|
| 265 |
+
manufacturer's claim:
|
| 266 |
+
|
| 267 |
+
>>> result = binomtest(3, n=15, p=0.1, alternative='greater')
|
| 268 |
+
>>> result.pvalue
|
| 269 |
+
0.18406106910639114
|
| 270 |
+
|
| 271 |
+
The null hypothesis cannot be rejected at the 5% level of significance
|
| 272 |
+
because the returned p-value is greater than the critical value of 5%.
|
| 273 |
+
|
| 274 |
+
The test statistic is equal to the estimated proportion, which is simply
|
| 275 |
+
``3/15``:
|
| 276 |
+
|
| 277 |
+
>>> result.statistic
|
| 278 |
+
0.2
|
| 279 |
+
|
| 280 |
+
We can use the `proportion_ci()` method of the result to compute the
|
| 281 |
+
confidence interval of the estimate:
|
| 282 |
+
|
| 283 |
+
>>> result.proportion_ci(confidence_level=0.95)
|
| 284 |
+
ConfidenceInterval(low=0.05684686759024681, high=1.0)
|
| 285 |
+
|
| 286 |
+
"""
|
| 287 |
+
k = _validate_int(k, 'k', minimum=0)
|
| 288 |
+
n = _validate_int(n, 'n', minimum=1)
|
| 289 |
+
if k > n:
|
| 290 |
+
raise ValueError(f'k ({k}) must not be greater than n ({n}).')
|
| 291 |
+
|
| 292 |
+
if not (0 <= p <= 1):
|
| 293 |
+
raise ValueError(f"p ({p}) must be in range [0,1]")
|
| 294 |
+
|
| 295 |
+
if alternative not in ('two-sided', 'less', 'greater'):
|
| 296 |
+
raise ValueError(f"alternative ('{alternative}') not recognized; \n"
|
| 297 |
+
"must be 'two-sided', 'less' or 'greater'")
|
| 298 |
+
if alternative == 'less':
|
| 299 |
+
pval = binom.cdf(k, n, p)
|
| 300 |
+
elif alternative == 'greater':
|
| 301 |
+
pval = binom.sf(k-1, n, p)
|
| 302 |
+
else:
|
| 303 |
+
# alternative is 'two-sided'
|
| 304 |
+
d = binom.pmf(k, n, p)
|
| 305 |
+
rerr = 1 + 1e-7
|
| 306 |
+
if k == p * n:
|
| 307 |
+
# special case as shortcut, would also be handled by `else` below
|
| 308 |
+
pval = 1.
|
| 309 |
+
elif k < p * n:
|
| 310 |
+
ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
|
| 311 |
+
-d*rerr, np.ceil(p * n), n)
|
| 312 |
+
# y is the number of terms between mode and n that are <= d*rerr.
|
| 313 |
+
# ix gave us the first term where a(ix) <= d*rerr < a(ix-1)
|
| 314 |
+
# if the first equality doesn't hold, y=n-ix. Otherwise, we
|
| 315 |
+
# need to include ix as well as the equality holds. Note that
|
| 316 |
+
# the equality will hold in very very rare situations due to rerr.
|
| 317 |
+
y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
|
| 318 |
+
pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
|
| 319 |
+
else:
|
| 320 |
+
ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
|
| 321 |
+
d*rerr, 0, np.floor(p * n))
|
| 322 |
+
# y is the number of terms between 0 and mode that are <= d*rerr.
|
| 323 |
+
# we need to add a 1 to account for the 0 index.
|
| 324 |
+
# For comparing this with old behavior, see
|
| 325 |
+
# tst_binary_srch_for_binom_tst method in test_morestats.
|
| 326 |
+
y = ix + 1
|
| 327 |
+
pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
|
| 328 |
+
|
| 329 |
+
pval = min(1.0, pval)
|
| 330 |
+
|
| 331 |
+
result = BinomTestResult(k=k, n=n, alternative=alternative,
|
| 332 |
+
statistic=k/n, pvalue=pval)
|
| 333 |
+
return result
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def _binary_search_for_binom_tst(a, d, lo, hi):
|
| 337 |
+
"""
|
| 338 |
+
Conducts an implicit binary search on a function specified by `a`.
|
| 339 |
+
|
| 340 |
+
Meant to be used on the binomial PMF for the case of two-sided tests
|
| 341 |
+
to obtain the value on the other side of the mode where the tail
|
| 342 |
+
probability should be computed. The values on either side of
|
| 343 |
+
the mode are always in order, meaning binary search is applicable.
|
| 344 |
+
|
| 345 |
+
Parameters
|
| 346 |
+
----------
|
| 347 |
+
a : callable
|
| 348 |
+
The function over which to perform binary search. Its values
|
| 349 |
+
for inputs lo and hi should be in ascending order.
|
| 350 |
+
d : float
|
| 351 |
+
The value to search.
|
| 352 |
+
lo : int
|
| 353 |
+
The lower end of range to search.
|
| 354 |
+
hi : int
|
| 355 |
+
The higher end of the range to search.
|
| 356 |
+
|
| 357 |
+
Returns
|
| 358 |
+
-------
|
| 359 |
+
int
|
| 360 |
+
The index, i between lo and hi
|
| 361 |
+
such that a(i)<=d<a(i+1)
|
| 362 |
+
"""
|
| 363 |
+
while lo < hi:
|
| 364 |
+
mid = lo + (hi-lo)//2
|
| 365 |
+
midval = a(mid)
|
| 366 |
+
if midval < d:
|
| 367 |
+
lo = mid+1
|
| 368 |
+
elif midval > d:
|
| 369 |
+
hi = mid-1
|
| 370 |
+
else:
|
| 371 |
+
return mid
|
| 372 |
+
if a(lo) <= d:
|
| 373 |
+
return lo
|
| 374 |
+
else:
|
| 375 |
+
return lo-1
|
parrot/lib/python3.10/site-packages/scipy/stats/_bws_test.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from functools import partial
|
| 3 |
+
from scipy import stats
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _bws_input_validation(x, y, alternative, method):
|
| 7 |
+
''' Input validation and standardization for bws test'''
|
| 8 |
+
x, y = np.atleast_1d(x, y)
|
| 9 |
+
if x.ndim > 1 or y.ndim > 1:
|
| 10 |
+
raise ValueError('`x` and `y` must be exactly one-dimensional.')
|
| 11 |
+
if np.isnan(x).any() or np.isnan(y).any():
|
| 12 |
+
raise ValueError('`x` and `y` must not contain NaNs.')
|
| 13 |
+
if np.size(x) == 0 or np.size(y) == 0:
|
| 14 |
+
raise ValueError('`x` and `y` must be of nonzero size.')
|
| 15 |
+
|
| 16 |
+
z = stats.rankdata(np.concatenate((x, y)))
|
| 17 |
+
x, y = z[:len(x)], z[len(x):]
|
| 18 |
+
|
| 19 |
+
alternatives = {'two-sided', 'less', 'greater'}
|
| 20 |
+
alternative = alternative.lower()
|
| 21 |
+
if alternative not in alternatives:
|
| 22 |
+
raise ValueError(f'`alternative` must be one of {alternatives}.')
|
| 23 |
+
|
| 24 |
+
method = stats.PermutationMethod() if method is None else method
|
| 25 |
+
if not isinstance(method, stats.PermutationMethod):
|
| 26 |
+
raise ValueError('`method` must be an instance of '
|
| 27 |
+
'`scipy.stats.PermutationMethod`')
|
| 28 |
+
|
| 29 |
+
return x, y, alternative, method
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _bws_statistic(x, y, alternative, axis):
|
| 33 |
+
'''Compute the BWS test statistic for two independent samples'''
|
| 34 |
+
# Public function currently does not accept `axis`, but `permutation_test`
|
| 35 |
+
# uses `axis` to make vectorized call.
|
| 36 |
+
|
| 37 |
+
Ri, Hj = np.sort(x, axis=axis), np.sort(y, axis=axis)
|
| 38 |
+
n, m = Ri.shape[axis], Hj.shape[axis]
|
| 39 |
+
i, j = np.arange(1, n+1), np.arange(1, m+1)
|
| 40 |
+
|
| 41 |
+
Bx_num = Ri - (m + n)/n * i
|
| 42 |
+
By_num = Hj - (m + n)/m * j
|
| 43 |
+
|
| 44 |
+
if alternative == 'two-sided':
|
| 45 |
+
Bx_num *= Bx_num
|
| 46 |
+
By_num *= By_num
|
| 47 |
+
else:
|
| 48 |
+
Bx_num *= np.abs(Bx_num)
|
| 49 |
+
By_num *= np.abs(By_num)
|
| 50 |
+
|
| 51 |
+
Bx_den = i/(n+1) * (1 - i/(n+1)) * m*(m+n)/n
|
| 52 |
+
By_den = j/(m+1) * (1 - j/(m+1)) * n*(m+n)/m
|
| 53 |
+
|
| 54 |
+
Bx = 1/n * np.sum(Bx_num/Bx_den, axis=axis)
|
| 55 |
+
By = 1/m * np.sum(By_num/By_den, axis=axis)
|
| 56 |
+
|
| 57 |
+
B = (Bx + By) / 2 if alternative == 'two-sided' else (Bx - By) / 2
|
| 58 |
+
|
| 59 |
+
return B
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def bws_test(x, y, *, alternative="two-sided", method=None):
    r'''Perform the Baumgartner-Weiss-Schindler test on two independent samples.

    The BWS test is a nonparametric test of the null hypothesis that the
    distribution underlying sample `x` is the same as the distribution
    underlying sample `y`.  Unlike the Kolmogorov-Smirnov, Wilcoxon, and
    Cramer-von Mises tests, it weights the integral by the variance of the
    difference in cumulative distribution functions (CDFs), emphasizing the
    tails and increasing the power of the test in many applications.

    Parameters
    ----------
    x, y : array-like
        1-d arrays of samples.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis; default is 'two-sided'.
        With *F(u)* and *G(u)* the CDFs underlying `x` and `y`:

        * 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)*
          for at least one *u*.
        * 'less': the distribution underlying `x` is stochastically less
          than that underlying `y`, i.e. *F(u) >= G(u)* for all *u*.
        * 'greater': the distribution underlying `x` is stochastically
          greater than that underlying `y`, i.e. *F(u) <= G(u)* for all *u*.

    method : PermutationMethod, optional
        Configures the method used to compute the p-value.  The default is
        the default `PermutationMethod` object.

    Returns
    -------
    res : PermutationTestResult
        An object with attributes ``statistic`` (observed test statistic),
        ``pvalue`` (for the given alternative), and ``null_distribution``
        (statistic values generated under the null hypothesis).

    See also
    --------
    scipy.stats.wilcoxon, scipy.stats.mannwhitneyu, scipy.stats.ttest_ind

    Notes
    -----
    When ``alternative == 'two-sided'``, the statistic is defined by the
    equations given in [1]_ Section 2.  That statistic is not appropriate
    for one-sided alternatives; in that case the statistic is the
    *negative* of the one given there, so it tends to be positive when the
    distribution of the first sample is stochastically greater than that
    of the second.

    References
    ----------
    .. [1] Neuhäuser, M. (2005). Exact Tests Based on the
           Baumgartner-Weiss-Schindler Statistic: A Survey. Statistical
           Papers, 46(1), 1-29.
    .. [2] Fay, M. P., & Proschan, M. A. (2010). Wilcoxon-Mann-Whitney or
           t-test? On assumptions for hypothesis tests and multiple
           interpretations of decision rules. Statistics surveys, 4, 1.

    Examples
    --------
    Following table 3 of [1]_, fourteen children were divided randomly into
    two groups; their ranks at performing a specific test were:

    >>> x = [1, 2, 3, 4, 6, 7, 8]
    >>> y = [5, 9, 10, 11, 12, 13, 14]
    >>> from scipy.stats import bws_test
    >>> res = bws_test(x, y)
    >>> print(res.statistic)
    5.132167152575315

    This agrees with :math:`B = 5.132` reported in [1]_, as does the
    p-value :math:`p = 0.0029`:

    >>> print(res.pvalue)
    0.002913752913752914
    '''
    x, y, alternative, method = _bws_input_validation(x, y, alternative,
                                                      method)
    statistic = partial(_bws_statistic, alternative=alternative)

    # `_bws_statistic` already folds the direction of a one-sided test into
    # the sign of the statistic, so 'greater' and 'two-sided' both use a
    # 'greater' permutation alternative; only 'less' maps to 'less'.
    perm_alternative = 'less' if alternative == 'less' else 'greater'
    return stats.permutation_test((x, y), statistic,
                                  alternative=perm_alternative,
                                  **method._asdict())
|
parrot/lib/python3.10/site-packages/scipy/stats/_censored_data.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def _validate_1d(a, name, allow_inf=False):
|
| 5 |
+
if np.ndim(a) != 1:
|
| 6 |
+
raise ValueError(f'`{name}` must be a one-dimensional sequence.')
|
| 7 |
+
if np.isnan(a).any():
|
| 8 |
+
raise ValueError(f'`{name}` must not contain nan.')
|
| 9 |
+
if not allow_inf and np.isinf(a).any():
|
| 10 |
+
raise ValueError(f'`{name}` must contain only finite values.')
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _validate_interval(interval):
|
| 14 |
+
interval = np.asarray(interval)
|
| 15 |
+
if interval.shape == (0,):
|
| 16 |
+
# The input was a sequence with length 0.
|
| 17 |
+
interval = interval.reshape((0, 2))
|
| 18 |
+
if interval.ndim != 2 or interval.shape[-1] != 2:
|
| 19 |
+
raise ValueError('`interval` must be a two-dimensional array with '
|
| 20 |
+
'shape (m, 2), where m is the number of '
|
| 21 |
+
'interval-censored values, but got shape '
|
| 22 |
+
f'{interval.shape}')
|
| 23 |
+
|
| 24 |
+
if np.isnan(interval).any():
|
| 25 |
+
raise ValueError('`interval` must not contain nan.')
|
| 26 |
+
if np.isinf(interval).all(axis=1).any():
|
| 27 |
+
raise ValueError('In each row in `interval`, both values must not'
|
| 28 |
+
' be infinite.')
|
| 29 |
+
if (interval[:, 0] > interval[:, 1]).any():
|
| 30 |
+
raise ValueError('In each row of `interval`, the left value must not'
|
| 31 |
+
' exceed the right value.')
|
| 32 |
+
|
| 33 |
+
uncensored_mask = interval[:, 0] == interval[:, 1]
|
| 34 |
+
left_mask = np.isinf(interval[:, 0])
|
| 35 |
+
right_mask = np.isinf(interval[:, 1])
|
| 36 |
+
interval_mask = np.isfinite(interval).all(axis=1) & ~uncensored_mask
|
| 37 |
+
|
| 38 |
+
uncensored2 = interval[uncensored_mask, 0]
|
| 39 |
+
left2 = interval[left_mask, 1]
|
| 40 |
+
right2 = interval[right_mask, 0]
|
| 41 |
+
interval2 = interval[interval_mask]
|
| 42 |
+
|
| 43 |
+
return uncensored2, left2, right2, interval2
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _validate_x_censored(x, censored):
|
| 47 |
+
x = np.asarray(x)
|
| 48 |
+
if x.ndim != 1:
|
| 49 |
+
raise ValueError('`x` must be one-dimensional.')
|
| 50 |
+
censored = np.asarray(censored)
|
| 51 |
+
if censored.ndim != 1:
|
| 52 |
+
raise ValueError('`censored` must be one-dimensional.')
|
| 53 |
+
if (~np.isfinite(x)).any():
|
| 54 |
+
raise ValueError('`x` must not contain nan or inf.')
|
| 55 |
+
if censored.size != x.size:
|
| 56 |
+
raise ValueError('`x` and `censored` must have the same length.')
|
| 57 |
+
return x, censored.astype(bool)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class CensoredData:
|
| 61 |
+
"""
|
| 62 |
+
Instances of this class represent censored data.
|
| 63 |
+
|
| 64 |
+
Instances may be passed to the ``fit`` method of continuous
|
| 65 |
+
univariate SciPy distributions for maximum likelihood estimation.
|
| 66 |
+
The *only* method of the univariate continuous distributions that
|
| 67 |
+
understands `CensoredData` is the ``fit`` method. An instance of
|
| 68 |
+
`CensoredData` can not be passed to methods such as ``pdf`` and
|
| 69 |
+
``cdf``.
|
| 70 |
+
|
| 71 |
+
An observation is said to be *censored* when the precise value is unknown,
|
| 72 |
+
but it has a known upper and/or lower bound. The conventional terminology
|
| 73 |
+
is:
|
| 74 |
+
|
| 75 |
+
* left-censored: an observation is below a certain value but it is
|
| 76 |
+
unknown by how much.
|
| 77 |
+
* right-censored: an observation is above a certain value but it is
|
| 78 |
+
unknown by how much.
|
| 79 |
+
* interval-censored: an observation lies somewhere on an interval between
|
| 80 |
+
two values.
|
| 81 |
+
|
| 82 |
+
Left-, right-, and interval-censored data can be represented by
|
| 83 |
+
`CensoredData`.
|
| 84 |
+
|
| 85 |
+
For convenience, the class methods ``left_censored`` and
|
| 86 |
+
``right_censored`` are provided to create a `CensoredData`
|
| 87 |
+
instance from a single one-dimensional array of measurements
|
| 88 |
+
and a corresponding boolean array to indicate which measurements
|
| 89 |
+
are censored. The class method ``interval_censored`` accepts two
|
| 90 |
+
one-dimensional arrays that hold the lower and upper bounds of the
|
| 91 |
+
intervals.
|
| 92 |
+
|
| 93 |
+
Parameters
|
| 94 |
+
----------
|
| 95 |
+
uncensored : array_like, 1D
|
| 96 |
+
Uncensored observations.
|
| 97 |
+
left : array_like, 1D
|
| 98 |
+
Left-censored observations.
|
| 99 |
+
right : array_like, 1D
|
| 100 |
+
Right-censored observations.
|
| 101 |
+
interval : array_like, 2D, with shape (m, 2)
|
| 102 |
+
Interval-censored observations. Each row ``interval[k, :]``
|
| 103 |
+
represents the interval for the kth interval-censored observation.
|
| 104 |
+
|
| 105 |
+
Notes
|
| 106 |
+
-----
|
| 107 |
+
In the input array `interval`, the lower bound of the interval may
|
| 108 |
+
be ``-inf``, and the upper bound may be ``inf``, but at least one must be
|
| 109 |
+
finite. When the lower bound is ``-inf``, the row represents a left-
|
| 110 |
+
censored observation, and when the upper bound is ``inf``, the row
|
| 111 |
+
represents a right-censored observation. If the length of an interval
|
| 112 |
+
is 0 (i.e. ``interval[k, 0] == interval[k, 1]``, the observation is
|
| 113 |
+
treated as uncensored. So one can represent all the types of censored
|
| 114 |
+
and uncensored data in ``interval``, but it is generally more convenient
|
| 115 |
+
to use `uncensored`, `left` and `right` for uncensored, left-censored and
|
| 116 |
+
right-censored observations, respectively.
|
| 117 |
+
|
| 118 |
+
Examples
|
| 119 |
+
--------
|
| 120 |
+
In the most general case, a censored data set may contain values that
|
| 121 |
+
are left-censored, right-censored, interval-censored, and uncensored.
|
| 122 |
+
For example, here we create a data set with five observations. Two
|
| 123 |
+
are uncensored (values 1 and 1.5), one is a left-censored observation
|
| 124 |
+
of 0, one is a right-censored observation of 10 and one is
|
| 125 |
+
interval-censored in the interval [2, 3].
|
| 126 |
+
|
| 127 |
+
>>> import numpy as np
|
| 128 |
+
>>> from scipy.stats import CensoredData
|
| 129 |
+
>>> data = CensoredData(uncensored=[1, 1.5], left=[0], right=[10],
|
| 130 |
+
... interval=[[2, 3]])
|
| 131 |
+
>>> print(data)
|
| 132 |
+
CensoredData(5 values: 2 not censored, 1 left-censored,
|
| 133 |
+
1 right-censored, 1 interval-censored)
|
| 134 |
+
|
| 135 |
+
Equivalently,
|
| 136 |
+
|
| 137 |
+
>>> data = CensoredData(interval=[[1, 1],
|
| 138 |
+
... [1.5, 1.5],
|
| 139 |
+
... [-np.inf, 0],
|
| 140 |
+
... [10, np.inf],
|
| 141 |
+
... [2, 3]])
|
| 142 |
+
>>> print(data)
|
| 143 |
+
CensoredData(5 values: 2 not censored, 1 left-censored,
|
| 144 |
+
1 right-censored, 1 interval-censored)
|
| 145 |
+
|
| 146 |
+
A common case is to have a mix of uncensored observations and censored
|
| 147 |
+
observations that are all right-censored (or all left-censored). For
|
| 148 |
+
example, consider an experiment in which six devices are started at
|
| 149 |
+
various times and left running until they fail. Assume that time is
|
| 150 |
+
measured in hours, and the experiment is stopped after 30 hours, even
|
| 151 |
+
if all the devices have not failed by that time. We might end up with
|
| 152 |
+
data such as this::
|
| 153 |
+
|
| 154 |
+
Device Start-time Fail-time Time-to-failure
|
| 155 |
+
1 0 13 13
|
| 156 |
+
2 2 24 22
|
| 157 |
+
3 5 22 17
|
| 158 |
+
4 8 23 15
|
| 159 |
+
5 10 *** >20
|
| 160 |
+
6 12 *** >18
|
| 161 |
+
|
| 162 |
+
Two of the devices had not failed when the experiment was stopped;
|
| 163 |
+
the observations of the time-to-failure for these two devices are
|
| 164 |
+
right-censored. We can represent this data with
|
| 165 |
+
|
| 166 |
+
>>> data = CensoredData(uncensored=[13, 22, 17, 15], right=[20, 18])
|
| 167 |
+
>>> print(data)
|
| 168 |
+
CensoredData(6 values: 4 not censored, 2 right-censored)
|
| 169 |
+
|
| 170 |
+
Alternatively, we can use the method `CensoredData.right_censored` to
|
| 171 |
+
create a representation of this data. The time-to-failure observations
|
| 172 |
+
are put the list ``ttf``. The ``censored`` list indicates which values
|
| 173 |
+
in ``ttf`` are censored.
|
| 174 |
+
|
| 175 |
+
>>> ttf = [13, 22, 17, 15, 20, 18]
|
| 176 |
+
>>> censored = [False, False, False, False, True, True]
|
| 177 |
+
|
| 178 |
+
Pass these lists to `CensoredData.right_censored` to create an
|
| 179 |
+
instance of `CensoredData`.
|
| 180 |
+
|
| 181 |
+
>>> data = CensoredData.right_censored(ttf, censored)
|
| 182 |
+
>>> print(data)
|
| 183 |
+
CensoredData(6 values: 4 not censored, 2 right-censored)
|
| 184 |
+
|
| 185 |
+
If the input data is interval censored and already stored in two
|
| 186 |
+
arrays, one holding the low end of the intervals and another
|
| 187 |
+
holding the high ends, the class method ``interval_censored`` can
|
| 188 |
+
be used to create the `CensoredData` instance.
|
| 189 |
+
|
| 190 |
+
This example creates an instance with four interval-censored values.
|
| 191 |
+
The intervals are [10, 11], [0.5, 1], [2, 3], and [12.5, 13.5].
|
| 192 |
+
|
| 193 |
+
>>> a = [10, 0.5, 2, 12.5] # Low ends of the intervals
|
| 194 |
+
>>> b = [11, 1.0, 3, 13.5] # High ends of the intervals
|
| 195 |
+
>>> data = CensoredData.interval_censored(low=a, high=b)
|
| 196 |
+
>>> print(data)
|
| 197 |
+
CensoredData(4 values: 0 not censored, 4 interval-censored)
|
| 198 |
+
|
| 199 |
+
Finally, we create and censor some data from the `weibull_min`
|
| 200 |
+
distribution, and then fit `weibull_min` to that data. We'll assume
|
| 201 |
+
that the location parameter is known to be 0.
|
| 202 |
+
|
| 203 |
+
>>> from scipy.stats import weibull_min
|
| 204 |
+
>>> rng = np.random.default_rng()
|
| 205 |
+
|
| 206 |
+
Create the random data set.
|
| 207 |
+
|
| 208 |
+
>>> x = weibull_min.rvs(2.5, loc=0, scale=30, size=250, random_state=rng)
|
| 209 |
+
>>> x[x > 40] = 40 # Right-censor values greater or equal to 40.
|
| 210 |
+
|
| 211 |
+
Create the `CensoredData` instance with the `right_censored` method.
|
| 212 |
+
The censored values are those where the value is 40.
|
| 213 |
+
|
| 214 |
+
>>> data = CensoredData.right_censored(x, x == 40)
|
| 215 |
+
>>> print(data)
|
| 216 |
+
CensoredData(250 values: 215 not censored, 35 right-censored)
|
| 217 |
+
|
| 218 |
+
35 values have been right-censored.
|
| 219 |
+
|
| 220 |
+
Fit `weibull_min` to the censored data. We expect to shape and scale
|
| 221 |
+
to be approximately 2.5 and 30, respectively.
|
| 222 |
+
|
| 223 |
+
>>> weibull_min.fit(data, floc=0)
|
| 224 |
+
(2.3575922823897315, 0, 30.40650074451254)
|
| 225 |
+
|
| 226 |
+
"""
|
| 227 |
+
|
| 228 |
+
def __init__(self, uncensored=None, *, left=None, right=None,
|
| 229 |
+
interval=None):
|
| 230 |
+
if uncensored is None:
|
| 231 |
+
uncensored = []
|
| 232 |
+
if left is None:
|
| 233 |
+
left = []
|
| 234 |
+
if right is None:
|
| 235 |
+
right = []
|
| 236 |
+
if interval is None:
|
| 237 |
+
interval = np.empty((0, 2))
|
| 238 |
+
|
| 239 |
+
_validate_1d(uncensored, 'uncensored')
|
| 240 |
+
_validate_1d(left, 'left')
|
| 241 |
+
_validate_1d(right, 'right')
|
| 242 |
+
uncensored2, left2, right2, interval2 = _validate_interval(interval)
|
| 243 |
+
|
| 244 |
+
self._uncensored = np.concatenate((uncensored, uncensored2))
|
| 245 |
+
self._left = np.concatenate((left, left2))
|
| 246 |
+
self._right = np.concatenate((right, right2))
|
| 247 |
+
# Note that by construction, the private attribute _interval
|
| 248 |
+
# will be a 2D array that contains only finite values representing
|
| 249 |
+
# intervals with nonzero but finite length.
|
| 250 |
+
self._interval = interval2
|
| 251 |
+
|
| 252 |
+
def __repr__(self):
|
| 253 |
+
uncensored_str = " ".join(np.array_repr(self._uncensored).split())
|
| 254 |
+
left_str = " ".join(np.array_repr(self._left).split())
|
| 255 |
+
right_str = " ".join(np.array_repr(self._right).split())
|
| 256 |
+
interval_str = " ".join(np.array_repr(self._interval).split())
|
| 257 |
+
return (f"CensoredData(uncensored={uncensored_str}, left={left_str}, "
|
| 258 |
+
f"right={right_str}, interval={interval_str})")
|
| 259 |
+
|
| 260 |
+
def __str__(self):
|
| 261 |
+
num_nc = len(self._uncensored)
|
| 262 |
+
num_lc = len(self._left)
|
| 263 |
+
num_rc = len(self._right)
|
| 264 |
+
num_ic = len(self._interval)
|
| 265 |
+
n = num_nc + num_lc + num_rc + num_ic
|
| 266 |
+
parts = [f'{num_nc} not censored']
|
| 267 |
+
if num_lc > 0:
|
| 268 |
+
parts.append(f'{num_lc} left-censored')
|
| 269 |
+
if num_rc > 0:
|
| 270 |
+
parts.append(f'{num_rc} right-censored')
|
| 271 |
+
if num_ic > 0:
|
| 272 |
+
parts.append(f'{num_ic} interval-censored')
|
| 273 |
+
return f'CensoredData({n} values: ' + ', '.join(parts) + ')'
|
| 274 |
+
|
| 275 |
+
# This is not a complete implementation of the arithmetic operators.
|
| 276 |
+
# All we need is subtracting a scalar and dividing by a scalar.
|
| 277 |
+
|
| 278 |
+
def __sub__(self, other):
|
| 279 |
+
return CensoredData(uncensored=self._uncensored - other,
|
| 280 |
+
left=self._left - other,
|
| 281 |
+
right=self._right - other,
|
| 282 |
+
interval=self._interval - other)
|
| 283 |
+
|
| 284 |
+
def __truediv__(self, other):
|
| 285 |
+
return CensoredData(uncensored=self._uncensored / other,
|
| 286 |
+
left=self._left / other,
|
| 287 |
+
right=self._right / other,
|
| 288 |
+
interval=self._interval / other)
|
| 289 |
+
|
| 290 |
+
def __len__(self):
|
| 291 |
+
"""
|
| 292 |
+
The number of values (censored and not censored).
|
| 293 |
+
"""
|
| 294 |
+
return (len(self._uncensored) + len(self._left) + len(self._right)
|
| 295 |
+
+ len(self._interval))
|
| 296 |
+
|
| 297 |
+
def num_censored(self):
|
| 298 |
+
"""
|
| 299 |
+
Number of censored values.
|
| 300 |
+
"""
|
| 301 |
+
return len(self._left) + len(self._right) + len(self._interval)
|
| 302 |
+
|
| 303 |
+
@classmethod
|
| 304 |
+
def right_censored(cls, x, censored):
|
| 305 |
+
"""
|
| 306 |
+
Create a `CensoredData` instance of right-censored data.
|
| 307 |
+
|
| 308 |
+
Parameters
|
| 309 |
+
----------
|
| 310 |
+
x : array_like
|
| 311 |
+
`x` is the array of observed data or measurements.
|
| 312 |
+
`x` must be a one-dimensional sequence of finite numbers.
|
| 313 |
+
censored : array_like of bool
|
| 314 |
+
`censored` must be a one-dimensional sequence of boolean
|
| 315 |
+
values. If ``censored[k]`` is True, the corresponding value
|
| 316 |
+
in `x` is right-censored. That is, the value ``x[k]``
|
| 317 |
+
is the lower bound of the true (but unknown) value.
|
| 318 |
+
|
| 319 |
+
Returns
|
| 320 |
+
-------
|
| 321 |
+
data : `CensoredData`
|
| 322 |
+
An instance of `CensoredData` that represents the
|
| 323 |
+
collection of uncensored and right-censored values.
|
| 324 |
+
|
| 325 |
+
Examples
|
| 326 |
+
--------
|
| 327 |
+
>>> from scipy.stats import CensoredData
|
| 328 |
+
|
| 329 |
+
Two uncensored values (4 and 10) and two right-censored values
|
| 330 |
+
(24 and 25).
|
| 331 |
+
|
| 332 |
+
>>> data = CensoredData.right_censored([4, 10, 24, 25],
|
| 333 |
+
... [False, False, True, True])
|
| 334 |
+
>>> data
|
| 335 |
+
CensoredData(uncensored=array([ 4., 10.]),
|
| 336 |
+
left=array([], dtype=float64), right=array([24., 25.]),
|
| 337 |
+
interval=array([], shape=(0, 2), dtype=float64))
|
| 338 |
+
>>> print(data)
|
| 339 |
+
CensoredData(4 values: 2 not censored, 2 right-censored)
|
| 340 |
+
"""
|
| 341 |
+
x, censored = _validate_x_censored(x, censored)
|
| 342 |
+
return cls(uncensored=x[~censored], right=x[censored])
|
| 343 |
+
|
| 344 |
+
@classmethod
|
| 345 |
+
def left_censored(cls, x, censored):
|
| 346 |
+
"""
|
| 347 |
+
Create a `CensoredData` instance of left-censored data.
|
| 348 |
+
|
| 349 |
+
Parameters
|
| 350 |
+
----------
|
| 351 |
+
x : array_like
|
| 352 |
+
`x` is the array of observed data or measurements.
|
| 353 |
+
`x` must be a one-dimensional sequence of finite numbers.
|
| 354 |
+
censored : array_like of bool
|
| 355 |
+
`censored` must be a one-dimensional sequence of boolean
|
| 356 |
+
values. If ``censored[k]`` is True, the corresponding value
|
| 357 |
+
in `x` is left-censored. That is, the value ``x[k]``
|
| 358 |
+
is the upper bound of the true (but unknown) value.
|
| 359 |
+
|
| 360 |
+
Returns
|
| 361 |
+
-------
|
| 362 |
+
data : `CensoredData`
|
| 363 |
+
An instance of `CensoredData` that represents the
|
| 364 |
+
collection of uncensored and left-censored values.
|
| 365 |
+
|
| 366 |
+
Examples
|
| 367 |
+
--------
|
| 368 |
+
>>> from scipy.stats import CensoredData
|
| 369 |
+
|
| 370 |
+
Two uncensored values (0.12 and 0.033) and two left-censored values
|
| 371 |
+
(both 1e-3).
|
| 372 |
+
|
| 373 |
+
>>> data = CensoredData.left_censored([0.12, 0.033, 1e-3, 1e-3],
|
| 374 |
+
... [False, False, True, True])
|
| 375 |
+
>>> data
|
| 376 |
+
CensoredData(uncensored=array([0.12 , 0.033]),
|
| 377 |
+
left=array([0.001, 0.001]), right=array([], dtype=float64),
|
| 378 |
+
interval=array([], shape=(0, 2), dtype=float64))
|
| 379 |
+
>>> print(data)
|
| 380 |
+
CensoredData(4 values: 2 not censored, 2 left-censored)
|
| 381 |
+
"""
|
| 382 |
+
x, censored = _validate_x_censored(x, censored)
|
| 383 |
+
return cls(uncensored=x[~censored], left=x[censored])
|
| 384 |
+
|
| 385 |
+
@classmethod
|
| 386 |
+
def interval_censored(cls, low, high):
|
| 387 |
+
"""
|
| 388 |
+
Create a `CensoredData` instance of interval-censored data.
|
| 389 |
+
|
| 390 |
+
This method is useful when all the data is interval-censored, and
|
| 391 |
+
the low and high ends of the intervals are already stored in
|
| 392 |
+
separate one-dimensional arrays.
|
| 393 |
+
|
| 394 |
+
Parameters
|
| 395 |
+
----------
|
| 396 |
+
low : array_like
|
| 397 |
+
The one-dimensional array containing the low ends of the
|
| 398 |
+
intervals.
|
| 399 |
+
high : array_like
|
| 400 |
+
The one-dimensional array containing the high ends of the
|
| 401 |
+
intervals.
|
| 402 |
+
|
| 403 |
+
Returns
|
| 404 |
+
-------
|
| 405 |
+
data : `CensoredData`
|
| 406 |
+
An instance of `CensoredData` that represents the
|
| 407 |
+
collection of censored values.
|
| 408 |
+
|
| 409 |
+
Examples
|
| 410 |
+
--------
|
| 411 |
+
>>> import numpy as np
|
| 412 |
+
>>> from scipy.stats import CensoredData
|
| 413 |
+
|
| 414 |
+
``a`` and ``b`` are the low and high ends of a collection of
|
| 415 |
+
interval-censored values.
|
| 416 |
+
|
| 417 |
+
>>> a = [0.5, 2.0, 3.0, 5.5]
|
| 418 |
+
>>> b = [1.0, 2.5, 3.5, 7.0]
|
| 419 |
+
>>> data = CensoredData.interval_censored(low=a, high=b)
|
| 420 |
+
>>> print(data)
|
| 421 |
+
CensoredData(4 values: 0 not censored, 4 interval-censored)
|
| 422 |
+
"""
|
| 423 |
+
_validate_1d(low, 'low', allow_inf=True)
|
| 424 |
+
_validate_1d(high, 'high', allow_inf=True)
|
| 425 |
+
if len(low) != len(high):
|
| 426 |
+
raise ValueError('`low` and `high` must have the same length.')
|
| 427 |
+
interval = np.column_stack((low, high))
|
| 428 |
+
uncensored, left, right, interval = _validate_interval(interval)
|
| 429 |
+
return cls(uncensored=uncensored, left=left, right=right,
|
| 430 |
+
interval=interval)
|
| 431 |
+
|
| 432 |
+
def _uncensor(self):
|
| 433 |
+
"""
|
| 434 |
+
This function is used when a non-censored version of the data
|
| 435 |
+
is needed to create a rough estimate of the parameters of a
|
| 436 |
+
distribution via the method of moments or some similar method.
|
| 437 |
+
The data is "uncensored" by taking the given endpoints as the
|
| 438 |
+
data for the left- or right-censored data, and the mean for the
|
| 439 |
+
interval-censored data.
|
| 440 |
+
"""
|
| 441 |
+
data = np.concatenate((self._uncensored, self._left, self._right,
|
| 442 |
+
self._interval.mean(axis=1)))
|
| 443 |
+
return data
|
| 444 |
+
|
| 445 |
+
def _supported(self, a, b):
    """
    Return a subset of self containing the values that are in
    (or overlap with) the open interval (a, b).
    """
    uncens = self._uncensored
    keep_uncens = uncens[(a < uncens) & (uncens < b)]
    # A left-censored value (-inf, x] overlaps (a, b) iff x > a.
    lcens = self._left
    keep_left = lcens[a < lcens]
    # A right-censored value [x, inf) overlaps (a, b) iff x < b.
    rcens = self._right
    keep_right = rcens[rcens < b]
    # An interval overlaps (a, b) iff its high end is above a and its
    # low end is below b.
    ivals = self._interval
    keep_ivals = ivals[(a < ivals[:, 1]) & (ivals[:, 0] < b)]
    return CensoredData(keep_uncens, left=keep_left, right=keep_right,
                        interval=keep_ivals)
|
parrot/lib/python3.10/site-packages/scipy/stats/_crosstab.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy.sparse import coo_matrix
|
| 3 |
+
from scipy._lib._bunch import _make_tuple_bunch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Result type returned by `crosstab`: a named-tuple-like bunch with the
# per-axis labels (`elements`) and the contingency table itself (`count`).
CrosstabResult = _make_tuple_bunch("CrosstabResult",
                                   ["elements", "count"])
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def crosstab(*args, levels=None, sparse=False):
    """
    Return table of counts for each possible unique combination in ``*args``.

    When ``len(args) > 1``, the array computed by this function is
    often referred to as a *contingency table* [1]_.

    The arguments must be sequences with the same length.  The second return
    value, `count`, is an integer array with ``len(args)`` dimensions.  If
    `levels` is None, the shape of `count` is ``(n0, n1, ...)``, where ``nk``
    is the number of unique elements in ``args[k]``.

    Parameters
    ----------
    *args : sequences
        A sequence of sequences whose unique aligned elements are to be
        counted.  The sequences in args must all be the same length.
    levels : sequence, optional
        If `levels` is given, it must be a sequence that is the same length as
        `args`.  Each element in `levels` is either a sequence or None.  If it
        is a sequence, it gives the values in the corresponding sequence in
        `args` that are to be counted.  If any value in the sequences in `args`
        does not occur in the corresponding sequence in `levels`, that value
        is ignored and not counted in the returned array `count`.  The default
        value of `levels` for ``args[i]`` is ``np.unique(args[i])``
    sparse : bool, optional
        If True, return a sparse matrix.  The matrix will be an instance of
        the `scipy.sparse.coo_matrix` class.  Because SciPy's sparse matrices
        must be 2-d, only two input sequences are allowed when `sparse` is
        True.  Default is False.

    Returns
    -------
    res : CrosstabResult
        An object containing the following attributes:

        elements : tuple of numpy.ndarrays.
            Tuple of length ``len(args)`` containing the arrays of elements
            that are counted in `count`.  These can be interpreted as the
            labels of the corresponding dimensions of `count`.  If `levels`
            was given, then if ``levels[i]`` is not None, ``elements[i]`` will
            hold the values given in ``levels[i]``.
        count : numpy.ndarray or scipy.sparse.coo_matrix
            Counts of the unique elements in ``zip(*args)``, stored in an
            array.  Also known as a *contingency table* when
            ``len(args) > 1``.

    Raises
    ------
    TypeError
        If no input sequences are given.
    ValueError
        If the input sequences have different lengths, if ``sparse`` is True
        with other than two inputs, or if ``len(levels) != len(args)``.

    See Also
    --------
    numpy.unique

    Notes
    -----
    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] "Contingency table", http://en.wikipedia.org/wiki/Contingency_table

    Examples
    --------
    >>> from scipy.stats.contingency import crosstab

    Given the lists `a` and `x`, create a contingency table that counts the
    frequencies of the corresponding pairs.

    >>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
    >>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
    >>> res = crosstab(a, x)
    >>> avals, xvals = res.elements
    >>> avals
    array(['A', 'B'], dtype='<U1')
    >>> xvals
    array(['X', 'Y', 'Z'], dtype='<U1')
    >>> res.count
    array([[2, 3, 0],
           [1, 0, 4]])

    So `('A', 'X')` occurs twice, `('A', 'Y')` occurs three times, etc.

    The values to be counted can be set by using the `levels` argument.
    For example, suppose one of the arguments holds survey answers with
    integer values 1 to 4; even if a value does not occur in the data, we
    want an entry for it in the table.

    >>> q1 = [2, 3, 3, 2, 4, 4, 2, 3, 4, 4, 4, 3, 3, 3, 4]
    >>> q2 = [4, 4, 2, 2, 2, 4, 1, 1, 2, 2, 4, 2, 2, 2, 4]
    >>> options = [1, 2, 3, 4]
    >>> res = crosstab(q1, q2, levels=(options, options))
    >>> res.count
    array([[0, 0, 0, 0],
           [1, 1, 0, 1],
           [1, 4, 0, 1],
           [0, 3, 0, 3]])

    Finally, let's repeat the first example, but return a sparse matrix:

    >>> res = crosstab(a, x, sparse=True)
    >>> res.count.toarray()
    array([[2, 3, 0],
           [1, 0, 4]])

    """
    nargs = len(args)
    if nargs == 0:
        raise TypeError("At least one input sequence is required.")

    len0 = len(args[0])
    if not all(len(a) == len0 for a in args[1:]):
        raise ValueError("All input sequences must have the same length.")

    if sparse and nargs != 2:
        raise ValueError("When `sparse` is True, only two input sequences "
                         "are allowed.")

    if levels is None:
        # Call np.unique with return_inverse=True on each argument.
        actual_levels, indices = zip(*[np.unique(a, return_inverse=True)
                                       for a in args])
    else:
        # `levels` is not None...
        if len(levels) != nargs:
            raise ValueError('len(levels) must equal the number of input '
                             'sequences')

        args = [np.asarray(arg) for arg in args]
        # mask[k, i] is True iff args[k][i] is one of the accepted levels.
        mask = np.zeros((nargs, len0), dtype=np.bool_)
        # inv[k, i] is the index into actual_levels[k] of args[k][i].
        inv = np.zeros((nargs, len0), dtype=np.intp)
        actual_levels = []
        for k, (levels_list, arg) in enumerate(zip(levels, args)):
            if levels_list is None:
                levels_list, inv[k, :] = np.unique(arg, return_inverse=True)
                mask[k, :] = True
            else:
                # Compare every value against every level at once; qnz maps
                # each matching value to the index of its level.
                q = arg == np.asarray(levels_list).reshape(-1, 1)
                mask[k, :] = np.any(q, axis=0)
                qnz = q.T.nonzero()
                inv[k, qnz[0]] = qnz[1]
            actual_levels.append(levels_list)

        # Keep only the positions where every sequence matched a level.
        mask_all = mask.all(axis=0)
        indices = tuple(inv[:, mask_all])

    if sparse:
        # BUG FIX: pass the shape explicitly so the sparse result has the
        # same shape as the dense result for the same inputs.  Without it,
        # coo_matrix infers the shape from the largest index actually
        # present, so trailing `levels` values that never occur in the
        # data would be silently dropped from the table.
        shape = (len(actual_levels[0]), len(actual_levels[1]))
        count = coo_matrix((np.ones(len(indices[0]), dtype=int),
                            (indices[0], indices[1])),
                           shape=shape)
        count.sum_duplicates()
    else:
        shape = [len(u) for u in actual_levels]
        count = np.zeros(shape, dtype=int)
        # np.add.at handles repeated index tuples correctly (unlike
        # fancy-index assignment, which would count each pair at most once).
        np.add.at(count, indices, 1)

    return CrosstabResult(actual_levels, count)
|
parrot/lib/python3.10/site-packages/scipy/stats/_discrete_distns.py
ADDED
|
@@ -0,0 +1,1922 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Author: Travis Oliphant 2002-2011 with contributions from
|
| 3 |
+
# SciPy Developers 2004-2011
|
| 4 |
+
#
|
| 5 |
+
from functools import partial
|
| 6 |
+
|
| 7 |
+
from scipy import special
|
| 8 |
+
from scipy.special import entr, logsumexp, betaln, gammaln as gamln, zeta
|
| 9 |
+
from scipy._lib._util import _lazywhere, rng_integers
|
| 10 |
+
from scipy.interpolate import interp1d
|
| 11 |
+
|
| 12 |
+
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
|
| 16 |
+
from ._distn_infrastructure import (rv_discrete, get_distribution_names,
|
| 17 |
+
_vectorize_rvs_over_shapes,
|
| 18 |
+
_ShapeInfo, _isintegral)
|
| 19 |
+
from ._biasedurn import (_PyFishersNCHypergeometric,
|
| 20 |
+
_PyWalleniusNCHypergeometric,
|
| 21 |
+
_PyStochasticLib3)
|
| 22 |
+
import scipy.special._ufuncs as scu
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class binom_gen(rv_discrete):
|
| 27 |
+
r"""A binomial discrete random variable.
|
| 28 |
+
|
| 29 |
+
%(before_notes)s
|
| 30 |
+
|
| 31 |
+
Notes
|
| 32 |
+
-----
|
| 33 |
+
The probability mass function for `binom` is:
|
| 34 |
+
|
| 35 |
+
.. math::
|
| 36 |
+
|
| 37 |
+
f(k) = \binom{n}{k} p^k (1-p)^{n-k}
|
| 38 |
+
|
| 39 |
+
for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1`
|
| 40 |
+
|
| 41 |
+
`binom` takes :math:`n` and :math:`p` as shape parameters,
|
| 42 |
+
where :math:`p` is the probability of a single success
|
| 43 |
+
and :math:`1-p` is the probability of a single failure.
|
| 44 |
+
|
| 45 |
+
%(after_notes)s
|
| 46 |
+
|
| 47 |
+
%(example)s
|
| 48 |
+
|
| 49 |
+
See Also
|
| 50 |
+
--------
|
| 51 |
+
hypergeom, nbinom, nhypergeom
|
| 52 |
+
|
| 53 |
+
"""
|
| 54 |
+
def _shape_info(self):
|
| 55 |
+
return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
|
| 56 |
+
_ShapeInfo("p", False, (0, 1), (True, True))]
|
| 57 |
+
|
| 58 |
+
def _rvs(self, n, p, size=None, random_state=None):
|
| 59 |
+
return random_state.binomial(n, p, size)
|
| 60 |
+
|
| 61 |
+
def _argcheck(self, n, p):
|
| 62 |
+
return (n >= 0) & _isintegral(n) & (p >= 0) & (p <= 1)
|
| 63 |
+
|
| 64 |
+
def _get_support(self, n, p):
|
| 65 |
+
return self.a, n
|
| 66 |
+
|
| 67 |
+
def _logpmf(self, x, n, p):
|
| 68 |
+
k = floor(x)
|
| 69 |
+
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
|
| 70 |
+
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
|
| 71 |
+
|
| 72 |
+
def _pmf(self, x, n, p):
|
| 73 |
+
# binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
|
| 74 |
+
return scu._binom_pmf(x, n, p)
|
| 75 |
+
|
| 76 |
+
def _cdf(self, x, n, p):
|
| 77 |
+
k = floor(x)
|
| 78 |
+
return scu._binom_cdf(k, n, p)
|
| 79 |
+
|
| 80 |
+
def _sf(self, x, n, p):
|
| 81 |
+
k = floor(x)
|
| 82 |
+
return scu._binom_sf(k, n, p)
|
| 83 |
+
|
| 84 |
+
def _isf(self, x, n, p):
|
| 85 |
+
return scu._binom_isf(x, n, p)
|
| 86 |
+
|
| 87 |
+
def _ppf(self, q, n, p):
|
| 88 |
+
return scu._binom_ppf(q, n, p)
|
| 89 |
+
|
| 90 |
+
def _stats(self, n, p, moments='mv'):
|
| 91 |
+
mu = n * p
|
| 92 |
+
var = mu - n * np.square(p)
|
| 93 |
+
g1, g2 = None, None
|
| 94 |
+
if 's' in moments:
|
| 95 |
+
pq = p - np.square(p)
|
| 96 |
+
npq_sqrt = np.sqrt(n * pq)
|
| 97 |
+
t1 = np.reciprocal(npq_sqrt)
|
| 98 |
+
t2 = (2.0 * p) / npq_sqrt
|
| 99 |
+
g1 = t1 - t2
|
| 100 |
+
if 'k' in moments:
|
| 101 |
+
pq = p - np.square(p)
|
| 102 |
+
npq = n * pq
|
| 103 |
+
t1 = np.reciprocal(npq)
|
| 104 |
+
t2 = 6.0/n
|
| 105 |
+
g2 = t1 - t2
|
| 106 |
+
return mu, var, g1, g2
|
| 107 |
+
|
| 108 |
+
def _entropy(self, n, p):
|
| 109 |
+
k = np.r_[0:n + 1]
|
| 110 |
+
vals = self._pmf(k, n, p)
|
| 111 |
+
return np.sum(entr(vals), axis=0)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
binom = binom_gen(name='binom')
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class bernoulli_gen(binom_gen):
    r"""A Bernoulli discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `bernoulli` is:

    .. math::

       f(k) = \begin{cases}1-p &\text{if } k = 0\\
                           p &\text{if } k = 1\end{cases}

    for :math:`k` in :math:`\{0, 1\}`, :math:`0 \leq p \leq 1`

    `bernoulli` takes :math:`p` as shape parameter,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    %(example)s

    """
    # A Bernoulli variable is a binomial variable with n fixed at 1, so
    # every distribution function below simply delegates to `binom`.

    def _shape_info(self):
        return [_ShapeInfo("p", False, (0, 1), (True, True))]

    def _rvs(self, p, size=None, random_state=None):
        return binom_gen._rvs(self, 1, p, size=size, random_state=random_state)

    def _argcheck(self, p):
        return (p >= 0) & (p <= 1)

    def _get_support(self, p):
        # Overrides binom_gen._get_support: the support is always {0, 1}
        # regardless of p.
        return self.a, self.b

    def _logpmf(self, x, p):
        return binom._logpmf(x, 1, p)

    def _pmf(self, x, p):
        # bernoulli.pmf(k) = 1-p if k = 0
        #                  = p   if k = 1
        return binom._pmf(x, 1, p)

    def _cdf(self, x, p):
        return binom._cdf(x, 1, p)

    def _sf(self, x, p):
        return binom._sf(x, 1, p)

    def _isf(self, x, p):
        return binom._isf(x, 1, p)

    def _ppf(self, q, p):
        return binom._ppf(q, 1, p)

    def _stats(self, p):
        return binom._stats(1, p)

    def _entropy(self, p):
        # Closed form: H = -p log p - (1-p) log(1-p), via entr().
        return entr(p) + entr(1-p)


bernoulli = bernoulli_gen(b=1, name='bernoulli')
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class betabinom_gen(rv_discrete):
|
| 186 |
+
r"""A beta-binomial discrete random variable.
|
| 187 |
+
|
| 188 |
+
%(before_notes)s
|
| 189 |
+
|
| 190 |
+
Notes
|
| 191 |
+
-----
|
| 192 |
+
The beta-binomial distribution is a binomial distribution with a
|
| 193 |
+
probability of success `p` that follows a beta distribution.
|
| 194 |
+
|
| 195 |
+
The probability mass function for `betabinom` is:
|
| 196 |
+
|
| 197 |
+
.. math::
|
| 198 |
+
|
| 199 |
+
f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)}
|
| 200 |
+
|
| 201 |
+
for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`,
|
| 202 |
+
:math:`b > 0`, where :math:`B(a, b)` is the beta function.
|
| 203 |
+
|
| 204 |
+
`betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.
|
| 205 |
+
|
| 206 |
+
References
|
| 207 |
+
----------
|
| 208 |
+
.. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
|
| 209 |
+
|
| 210 |
+
%(after_notes)s
|
| 211 |
+
|
| 212 |
+
.. versionadded:: 1.4.0
|
| 213 |
+
|
| 214 |
+
See Also
|
| 215 |
+
--------
|
| 216 |
+
beta, binom
|
| 217 |
+
|
| 218 |
+
%(example)s
|
| 219 |
+
|
| 220 |
+
"""
|
| 221 |
+
def _shape_info(self):
|
| 222 |
+
return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
|
| 223 |
+
_ShapeInfo("a", False, (0, np.inf), (False, False)),
|
| 224 |
+
_ShapeInfo("b", False, (0, np.inf), (False, False))]
|
| 225 |
+
|
| 226 |
+
def _rvs(self, n, a, b, size=None, random_state=None):
|
| 227 |
+
p = random_state.beta(a, b, size)
|
| 228 |
+
return random_state.binomial(n, p, size)
|
| 229 |
+
|
| 230 |
+
def _get_support(self, n, a, b):
|
| 231 |
+
return 0, n
|
| 232 |
+
|
| 233 |
+
def _argcheck(self, n, a, b):
|
| 234 |
+
return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0)
|
| 235 |
+
|
| 236 |
+
def _logpmf(self, x, n, a, b):
|
| 237 |
+
k = floor(x)
|
| 238 |
+
combiln = -log(n + 1) - betaln(n - k + 1, k + 1)
|
| 239 |
+
return combiln + betaln(k + a, n - k + b) - betaln(a, b)
|
| 240 |
+
|
| 241 |
+
def _pmf(self, x, n, a, b):
|
| 242 |
+
return exp(self._logpmf(x, n, a, b))
|
| 243 |
+
|
| 244 |
+
def _stats(self, n, a, b, moments='mv'):
|
| 245 |
+
e_p = a / (a + b)
|
| 246 |
+
e_q = 1 - e_p
|
| 247 |
+
mu = n * e_p
|
| 248 |
+
var = n * (a + b + n) * e_p * e_q / (a + b + 1)
|
| 249 |
+
g1, g2 = None, None
|
| 250 |
+
if 's' in moments:
|
| 251 |
+
g1 = 1.0 / sqrt(var)
|
| 252 |
+
g1 *= (a + b + 2 * n) * (b - a)
|
| 253 |
+
g1 /= (a + b + 2) * (a + b)
|
| 254 |
+
if 'k' in moments:
|
| 255 |
+
g2 = (a + b).astype(e_p.dtype)
|
| 256 |
+
g2 *= (a + b - 1 + 6 * n)
|
| 257 |
+
g2 += 3 * a * b * (n - 2)
|
| 258 |
+
g2 += 6 * n ** 2
|
| 259 |
+
g2 -= 3 * e_p * b * n * (6 - n)
|
| 260 |
+
g2 -= 18 * e_p * e_q * n ** 2
|
| 261 |
+
g2 *= (a + b) ** 2 * (1 + a + b)
|
| 262 |
+
g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n))
|
| 263 |
+
g2 -= 3
|
| 264 |
+
return mu, var, g1, g2
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
betabinom = betabinom_gen(name='betabinom')
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class nbinom_gen(rv_discrete):
    r"""A negative binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    Negative binomial distribution describes a sequence of i.i.d. Bernoulli
    trials, repeated until a predefined, non-random number of successes occurs.

    The probability mass function of the number of failures for `nbinom` is:

    .. math::

       f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k

    for :math:`k \ge 0`, :math:`0 < p \leq 1`

    `nbinom` takes :math:`n` and :math:`p` as shape parameters where :math:`n`
    is the number of successes, :math:`p` is the probability of a single
    success, and :math:`1-p` is the probability of a single failure.

    Another common parameterization of the negative binomial distribution is
    in terms of the mean number of failures :math:`\mu` to achieve :math:`n`
    successes. The mean :math:`\mu` is related to the probability of success
    as

    .. math::

       p = \frac{n}{n + \mu}

    The number of successes :math:`n` may also be specified in terms of a
    "dispersion", "heterogeneity", or "aggregation" parameter :math:`\alpha`,
    which relates the mean :math:`\mu` to the variance :math:`\sigma^2`,
    e.g. :math:`\sigma^2 = \mu + \alpha \mu^2`. Regardless of the convention
    used for :math:`\alpha`,

    .. math::

       p &= \frac{\mu}{\sigma^2} \\
       n &= \frac{\mu^2}{\sigma^2 - \mu}

    %(after_notes)s

    %(example)s

    See Also
    --------
    hypergeom, binom, nhypergeom

    """
    def _shape_info(self):
        # n need not be integral here (second field True marks integrality
        # of the canonical parameter); p lies in (0, 1] per _argcheck.
        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("p", False, (0, 1), (True, True))]

    def _rvs(self, n, p, size=None, random_state=None):
        # Delegate sampling to numpy's native negative-binomial generator.
        return random_state.negative_binomial(n, p, size)

    def _argcheck(self, n, p):
        return (n > 0) & (p > 0) & (p <= 1)

    def _pmf(self, x, n, p):
        # nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
        # scu._nbinom_pmf is a compiled helper from the stats module's
        # private ufunc layer (see module imports).
        return scu._nbinom_pmf(x, n, p)

    def _logpmf(self, x, n, p):
        # Log-space evaluation: gamln terms form log C(n+x-1, x);
        # xlog1py keeps x*log(1-p) accurate for small p.
        coeff = gamln(n+x) - gamln(x+1) - gamln(n)
        return coeff + n*log(p) + special.xlog1py(x, -p)

    def _cdf(self, x, n, p):
        k = floor(x)
        return scu._nbinom_cdf(k, n, p)

    def _logcdf(self, x, n, p):
        k = floor(x)
        k, n, p = np.broadcast_arrays(k, n, p)
        cdf = self._cdf(k, n, p)
        # Where the cdf is > 0.5, log(cdf) loses precision; use
        # log1p(-sf) via the regularized incomplete beta instead.
        cond = cdf > 0.5

        def f1(k, n, p):
            return np.log1p(-special.betainc(k + 1, n, 1 - p))

        # do calc in place
        logcdf = cdf
        with np.errstate(divide='ignore'):
            logcdf[cond] = f1(k[cond], n[cond], p[cond])
            logcdf[~cond] = np.log(cdf[~cond])
        return logcdf

    def _sf(self, x, n, p):
        k = floor(x)
        return scu._nbinom_sf(k, n, p)

    def _isf(self, x, n, p):
        with np.errstate(over='ignore'):  # see gh-17432
            return scu._nbinom_isf(x, n, p)

    def _ppf(self, q, n, p):
        with np.errstate(over='ignore'):  # see gh-17432
            return scu._nbinom_ppf(q, n, p)

    def _stats(self, n, p):
        # All four moments come from compiled helpers; the last entry is
        # excess (Fisher) kurtosis.
        return (
            scu._nbinom_mean(n, p),
            scu._nbinom_variance(n, p),
            scu._nbinom_skewness(n, p),
            scu._nbinom_kurtosis_excess(n, p),
        )


# Public, ready-to-use instance of the negative binomial distribution.
nbinom = nbinom_gen(name='nbinom')
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
class betanbinom_gen(rv_discrete):
    r"""A beta-negative-binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The beta-negative-binomial distribution is a negative binomial
    distribution with a probability of success `p` that follows a
    beta distribution.

    The probability mass function for `betanbinom` is:

    .. math::

       f(k) = \binom{n + k - 1}{k} \frac{B(a + n, b + k)}{B(a, b)}

    for :math:`k \ge 0`, :math:`n \geq 0`, :math:`a > 0`,
    :math:`b > 0`, where :math:`B(a, b)` is the beta function.

    `betanbinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Beta_negative_binomial_distribution

    %(after_notes)s

    .. versionadded:: 1.12.0

    See Also
    --------
    betabinom : Beta binomial distribution

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("a", False, (0, np.inf), (False, False)),
                _ShapeInfo("b", False, (0, np.inf), (False, False))]

    def _rvs(self, n, a, b, size=None, random_state=None):
        # Compound sampling: draw p ~ Beta(a, b), then the negative
        # binomial count with that success probability.
        p = random_state.beta(a, b, size)
        return random_state.negative_binomial(n, p, size)

    def _argcheck(self, n, a, b):
        return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0)

    def _logpmf(self, x, n, a, b):
        k = floor(x)
        # log C(n+k-1, k) via the identity C(m, j) = 1/((m+1) B(j+1, m-j+1)),
        # so log C(n+k-1, k) = -log(n+k) - betaln(n, k+1).
        combiln = -np.log(n + k) - betaln(n, k + 1)
        return combiln + betaln(a + n, b + k) - betaln(a, b)

    def _pmf(self, x, n, a, b):
        return exp(self._logpmf(x, n, a, b))

    def _stats(self, n, a, b, moments='mv'):
        # reference: Wolfram Alpha input
        # BetaNegativeBinomialDistribution[a, b, n]
        # Each moment only exists when `a` is large enough; _lazywhere
        # fills the undefined region with +inf.
        def mean(n, a, b):
            return n * b / (a - 1.)
        mu = _lazywhere(a > 1, (n, a, b), f=mean, fillvalue=np.inf)

        def var(n, a, b):
            return (n * b * (n + a - 1.) * (a + b - 1.)
                    / ((a - 2.) * (a - 1.)**2.))
        var = _lazywhere(a > 2, (n, a, b), f=var, fillvalue=np.inf)
        g1, g2 = None, None

        def skew(n, a, b):
            return ((2 * n + a - 1.) * (2 * b + a - 1.)
                    / (a - 3.) / sqrt(n * b * (n + a - 1.) * (b + a - 1.)
                                      / (a - 2.)))
        if 's' in moments:
            g1 = _lazywhere(a > 3, (n, a, b), f=skew, fillvalue=np.inf)

        def kurtosis(n, a, b):
            term = (a - 2.)
            term_2 = ((a - 1.)**2. * (a**2. + a * (6 * b - 1.)
                      + 6. * (b - 1.) * b)
                      + 3. * n**2. * ((a + 5.) * b**2. + (a + 5.)
                                      * (a - 1.) * b + 2. * (a - 1.)**2)
                      + 3 * (a - 1.) * n
                      * ((a + 5.) * b**2. + (a + 5.) * (a - 1.) * b
                         + 2. * (a - 1.)**2.))
            denominator = ((a - 4.) * (a - 3.) * b * n
                           * (a + b - 1.) * (a + n - 1.))
            # Wolfram Alpha uses Pearson kurtosis, so we subtract 3 to get
            # scipy's Fisher kurtosis
            return term * term_2 / denominator - 3.
        if 'k' in moments:
            g2 = _lazywhere(a > 4, (n, a, b), f=kurtosis, fillvalue=np.inf)
        return mu, var, g1, g2


# Public, ready-to-use instance of the beta-negative-binomial distribution.
betanbinom = betanbinom_gen(name='betanbinom')
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class geom_gen(rv_discrete):
    r"""A geometric discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `geom` is:

    .. math::

        f(k) = (1-p)^{k-1} p

    for :math:`k \ge 1`, :math:`0 < p \leq 1`

    `geom` takes :math:`p` as shape parameter,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    See Also
    --------
    planck

    %(example)s

    """

    def _shape_info(self):
        # Single shape parameter: the success probability p in (0, 1].
        p_info = _ShapeInfo("p", False, (0, 1), (True, True))
        return [p_info]

    def _rvs(self, p, size=None, random_state=None):
        # Sampling is delegated to the generator's native geometric sampler.
        return random_state.geometric(p, size=size)

    def _argcheck(self, p):
        # Valid shape: 0 < p <= 1.
        return (p <= 1) & (p > 0)

    def _pmf(self, k, p):
        # pmf(k) = (1-p)**(k-1) * p
        failure_factor = np.power(1 - p, k - 1)
        return failure_factor * p

    def _logpmf(self, k, p):
        # xlog1py keeps (k-1)*log(1-p) accurate for small p.
        return special.xlog1py(k - 1, -p) + log(p)

    def _cdf(self, x, p):
        # cdf(k) = 1 - (1-p)**k, evaluated via expm1/log1p for accuracy.
        k = floor(x)
        return -expm1(log1p(-p) * k)

    def _sf(self, x, p):
        log_sf = self._logsf(x, p)
        return np.exp(log_sf)

    def _logsf(self, x, p):
        # log sf(k) = k * log(1-p)
        k = floor(x)
        return k * log1p(-p)

    def _ppf(self, q, p):
        # Invert the cdf analytically, then correct the off-by-one case
        # where the previous integer already reaches quantile q.
        candidate = ceil(log1p(-q) / log1p(-p))
        prev_cdf = self._cdf(candidate - 1, p)
        step_back = (prev_cdf >= q) & (candidate > 0)
        return np.where(step_back, candidate - 1, candidate)

    def _stats(self, p):
        mean = 1.0 / p
        fail_prob = 1.0 - p
        variance = fail_prob / p / p
        skewness = (2.0 - p) / sqrt(fail_prob)
        excess_kurtosis = np.polyval([1, -6, 6], p) / (1.0 - p)
        return mean, variance, skewness, excess_kurtosis

    def _entropy(self, p):
        # Shannon entropy of the geometric law in nats.
        return -np.log(p) - np.log1p(-p) * (1.0 - p) / p


geom = geom_gen(a=1, name='geom', longname="A geometric")
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
class hypergeom_gen(rv_discrete):
    r"""A hypergeometric discrete random variable.

    The hypergeometric distribution models drawing objects from a bin.
    `M` is the total number of objects, `n` is total number of Type I objects.
    The random variate represents the number of Type I objects in `N` drawn
    without replacement from the total population.

    %(before_notes)s

    Notes
    -----
    The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
    universally accepted. See the Examples for a clarification of the
    definitions used here.

    The probability mass function is defined as,

    .. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}
                                   {\binom{M}{N}}

    for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
    coefficients are defined as,

    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.

    %(after_notes)s

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import hypergeom
    >>> import matplotlib.pyplot as plt

    Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
    we want to know the probability of finding a given number of dogs if we
    choose at random 12 of the 20 animals, we can initialize a frozen
    distribution and plot the probability mass function:

    >>> [M, n, N] = [20, 7, 12]
    >>> rv = hypergeom(M, n, N)
    >>> x = np.arange(0, n+1)
    >>> pmf_dogs = rv.pmf(x)

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group of chosen animals')
    >>> ax.set_ylabel('hypergeom PMF')
    >>> plt.show()

    Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. To for example obtain the cumulative distribution
    function, use:

    >>> prb = hypergeom.cdf(x, M, n, N)

    And to generate random numbers:

    >>> R = hypergeom.rvs(M, n, N, size=10)

    See Also
    --------
    nhypergeom, binom, nbinom

    """
    def _shape_info(self):
        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
                _ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("N", True, (0, np.inf), (True, False))]

    def _rvs(self, M, n, N, size=None, random_state=None):
        # numpy's sampler is parameterized as (ngood, nbad, nsample).
        return random_state.hypergeometric(n, M-n, N, size=size)

    def _get_support(self, M, n, N):
        # Support: max(0, N-(M-n)) <= k <= min(n, N).
        return np.maximum(N-(M-n), 0), np.minimum(n, N)

    def _argcheck(self, M, n, N):
        cond = (M > 0) & (n >= 0) & (N >= 0)
        cond &= (n <= M) & (N <= M)
        cond &= _isintegral(M) & _isintegral(n) & _isintegral(N)
        return cond

    def _logpmf(self, k, M, n, N):
        # All factorials expressed through betaln(x+1, 1) = -log-gamma
        # style identities so the result stays in log space.
        tot, good = M, n
        bad = tot - good
        result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) -
                  betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) -
                  betaln(tot+1, 1))
        return result

    def _pmf(self, k, M, n, N):
        # Compiled helper; note its argument order is (k, n, N, M).
        return scu._hypergeom_pmf(k, n, N, M)

    def _cdf(self, k, M, n, N):
        return scu._hypergeom_cdf(k, n, N, M)

    def _stats(self, M, n, N):
        # Promote to float so the in-place arithmetic below stays exact.
        M, n, N = 1. * M, 1. * n, 1. * N
        m = M - n

        # Boost kurtosis_excess doesn't return the same as the value
        # computed here.
        g2 = M * (M + 1) - 6. * N * (M - N) - 6. * n * m
        g2 *= (M - 1) * M * M
        g2 += 6. * n * N * (M - N) * m * (5. * M - 6)
        g2 /= n * N * (M - N) * m * (M - 2.) * (M - 3.)
        return (
            scu._hypergeom_mean(n, N, M),
            scu._hypergeom_variance(n, N, M),
            scu._hypergeom_skewness(n, N, M),
            g2,
        )

    def _entropy(self, M, n, N):
        # Sum entr(pmf) over the whole (finite) support.
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)

    def _sf(self, k, M, n, N):
        return scu._hypergeom_sf(k, n, N, M)

    def _logsf(self, k, M, n, N):
        res = []
        for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
            if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5):
                # Less terms to sum if we calculate log(1-cdf)
                res.append(log1p(-exp(self.logcdf(quant, tot, good, draw))))
            else:
                # Integration over probability mass function using logsumexp
                k2 = np.arange(quant + 1, draw + 1)
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)

    def _logcdf(self, k, M, n, N):
        res = []
        for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
            if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5):
                # Less terms to sum if we calculate log(1-sf)
                res.append(log1p(-exp(self.logsf(quant, tot, good, draw))))
            else:
                # Integration over probability mass function using logsumexp
                k2 = np.arange(0, quant + 1)
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)


# Public, ready-to-use instance of the hypergeometric distribution.
hypergeom = hypergeom_gen(name='hypergeom')
|
| 702 |
+
|
| 703 |
+
|
| 704 |
+
class nhypergeom_gen(rv_discrete):
    r"""A negative hypergeometric discrete random variable.

    Consider a box containing :math:`M` balls:, :math:`n` red and
    :math:`M-n` blue. We randomly sample balls from the box, one
    at a time and *without* replacement, until we have picked :math:`r`
    blue balls. `nhypergeom` is the distribution of the number of
    red balls :math:`k` we have picked.

    %(before_notes)s

    Notes
    -----
    The symbols used to denote the shape parameters (`M`, `n`, and `r`) are not
    universally accepted. See the Examples for a clarification of the
    definitions used here.

    The probability mass function is defined as,

    .. math:: f(k; M, n, r) = \frac{{{k+r-1}\choose{k}}{{M-r-k}\choose{n-k}}}
                                   {{M \choose n}}

    for :math:`k \in [0, n]`, :math:`n \in [0, M]`, :math:`r \in [0, M-n]`,
    and the binomial coefficient is:

    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.

    It is equivalent to observing :math:`k` successes in :math:`k+r-1`
    samples with :math:`k+r`'th sample being a failure. The former
    can be modelled as a hypergeometric distribution. The probability
    of the latter is simply the number of failures remaining
    :math:`M-n-(r-1)` divided by the size of the remaining population
    :math:`M-(k+r-1)`. This relationship can be shown as:

    .. math:: NHG(k;M,n,r) = HG(k;M,n,k+r-1)\frac{(M-n-(r-1))}{(M-(k+r-1))}

    where :math:`NHG` is probability mass function (PMF) of the
    negative hypergeometric distribution and :math:`HG` is the
    PMF of the hypergeometric distribution.

    %(after_notes)s

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import nhypergeom
    >>> import matplotlib.pyplot as plt

    Suppose we have a collection of 20 animals, of which 7 are dogs.
    Then if we want to know the probability of finding a given number
    of dogs (successes) in a sample with exactly 12 animals that
    aren't dogs (failures), we can initialize a frozen distribution
    and plot the probability mass function:

    >>> M, n, r = [20, 7, 12]
    >>> rv = nhypergeom(M, n, r)
    >>> x = np.arange(0, n+2)
    >>> pmf_dogs = rv.pmf(x)

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group with given 12 failures')
    >>> ax.set_ylabel('nhypergeom PMF')
    >>> plt.show()

    Instead of using a frozen distribution we can also use `nhypergeom`
    methods directly. To for example obtain the probability mass
    function, use:

    >>> prb = nhypergeom.pmf(x, M, n, r)

    And to generate random numbers:

    >>> R = nhypergeom.rvs(M, n, r, size=10)

    To verify the relationship between `hypergeom` and `nhypergeom`, use:

    >>> from scipy.stats import hypergeom, nhypergeom
    >>> M, n, r = 45, 13, 8
    >>> k = 6
    >>> nhypergeom.pmf(k, M, n, r)
    0.06180776620271643
    >>> hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1))
    0.06180776620271644

    See Also
    --------
    hypergeom, binom, nbinom

    References
    ----------
    .. [1] Negative Hypergeometric Distribution on Wikipedia
           https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution

    .. [2] Negative Hypergeometric Distribution from
           http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Negativehypergeometric.pdf

    """

    def _shape_info(self):
        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
                _ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("r", True, (0, np.inf), (True, False))]

    def _get_support(self, M, n, r):
        # k successes range from 0 up to all n red balls.
        return 0, n

    def _argcheck(self, M, n, r):
        cond = (n >= 0) & (n <= M) & (r >= 0) & (r <= M-n)
        cond &= _isintegral(M) & _isintegral(n) & _isintegral(r)
        return cond

    def _rvs(self, M, n, r, size=None, random_state=None):

        @_vectorize_rvs_over_shapes
        def _rvs1(M, n, r, size, random_state):
            # invert cdf by calculating all values in support, scalar M, n, r
            a, b = self.support(M, n, r)
            ks = np.arange(a, b+1)
            cdf = self.cdf(ks, M, n, r)
            # 'next' step interpolation realizes the discrete inverse cdf.
            ppf = interp1d(cdf, ks, kind='next', fill_value='extrapolate')
            rvs = ppf(random_state.uniform(size=size)).astype(int)
            if size is None:
                return rvs.item()
            return rvs

        return _rvs1(M, n, r, size=size, random_state=random_state)

    def _logpmf(self, k, M, n, r):
        # Degenerate case r == 0 forces k == 0 with probability 1
        # (log-pmf 0.0); _lazywhere avoids evaluating the formula there.
        cond = ((r == 0) & (k == 0))
        result = _lazywhere(~cond, (k, M, n, r),
                            lambda k, M, n, r:
                                (-betaln(k+1, r) + betaln(k+r, 1) -
                                 betaln(n-k+1, M-r-n+1) + betaln(M-r-k+1, 1) +
                                 betaln(n+1, M-n+1) - betaln(M+1, 1)),
                            fillvalue=0.0)
        return result

    def _pmf(self, k, M, n, r):
        # same as the following but numerically more precise
        # return comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n)
        return exp(self._logpmf(k, M, n, r))

    def _stats(self, M, n, r):
        # Promote the datatype to at least float
        # mu = rn / (M-n+1)
        M, n, r = 1.*M, 1.*n, 1.*r
        mu = r*n / (M-n+1)

        var = r*(M+1)*n / ((M-n+1)*(M-n+2)) * (1 - r / (M-n+1))

        # The skew and kurtosis are mathematically
        # intractable so return `None`. See [2]_.
        g1, g2 = None, None
        return mu, var, g1, g2


# Public, ready-to-use instance of the negative hypergeometric distribution.
nhypergeom = nhypergeom_gen(name='nhypergeom')
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
    r"""A Logarithmic (Log-Series, Series) discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `logser` is:

    .. math::

        f(k) = - \frac{p^k}{k \log(1-p)}

    for :math:`k \ge 1`, :math:`0 < p < 1`

    `logser` takes :math:`p` as shape parameter,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    %(example)s

    """

    def _shape_info(self):
        return [_ShapeInfo("p", False, (0, 1), (True, True))]

    def _rvs(self, p, size=None, random_state=None):
        # looks wrong for p>0.5, too few k=1
        # trying to use generic is worse, no k=1 at all
        return random_state.logseries(p, size=size)

    def _argcheck(self, p):
        return (p > 0) & (p < 1)

    def _pmf(self, k, p):
        # logser.pmf(k) = - p**k / (k*log(1-p))
        return -np.power(p, k) * 1.0 / k / special.log1p(-p)

    def _stats(self, p):
        # Moments derived from the raw moments mu_k' of the log-series
        # law; r = log(1-p) is the common normalizing factor.
        r = special.log1p(-p)
        mu = p / (p - 1.0) / r
        mu2p = -p / r / (p - 1.0)**2
        var = mu2p - mu*mu
        mu3p = -p / r * (1.0+p) / (1.0 - p)**3
        # Central third moment from raw moments, then skewness.
        mu3 = mu3p - 3*mu*mu2p + 2*mu**3
        g1 = mu3 / np.power(var, 1.5)

        mu4p = -p / r * (
            1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
        # Central fourth moment, then excess (Fisher) kurtosis.
        mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
        g2 = mu4 / var**2 - 3.0
        return mu, var, g1, g2


# Public, ready-to-use instance of the log-series distribution.
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
|
| 924 |
+
|
| 925 |
+
|
| 926 |
+
class poisson_gen(rv_discrete):
    r"""A Poisson discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `poisson` is:

    .. math::

        f(k) = \exp(-\mu) \frac{\mu^k}{k!}

    for :math:`k \ge 0`.

    `poisson` takes :math:`\mu \geq 0` as shape parameter.
    When :math:`\mu = 0`, the ``pmf`` method
    returns ``1.0`` at quantile :math:`k = 0`.

    %(after_notes)s

    %(example)s

    """

    def _shape_info(self):
        # Rate parameter mu >= 0 (zero is explicitly allowed, see _argcheck).
        return [_ShapeInfo("mu", False, (0, np.inf), (True, False))]

    # Override rv_discrete._argcheck to allow mu=0.
    def _argcheck(self, mu):
        return mu >= 0

    def _rvs(self, mu, size=None, random_state=None):
        return random_state.poisson(mu, size)

    def _logpmf(self, k, mu):
        # xlogy handles the mu = 0, k = 0 corner (0 * log 0 -> 0).
        return special.xlogy(k, mu) - gamln(k + 1) - mu

    def _pmf(self, k, mu):
        # poisson.pmf(k) = exp(-mu) * mu**k / k!
        return exp(self._logpmf(k, mu))

    def _cdf(self, x, mu):
        # pdtr is the regularized incomplete gamma form of the Poisson cdf.
        return special.pdtr(floor(x), mu)

    def _sf(self, x, mu):
        return special.pdtrc(floor(x), mu)

    def _ppf(self, q, mu):
        # pdtrik gives a continuous inverse; round up, then step back one
        # integer when the smaller value already attains quantile q.
        upper = ceil(special.pdtrik(q, mu))
        lower = np.maximum(upper - 1, 0)
        use_lower = special.pdtr(lower, mu) >= q
        return np.where(use_lower, lower, upper)

    def _stats(self, mu):
        # Mean and variance are both mu; skew/kurtosis diverge at mu = 0,
        # so _lazywhere substitutes +inf there.
        arr_mu = np.asarray(mu)
        positive = arr_mu > 0
        skew = _lazywhere(positive, (arr_mu,), lambda x: sqrt(1.0/x), np.inf)
        excess_kurt = _lazywhere(positive, (arr_mu,), lambda x: 1.0/x, np.inf)
        return mu, mu, skew, excess_kurt


poisson = poisson_gen(name="poisson", longname='A Poisson')
|
| 993 |
+
|
| 994 |
+
|
| 995 |
+
class planck_gen(rv_discrete):
    r"""A Planck discrete exponential random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `planck` is:

    .. math::

        f(k) = (1-\exp(-\lambda)) \exp(-\lambda k)

    for :math:`k \ge 0` and :math:`\lambda > 0`.

    `planck` takes :math:`\lambda` as shape parameter. The Planck distribution
    can be written as a geometric distribution (`geom`) with
    :math:`p = 1 - \exp(-\lambda)` shifted by ``loc = -1``.

    %(after_notes)s

    See Also
    --------
    geom

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("lambda", False, (0, np.inf), (False, False))]

    def _argcheck(self, lambda_):
        return lambda_ > 0

    def _pmf(self, k, lambda_):
        # pmf(k) = (1 - exp(-lambda)) * exp(-lambda*k); -expm1 is the
        # accurate form of (1 - exp(-lambda)) for small lambda.
        return -expm1(-lambda_)*exp(-lambda_*k)

    def _cdf(self, x, lambda_):
        k = floor(x)
        # cdf(k) = 1 - exp(-lambda*(k+1))
        return -expm1(-lambda_*(k+1))

    def _sf(self, x, lambda_):
        return exp(self._logsf(x, lambda_))

    def _logsf(self, x, lambda_):
        k = floor(x)
        return -lambda_*(k+1)

    def _ppf(self, q, lambda_):
        # Analytic inversion of the cdf, then step back one integer where
        # the previous value (clipped to the support) already reaches q.
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(*(self._get_support(lambda_)))
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)

    def _rvs(self, lambda_, size=None, random_state=None):
        # use relation to geometric distribution for sampling
        p = -expm1(-lambda_)
        return random_state.geometric(p, size=size) - 1.0

    def _stats(self, lambda_):
        mu = 1/expm1(lambda_)
        var = exp(-lambda_)/(expm1(-lambda_))**2
        g1 = 2*cosh(lambda_/2.0)
        g2 = 4+2*cosh(lambda_)
        return mu, var, g1, g2

    def _entropy(self, lambda_):
        # C = 1 - exp(-lambda), the normalizing constant of the pmf.
        C = -expm1(-lambda_)
        return lambda_*exp(-lambda_)/C - log(C)


# Public, ready-to-use instance of the Planck (discrete exponential) law.
planck = planck_gen(a=0, name='planck', longname='A discrete exponential ')
|
| 1067 |
+
|
| 1068 |
+
|
| 1069 |
+
class boltzmann_gen(rv_discrete):
    r"""A Boltzmann (Truncated Discrete Exponential) random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `boltzmann` is:

    .. math::

        f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N))

    for :math:`k = 0,..., N-1`.

    `boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("lambda_", False, (0, np.inf), (False, False)),
                _ShapeInfo("N", True, (0, np.inf), (False, False))]

    def _argcheck(self, lambda_, N):
        return (lambda_ > 0) & (N > 0) & _isintegral(N)

    def _get_support(self, lambda_, N):
        # Support is {a, ..., N-1}; self.a is the lower bound set at
        # construction time (0 for the module-level instance below).
        return self.a, N - 1

    def _pmf(self, k, lambda_, N):
        # boltzmann.pmf(k) =
        # (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
        fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
        return fact*exp(-lambda_*k)

    def _cdf(self, x, lambda_, N):
        k = floor(x)
        return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))

    def _ppf(self, q, lambda_, N):
        # Rescale q to the untruncated geometric scale, invert, then step
        # back one integer where the smaller value already reaches q.
        qnew = q*(1-exp(-lambda_*N))
        vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, lambda_, N)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, lambda_, N):
        # Moments of the truncated geometric series in terms of
        # z = exp(-lambda) and zN = z**N.
        z = exp(-lambda_)
        zN = exp(-lambda_*N)
        mu = z/(1.0-z)-N*zN/(1-zN)
        var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
        trm = (1-zN)/(1-z)
        trm2 = (z*trm**2 - N*N*zN)
        g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
        g1 = g1 / trm2**(1.5)
        g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
        g2 = g2 / trm2 / trm2
        return mu, var, g1, g2


# Public, ready-to-use instance of the truncated discrete exponential law.
boltzmann = boltzmann_gen(name='boltzmann', a=0,
                          longname='A truncated discrete exponential ')
|
| 1134 |
+
|
| 1135 |
+
|
| 1136 |
+
class randint_gen(rv_discrete):
    r"""A uniform discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `randint` is:

    .. math::

        f(k) = \frac{1}{\texttt{high} - \texttt{low}}

    for :math:`k \in \{\texttt{low}, \dots, \texttt{high} - 1\}`.

    `randint` takes :math:`\texttt{low}` and :math:`\texttt{high}` as shape
    parameters.

    %(after_notes)s

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import randint
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)

    Calculate the first four moments:

    >>> low, high = 7, 31
    >>> mean, var, skew, kurt = randint.stats(low, high, moments='mvsk')

    Display the probability mass function (``pmf``):

    >>> x = np.arange(low - 5, high + 5)
    >>> ax.plot(x, randint.pmf(x, low, high), 'bo', ms=8, label='randint pmf')
    >>> ax.vlines(x, 0, randint.pmf(x, low, high), colors='b', lw=5, alpha=0.5)

    Alternatively, the distribution object can be called (as a function) to
    fix the shape and location. This returns a "frozen" RV object holding the
    given parameters fixed.

    Freeze the distribution and display the frozen ``pmf``:

    >>> rv = randint(low, high)
    >>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-',
    ...           lw=1, label='frozen pmf')
    >>> ax.legend(loc='lower center')
    >>> plt.show()

    Check the relationship between the cumulative distribution function
    (``cdf``) and its inverse, the percent point function (``ppf``):

    >>> q = np.arange(low, high)
    >>> p = randint.cdf(q, low, high)
    >>> np.allclose(q, randint.ppf(p, low, high))
    True

    Generate random numbers:

    >>> r = randint.rvs(low, high, size=1000)

    """

    def _shape_info(self):
        return [_ShapeInfo("low", True, (-np.inf, np.inf), (False, False)),
                _ShapeInfo("high", True, (-np.inf, np.inf), (False, False))]

    def _argcheck(self, low, high):
        # Both endpoints must be integral, with a non-empty half-open range.
        return (high > low) & _isintegral(low) & _isintegral(high)

    def _get_support(self, low, high):
        # `high` is exclusive, so the support is {low, ..., high-1}.
        return low, high - 1

    def _pmf(self, k, low, high):
        # pmf(k) = 1/(high - low) on the support, 0 elsewhere.
        p = np.ones_like(k) / (high - low)
        return np.where((k >= low) & (k < high), p, 0.)

    def _cdf(self, x, low, high):
        k = floor(x)
        return (k - low + 1.) / (high - low)

    def _ppf(self, q, low, high):
        # Candidate quantile from the inverted CDF, then step back one if
        # the smaller value already satisfies cdf >= q.
        vals = ceil(q * (high - low) + low) - 1
        lower = (vals - 1).clip(low, high)
        cdf_lower = self._cdf(lower, low, high)
        return np.where(cdf_lower >= q, lower, vals)

    def _stats(self, low, high):
        m2, m1 = np.asarray(high), np.asarray(low)
        mu = (m2 + m1 - 1.0) / 2
        d = m2 - m1
        var = (d*d - 1) / 12.0
        g1 = 0.0
        g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
        return mu, var, g1, g2

    def _rvs(self, low, high, size=None, random_state=None):
        """An array of *size* random integers >= ``low`` and < ``high``."""
        if np.asarray(low).size == 1 and np.asarray(high).size == 1:
            # Scalar bounds: delegate directly, no vectorization needed.
            return rng_integers(random_state, low, high, size=size)

        if size is not None:
            # NumPy's RandomState.randint() doesn't broadcast its arguments.
            # Use `broadcast_to()` to extend the shapes of low and high
            # up to size.  Then the numpy.vectorize'd sampler below needs
            # no explicit `size` argument.
            low = np.broadcast_to(low, size)
            high = np.broadcast_to(high, size)
        randint = np.vectorize(partial(rng_integers, random_state),
                               otypes=[np.dtype(int)])
        return randint(low, high)

    def _entropy(self, low, high):
        # Entropy of a uniform distribution over (high - low) outcomes.
        return log(high - low)


randint = randint_gen(name='randint', longname='A discrete uniform '
                                               '(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
    r"""A Zipf (Zeta) discrete random variable.

    %(before_notes)s

    See Also
    --------
    zipfian

    Notes
    -----
    The probability mass function for `zipf` is:

    .. math::

        f(k, a) = \frac{1}{\zeta(a) k^a}

    for :math:`k \ge 1`, :math:`a > 1`.

    `zipf` takes :math:`a > 1` as shape parameter. :math:`\zeta` is the
    Riemann zeta function (`scipy.special.zeta`)

    The Zipf distribution is also known as the zeta distribution, which is
    a special case of the Zipfian distribution (`zipfian`).

    %(after_notes)s

    References
    ----------
    .. [1] "Zeta Distribution", Wikipedia,
           https://en.wikipedia.org/wiki/Zeta_distribution

    %(example)s

    Confirm that `zipf` is the large `n` limit of `zipfian`.

    >>> import numpy as np
    >>> from scipy.stats import zipf, zipfian
    >>> k = np.arange(11)
    >>> np.allclose(zipf.pmf(k, a), zipfian.pmf(k, a, n=10000000))
    True

    """

    def _shape_info(self):
        return [_ShapeInfo("a", False, (1, np.inf), (False, False))]

    def _rvs(self, a, size=None, random_state=None):
        # Delegate to NumPy's native Zipf sampler.
        return random_state.zipf(a, size=size)

    def _argcheck(self, a):
        return a > 1

    def _pmf(self, k, a):
        # pmf(k, a) = 1/(zeta(a) * k**a)
        k = k.astype(np.float64)
        return 1.0 / special.zeta(a, 1) * k**-a

    def _munp(self, n, a):
        # The n-th raw moment exists only for a > n + 1; report inf otherwise.
        return _lazywhere(
            a > n + 1, (a, n),
            lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
            np.inf)


zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
def _gen_harmonic_gt1(n, a):
|
| 1330 |
+
"""Generalized harmonic number, a > 1"""
|
| 1331 |
+
# See https://en.wikipedia.org/wiki/Harmonic_number; search for "hurwitz"
|
| 1332 |
+
return zeta(a, 1) - zeta(a, n+1)
|
| 1333 |
+
|
| 1334 |
+
|
| 1335 |
+
def _gen_harmonic_leq1(n, a):
|
| 1336 |
+
"""Generalized harmonic number, a <= 1"""
|
| 1337 |
+
if not np.size(n):
|
| 1338 |
+
return n
|
| 1339 |
+
n_max = np.max(n) # loop starts at maximum of all n
|
| 1340 |
+
out = np.zeros_like(a, dtype=float)
|
| 1341 |
+
# add terms of harmonic series; starting from smallest to avoid roundoff
|
| 1342 |
+
for i in np.arange(n_max, 0, -1, dtype=float):
|
| 1343 |
+
mask = i <= n # don't add terms after nth
|
| 1344 |
+
out[mask] += 1/i**a[mask]
|
| 1345 |
+
return out
|
| 1346 |
+
|
| 1347 |
+
|
| 1348 |
+
def _gen_harmonic(n, a):
    """Generalized harmonic number H_{n,a}, dispatching on the value of a."""
    n, a = np.broadcast_arrays(n, a)
    # a > 1 admits the closed zeta form; otherwise fall back to direct summation.
    return _lazywhere(a > 1, (n, a),
                      f=_gen_harmonic_gt1, f2=_gen_harmonic_leq1)
class zipfian_gen(rv_discrete):
    r"""A Zipfian discrete random variable.

    %(before_notes)s

    See Also
    --------
    zipf

    Notes
    -----
    The probability mass function for `zipfian` is:

    .. math::

        f(k, a, n) = \frac{1}{H_{n,a} k^a}

    for :math:`k \in \{1, 2, \dots, n-1, n\}`, :math:`a \ge 0`,
    :math:`n \in \{1, 2, 3, \dots\}`.

    `zipfian` takes :math:`a` and :math:`n` as shape parameters.
    :math:`H_{n,a}` is the :math:`n`:sup:`th` generalized harmonic
    number of order :math:`a`.

    The Zipfian distribution reduces to the Zipf (zeta) distribution as
    :math:`n \rightarrow \infty`.

    %(after_notes)s

    References
    ----------
    .. [1] "Zipf's Law", Wikipedia, https://en.wikipedia.org/wiki/Zipf's_law
    .. [2] Larry Leemis, "Zipf Distribution", Univariate Distribution
           Relationships. http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf

    %(example)s

    Confirm that `zipfian` reduces to `zipf` for large `n`, `a > 1`.

    >>> import numpy as np
    >>> from scipy.stats import zipf, zipfian
    >>> k = np.arange(11)
    >>> np.allclose(zipfian.pmf(k, a=3.5, n=10000000), zipf.pmf(k, a=3.5))
    True

    """

    def _shape_info(self):
        return [_ShapeInfo("a", False, (0, np.inf), (True, False)),
                _ShapeInfo("n", True, (0, np.inf), (False, False))]

    def _argcheck(self, a, n):
        # we need np.asarray here because moment (maybe others) don't convert
        return (a >= 0) & (n > 0) & (n == np.asarray(n, dtype=int))

    def _get_support(self, a, n):
        # Support is the first n positive integers.
        return 1, n

    def _pmf(self, k, a, n):
        # pmf(k) = 1 / (H_{n,a} * k**a)
        k = k.astype(np.float64)
        return 1.0 / _gen_harmonic(n, a) * k**-a

    def _cdf(self, k, a, n):
        # Partial harmonic sum over the full normalization.
        return _gen_harmonic(k, a) / _gen_harmonic(n, a)

    def _sf(self, k, a, n):
        k = k + 1  # # to match SciPy convention
        # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
        return ((k**a*(_gen_harmonic(n, a) - _gen_harmonic(k, a)) + 1)
                / (k**a*_gen_harmonic(n, a)))

    def _stats(self, a, n):
        # see # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
        # Moments are ratios of generalized harmonic numbers of shifted order.
        Hna = _gen_harmonic(n, a)
        Hna1 = _gen_harmonic(n, a-1)
        Hna2 = _gen_harmonic(n, a-2)
        Hna3 = _gen_harmonic(n, a-3)
        Hna4 = _gen_harmonic(n, a-4)
        mu1 = Hna1/Hna
        mu2n = (Hna2*Hna - Hna1**2)
        mu2d = Hna**2
        mu2 = mu2n / mu2d
        g1 = (Hna3/Hna - 3*Hna1*Hna2/Hna**2 + 2*Hna1**3/Hna**3)/mu2**(3/2)
        g2 = (Hna**3*Hna4 - 4*Hna**2*Hna1*Hna3 + 6*Hna*Hna1**2*Hna2
              - 3*Hna1**4) / mu2n**2
        g2 -= 3
        return mu1, mu2, g1, g2


zipfian = zipfian_gen(a=1, name='zipfian', longname='A Zipfian')
class dlaplace_gen(rv_discrete):
    r"""A Laplacian discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `dlaplace` is:

    .. math::

        f(k) = \tanh(a/2) \exp(-a |k|)

    for integers :math:`k` and :math:`a > 0`.

    `dlaplace` takes :math:`a` as shape parameter.

    %(after_notes)s

    %(example)s

    """

    def _shape_info(self):
        return [_ShapeInfo("a", False, (0, np.inf), (False, False))]

    def _pmf(self, k, a):
        # pmf(k) = tanh(a/2) * exp(-a*abs(k))
        return tanh(a/2.0) * exp(-a * abs(k))

    def _cdf(self, x, a):
        k = floor(x)

        # CDF has separate closed forms on the two halves of the support.
        def nonneg_branch(k, a):
            return 1.0 - exp(-a * k) / (exp(a) + 1)

        def neg_branch(k, a):
            return exp(a * (k + 1)) / (exp(a) + 1)

        return _lazywhere(k >= 0, (k, a), f=nonneg_branch, f2=neg_branch)

    def _ppf(self, q, a):
        # Invert each CDF branch, then correct the ceil-induced off-by-one.
        const = 1 + exp(a)
        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)),
                             log(q*const) / a - 1,
                             -log((1-q) * const) / a))
        lower = vals - 1
        return np.where(self._cdf(lower, a) >= q, lower, vals)

    def _stats(self, a):
        # Symmetric distribution: mean and skewness are exactly zero.
        ea = exp(a)
        mu2 = 2.*ea/(ea-1.)**2
        mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
        return 0., mu2, 0., mu4/mu2**2 - 3.

    def _entropy(self, a):
        return a / sinh(a) - log(tanh(a/2.0))

    def _rvs(self, a, size=None, random_state=None):
        # The discrete Laplace is equivalent to the two-sided geometric
        # distribution with PMF:
        #   f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k)
        # Reference:
        #   https://www.sciencedirect.com/science/
        #   article/abs/pii/S0378375804003519
        # Furthermore, the two-sided geometric distribution is
        # equivalent to the difference between two iid geometric
        # distributions.
        # Reference (page 179):
        #   https://pdfs.semanticscholar.org/61b3/
        #   b99f466815808fd0d03f5d2791eea8b541a1.pdf
        # Thus, we can leverage the following:
        #   1) alpha = e^-a
        #   2) probability_of_success = 1 - alpha (Bernoulli trial)
        success_prob = -np.expm1(-np.asarray(a))
        pos = random_state.geometric(success_prob, size=size)
        neg = random_state.geometric(success_prob, size=size)
        return pos - neg


dlaplace = dlaplace_gen(a=-np.inf,
                        name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
    r"""A Skellam discrete random variable.

    %(before_notes)s

    Notes
    -----
    Probability distribution of the difference of two correlated or
    uncorrelated Poisson random variables.

    Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with
    expected values :math:`\lambda_1` and :math:`\lambda_2`. Then,
    :math:`k_1 - k_2` follows a Skellam distribution with parameters
    :math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and
    :math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where
    :math:`\rho` is the correlation coefficient between :math:`k_1` and
    :math:`k_2`. If the two Poisson-distributed r.v. are independent then
    :math:`\rho = 0`.

    Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive.

    For details see: https://en.wikipedia.org/wiki/Skellam_distribution

    `skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("mu1", False, (0, np.inf), (False, False)),
                _ShapeInfo("mu2", False, (0, np.inf), (False, False))]

    def _rvs(self, mu1, mu2, size=None, random_state=None):
        # Difference of two independent Poisson draws.
        n = size
        return (random_state.poisson(mu1, n) -
                random_state.poisson(mu2, n))

    def _pmf(self, x, mu1, mu2):
        # Expressed via the noncentral chi-squared pdf; the sign of x
        # selects which parameter plays the noncentrality role.
        with np.errstate(over='ignore'):  # see gh-17432
            px = np.where(x < 0,
                          scu._ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
                          scu._ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
            # ncx2.pdf() returns nan's for extremely low probabilities
        return px

    def _cdf(self, x, mu1, mu2):
        x = floor(x)
        with np.errstate(over='ignore'):  # see gh-17432
            px = np.where(x < 0,
                          scu._ncx2_cdf(2*mu2, -2*x, 2*mu1),
                          1 - scu._ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
        return px

    def _stats(self, mu1, mu2):
        mean = mu1 - mu2
        var = mu1 + mu2
        g1 = mean / sqrt((var)**3)
        g2 = 1 / var
        return mean, var, g1, g2


skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
class yulesimon_gen(rv_discrete):
    r"""A Yule-Simon discrete random variable.

    %(before_notes)s

    Notes
    -----

    The probability mass function for the `yulesimon` is:

    .. math::

        f(k) = \alpha B(k, \alpha+1)

    for :math:`k=1,2,3,...`, where :math:`\alpha>0`.
    Here :math:`B` refers to the `scipy.special.beta` function.

    The sampling of random variates is based on pg 553, Section 6.3 of [1]_.
    Our notation maps to the referenced logic via :math:`\alpha=a-1`.

    For details see the wikipedia entry [2]_.

    References
    ----------
    .. [1] Devroye, Luc. "Non-uniform Random Variate Generation",
         (1986) Springer, New York.

    .. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution

    %(after_notes)s

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("alpha", False, (0, np.inf), (False, False))]

    def _rvs(self, alpha, size=None, random_state=None):
        # Devroye's exponential-mixture construction (see class docstring).
        E1 = random_state.standard_exponential(size)
        E2 = random_state.standard_exponential(size)
        return ceil(-E1 / log1p(-exp(-E2 / alpha)))

    def _pmf(self, x, alpha):
        # pmf(k) = alpha * B(k, alpha + 1)
        return alpha * special.beta(x, alpha + 1)

    def _argcheck(self, alpha):
        return (alpha > 0)

    def _logpmf(self, x, alpha):
        return log(alpha) + special.betaln(x, alpha + 1)

    def _cdf(self, x, alpha):
        return 1 - x * special.beta(x, alpha + 1)

    def _sf(self, x, alpha):
        return x * special.beta(x, alpha + 1)

    def _logsf(self, x, alpha):
        return log(x) + special.betaln(x, alpha + 1)

    def _stats(self, alpha):
        # Each moment exists only above a threshold in alpha; below it the
        # moment is infinite, and nan marks the parameter region where even
        # the defining expectation diverges.
        mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1))
        mu2 = np.where(alpha > 2,
                       alpha**2 / ((alpha - 2.0) * (alpha - 1)**2),
                       np.inf)
        mu2 = np.where(alpha <= 1, np.nan, mu2)
        g1 = np.where(alpha > 3,
                      sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)),
                      np.inf)
        g1 = np.where(alpha <= 2, np.nan, g1)
        g2 = np.where(alpha > 4,
                      alpha + 3 + ((11 * alpha**3 - 49 * alpha - 22) /
                                   (alpha * (alpha - 4) * (alpha - 3))),
                      np.inf)
        g2 = np.where(alpha <= 2, np.nan, g2)
        return mu, mu2, g1, g2


yulesimon = yulesimon_gen(name='yulesimon', a=1)
class _nchypergeom_gen(rv_discrete):
    r"""A noncentral hypergeometric discrete random variable.

    For subclassing by nchypergeom_fisher_gen and nchypergeom_wallenius_gen.

    """

    rvs_name = None  # name of the BiasedUrn sampling method; set by subclass
    dist = None      # BiasedUrn distribution class; set by subclass

    def _shape_info(self):
        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
                _ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("N", True, (0, np.inf), (True, False)),
                _ShapeInfo("odds", False, (0, np.inf), (False, False))]

    def _get_support(self, M, n, N, odds):
        N, m1, n = M, n, N  # follow Wikipedia notation
        m2 = N - m1
        x_min = np.maximum(0, n - m2)
        x_max = np.minimum(n, m1)
        return x_min, x_max

    def _argcheck(self, M, n, N, odds):
        # we need np.asarray because callers may pass unconverted scalars
        M, n = np.asarray(M), np.asarray(n)
        N, odds = np.asarray(N), np.asarray(odds)

        def nonneg_int(v):
            return (v.astype(int) == v) & (v >= 0)

        return (nonneg_int(M) & nonneg_int(n) & nonneg_int(N)
                & (odds > 0) & (N <= M) & (n <= M))

    def _rvs(self, M, n, N, odds, size=None, random_state=None):

        @_vectorize_rvs_over_shapes
        def _rvs1(M, n, N, odds, size, random_state):
            # Sample a flat batch from the BiasedUrn generator, then
            # restore the requested output shape.
            length = np.prod(size)
            urn = _PyStochasticLib3()
            rv_gen = getattr(urn, self.rvs_name)
            draws = rv_gen(N, n, M, odds, length, random_state)
            return draws.reshape(size)

        return _rvs1(M, n, N, odds, size=size, random_state=random_state)

    def _pmf(self, x, M, n, N, odds):

        x, M, n, N, odds = np.broadcast_arrays(x, M, n, N, odds)
        if x.size == 0:  # np.vectorize doesn't work with zero size input
            return np.empty_like(x)

        @np.vectorize
        def _pmf1(x, M, n, N, odds):
            urn = self.dist(N, n, M, odds, 1e-12)
            return urn.probability(x)

        return _pmf1(x, M, n, N, odds)

    def _stats(self, M, n, N, odds, moments):

        @np.vectorize
        def _moments1(M, n, N, odds):
            urn = self.dist(N, n, M, odds, 1e-12)
            return urn.moments()

        # Only mean and variance are available from BiasedUrn; skew and
        # kurtosis fall back to the generic numerical machinery.
        m, v = (_moments1(M, n, N, odds) if ("m" in moments or "v" in moments)
                else (None, None))
        s, k = None, None
        return m, v, s, k
class nchypergeom_fisher_gen(_nchypergeom_gen):
    r"""A Fisher's noncentral hypergeometric discrete random variable.

    Fisher's noncentral hypergeometric distribution models drawing objects of
    two types from a bin. `M` is the total number of objects, `n` is the
    number of Type I objects, and `odds` is the odds ratio: the odds of
    selecting a Type I object rather than a Type II object when there is only
    one object of each type.
    The random variate represents the number of Type I objects drawn if we
    take a handful of objects from the bin at once and find out afterwards
    that we took `N` objects.

    %(before_notes)s

    See Also
    --------
    nchypergeom_wallenius, hypergeom, nhypergeom

    Notes
    -----
    Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
    with parameters `N`, `n`, and `M` (respectively) as defined above.

    The probability mass function is defined as

    .. math::

        p(x; M, n, N, \omega) =
        \frac{\binom{n}{x}\binom{M - n}{N-x}\omega^x}{P_0},

    for
    :math:`x \in [x_l, x_u]`,
    :math:`M \in {\mathbb N}`,
    :math:`n \in [0, M]`,
    :math:`N \in [0, M]`,
    :math:`\omega > 0`,
    where
    :math:`x_l = \max(0, N - (M - n))`,
    :math:`x_u = \min(N, n)`,

    .. math::

        P_0 = \sum_{y=x_l}^{x_u} \binom{n}{y}\binom{M - n}{N-y}\omega^y,

    and the binomial coefficients are defined as

    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.

    `nchypergeom_fisher` uses the BiasedUrn package by Agner Fog with
    permission for it to be distributed under SciPy's license.

    The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
    universally accepted; they are chosen for consistency with `hypergeom`.

    Note that Fisher's noncentral hypergeometric distribution is distinct
    from Wallenius' noncentral hypergeometric distribution, which models
    drawing a pre-determined `N` objects from a bin one by one.
    When the odds ratio is unity, however, both distributions reduce to the
    ordinary hypergeometric distribution.

    %(after_notes)s

    References
    ----------
    .. [1] Agner Fog, "Biased Urn Theory".
           https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf

    .. [2] "Fisher's noncentral hypergeometric distribution", Wikipedia,
           https://en.wikipedia.org/wiki/Fisher's_noncentral_hypergeometric_distribution

    %(example)s

    """

    # Hooks consumed by the _nchypergeom_gen base class.
    rvs_name = "rvs_fisher"
    dist = _PyFishersNCHypergeometric


nchypergeom_fisher = nchypergeom_fisher_gen(
    name='nchypergeom_fisher',
    longname="A Fisher's noncentral hypergeometric")
class nchypergeom_wallenius_gen(_nchypergeom_gen):
    r"""A Wallenius' noncentral hypergeometric discrete random variable.

    Wallenius' noncentral hypergeometric distribution models drawing objects of
    two types from a bin. `M` is the total number of objects, `n` is the
    number of Type I objects, and `odds` is the odds ratio: the odds of
    selecting a Type I object rather than a Type II object when there is only
    one object of each type.
    The random variate represents the number of Type I objects drawn if we
    draw a pre-determined `N` objects from a bin one by one.

    %(before_notes)s

    See Also
    --------
    nchypergeom_fisher, hypergeom, nhypergeom

    Notes
    -----
    Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
    with parameters `N`, `n`, and `M` (respectively) as defined above.

    The probability mass function is defined as

    .. math::

        p(x; N, n, M) = \binom{n}{x} \binom{M - n}{N-x}
        \int_0^1 \left(1-t^{\omega/D}\right)^x\left(1-t^{1/D}\right)^{N-x} dt

    for
    :math:`x \in [x_l, x_u]`,
    :math:`M \in {\mathbb N}`,
    :math:`n \in [0, M]`,
    :math:`N \in [0, M]`,
    :math:`\omega > 0`,
    where
    :math:`x_l = \max(0, N - (M - n))`,
    :math:`x_u = \min(N, n)`,

    .. math::

        D = \omega(n - x) + ((M - n)-(N-x)),

    and the binomial coefficients are defined as

    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.

    `nchypergeom_wallenius` uses the BiasedUrn package by Agner Fog with
    permission for it to be distributed under SciPy's license.

    The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
    universally accepted; they are chosen for consistency with `hypergeom`.

    Note that Wallenius' noncentral hypergeometric distribution is distinct
    from Fisher's noncentral hypergeometric distribution, which models
    take a handful of objects from the bin at once, finding out afterwards
    that `N` objects were taken.
    When the odds ratio is unity, however, both distributions reduce to the
    ordinary hypergeometric distribution.

    %(after_notes)s

    References
    ----------
    .. [1] Agner Fog, "Biased Urn Theory".
           https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf

    .. [2] "Wallenius' noncentral hypergeometric distribution", Wikipedia,
           https://en.wikipedia.org/wiki/Wallenius'_noncentral_hypergeometric_distribution

    %(example)s

    """

    # Hooks consumed by the _nchypergeom_gen base class.
    rvs_name = "rvs_wallenius"
    dist = _PyWalleniusNCHypergeometric


nchypergeom_wallenius = nchypergeom_wallenius_gen(
    name='nchypergeom_wallenius',
    longname="A Wallenius' noncentral hypergeometric")
# Collect names of classes and objects in this module.
pairs = list(globals().copy().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)

# Export both the frozen distribution instances and their generator classes.
__all__ = _distn_names + _distn_gen_names
|
parrot/lib/python3.10/site-packages/scipy/stats/_distr_params.py
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Sane parameters for stats.distributions.
|
| 3 |
+
"""
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
distcont = [
|
| 7 |
+
['alpha', (3.5704770516650459,)],
|
| 8 |
+
['anglit', ()],
|
| 9 |
+
['arcsine', ()],
|
| 10 |
+
['argus', (1.0,)],
|
| 11 |
+
['beta', (2.3098496451481823, 0.62687954300963677)],
|
| 12 |
+
['betaprime', (5, 6)],
|
| 13 |
+
['bradford', (0.29891359763170633,)],
|
| 14 |
+
['burr', (10.5, 4.3)],
|
| 15 |
+
['burr12', (10, 4)],
|
| 16 |
+
['cauchy', ()],
|
| 17 |
+
['chi', (78,)],
|
| 18 |
+
['chi2', (55,)],
|
| 19 |
+
['cosine', ()],
|
| 20 |
+
['crystalball', (2.0, 3.0)],
|
| 21 |
+
['dgamma', (1.1023326088288166,)],
|
| 22 |
+
['dweibull', (2.0685080649914673,)],
|
| 23 |
+
['erlang', (10,)],
|
| 24 |
+
['expon', ()],
|
| 25 |
+
['exponnorm', (1.5,)],
|
| 26 |
+
['exponpow', (2.697119160358469,)],
|
| 27 |
+
['exponweib', (2.8923945291034436, 1.9505288745913174)],
|
| 28 |
+
['f', (29, 18)],
|
| 29 |
+
['fatiguelife', (29,)], # correction numargs = 1
|
| 30 |
+
['fisk', (3.0857548622253179,)],
|
| 31 |
+
['foldcauchy', (4.7164673455831894,)],
|
| 32 |
+
['foldnorm', (1.9521253373555869,)],
|
| 33 |
+
['gamma', (1.9932305483800778,)],
|
| 34 |
+
['gausshyper', (13.763771604130699, 3.1189636648681431,
|
| 35 |
+
2.5145980350183019, 5.1811649903971615)], # veryslow
|
| 36 |
+
['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
|
| 37 |
+
['genextreme', (-0.1,)],
|
| 38 |
+
['gengamma', (4.4162385429431925, 3.1193091679242761)],
|
| 39 |
+
['gengamma', (4.4162385429431925, -3.1193091679242761)],
|
| 40 |
+
['genhalflogistic', (0.77274727809929322,)],
|
| 41 |
+
['genhyperbolic', (0.5, 1.5, -0.5,)],
|
| 42 |
+
['geninvgauss', (2.3, 1.5)],
|
| 43 |
+
['genlogistic', (0.41192440799679475,)],
|
| 44 |
+
['gennorm', (1.2988442399460265,)],
|
| 45 |
+
['halfgennorm', (0.6748054997000371,)],
|
| 46 |
+
['genpareto', (0.1,)], # use case with finite moments
|
| 47 |
+
['gibrat', ()],
|
| 48 |
+
['gompertz', (0.94743713075105251,)],
|
| 49 |
+
['gumbel_l', ()],
|
| 50 |
+
['gumbel_r', ()],
|
| 51 |
+
['halfcauchy', ()],
|
| 52 |
+
['halflogistic', ()],
|
| 53 |
+
['halfnorm', ()],
|
| 54 |
+
['hypsecant', ()],
|
| 55 |
+
['invgamma', (4.0668996136993067,)],
|
| 56 |
+
['invgauss', (0.14546264555347513,)],
|
| 57 |
+
['invweibull', (10.58,)],
|
| 58 |
+
['irwinhall', (10,)],
|
| 59 |
+
['jf_skew_t', (8, 4)],
|
| 60 |
+
['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
|
| 61 |
+
['johnsonsu', (2.554395574161155, 2.2482281679651965)],
|
| 62 |
+
['kappa4', (0.0, 0.0)],
|
| 63 |
+
['kappa4', (-0.1, 0.1)],
|
| 64 |
+
['kappa4', (0.0, 0.1)],
|
| 65 |
+
['kappa4', (0.1, 0.0)],
|
| 66 |
+
['kappa3', (1.0,)],
|
| 67 |
+
['ksone', (1000,)], # replace 22 by 100 to avoid failing range, ticket 956
|
| 68 |
+
['kstwo', (10,)],
|
| 69 |
+
['kstwobign', ()],
|
| 70 |
+
['laplace', ()],
|
| 71 |
+
['laplace_asymmetric', (2,)],
|
| 72 |
+
['levy', ()],
|
| 73 |
+
['levy_l', ()],
|
| 74 |
+
['levy_stable', (1.8, -0.5)],
|
| 75 |
+
['loggamma', (0.41411931826052117,)],
|
| 76 |
+
['logistic', ()],
|
| 77 |
+
['loglaplace', (3.2505926592051435,)],
|
| 78 |
+
['lognorm', (0.95368226960575331,)],
|
| 79 |
+
['loguniform', (0.01, 1.25)],
|
| 80 |
+
['lomax', (1.8771398388773268,)],
|
| 81 |
+
['maxwell', ()],
|
| 82 |
+
['mielke', (10.4, 4.6)],
|
| 83 |
+
['moyal', ()],
|
| 84 |
+
['nakagami', (4.9673794866666237,)],
|
| 85 |
+
['ncf', (27, 27, 0.41578441799226107)],
|
| 86 |
+
['nct', (14, 0.24045031331198066)],
|
| 87 |
+
['ncx2', (21, 1.0560465975116415)],
|
| 88 |
+
['norm', ()],
|
| 89 |
+
['norminvgauss', (1.25, 0.5)],
|
| 90 |
+
['pareto', (2.621716532144454,)],
|
| 91 |
+
['pearson3', (0.1,)],
|
| 92 |
+
['pearson3', (-2,)],
|
| 93 |
+
['powerlaw', (1.6591133289905851,)],
|
| 94 |
+
['powerlaw', (0.6591133289905851,)],
|
| 95 |
+
['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
|
| 96 |
+
['powernorm', (4.4453652254590779,)],
|
| 97 |
+
['rayleigh', ()],
|
| 98 |
+
['rdist', (1.6,)],
|
| 99 |
+
['recipinvgauss', (0.63004267809369119,)],
|
| 100 |
+
['reciprocal', (0.01, 1.25)],
|
| 101 |
+
['rel_breitwigner', (36.545206797050334, )],
|
| 102 |
+
['rice', (0.7749725210111873,)],
|
| 103 |
+
['semicircular', ()],
|
| 104 |
+
['skewcauchy', (0.5,)],
|
| 105 |
+
['skewnorm', (4.0,)],
|
| 106 |
+
['studentized_range', (3.0, 10.0)],
|
| 107 |
+
['t', (2.7433514990818093,)],
|
| 108 |
+
['trapezoid', (0.2, 0.8)],
|
| 109 |
+
['triang', (0.15785029824528218,)],
|
| 110 |
+
['truncexpon', (4.6907725456810478,)],
|
| 111 |
+
['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
|
| 112 |
+
['truncnorm', (0.1, 2.)],
|
| 113 |
+
['truncpareto', (1.8, 5.3)],
|
| 114 |
+
['truncpareto', (2, 5)],
|
| 115 |
+
['truncweibull_min', (2.5, 0.25, 1.75)],
|
| 116 |
+
['tukeylambda', (3.1321477856738267,)],
|
| 117 |
+
['uniform', ()],
|
| 118 |
+
['vonmises', (3.9939042581071398,)],
|
| 119 |
+
['vonmises_line', (3.9939042581071398,)],
|
| 120 |
+
['wald', ()],
|
| 121 |
+
['weibull_max', (2.8687961709100187,)],
|
| 122 |
+
['weibull_min', (1.7866166930421596,)],
|
| 123 |
+
['wrapcauchy', (0.031071279018614728,)]]
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
distdiscrete = [
|
| 127 |
+
['bernoulli',(0.3,)],
|
| 128 |
+
['betabinom', (5, 2.3, 0.63)],
|
| 129 |
+
['betanbinom', (5, 9.3, 1)],
|
| 130 |
+
['binom', (5, 0.4)],
|
| 131 |
+
['boltzmann',(1.4, 19)],
|
| 132 |
+
['dlaplace', (0.8,)], # 0.5
|
| 133 |
+
['geom', (0.5,)],
|
| 134 |
+
['hypergeom',(30, 12, 6)],
|
| 135 |
+
['hypergeom',(21,3,12)], # numpy.random (3,18,12) numpy ticket:921
|
| 136 |
+
['hypergeom',(21,18,11)], # numpy.random (18,3,11) numpy ticket:921
|
| 137 |
+
['nchypergeom_fisher', (140, 80, 60, 0.5)],
|
| 138 |
+
['nchypergeom_wallenius', (140, 80, 60, 0.5)],
|
| 139 |
+
['logser', (0.6,)], # re-enabled, numpy ticket:921
|
| 140 |
+
['nbinom', (0.4, 0.4)], # from tickets: 583
|
| 141 |
+
['nbinom', (5, 0.5)],
|
| 142 |
+
['planck', (0.51,)], # 4.1
|
| 143 |
+
['poisson', (0.6,)],
|
| 144 |
+
['randint', (7, 31)],
|
| 145 |
+
['skellam', (15, 8)],
|
| 146 |
+
['zipf', (6.6,)],
|
| 147 |
+
['zipfian', (0.75, 15)],
|
| 148 |
+
['zipfian', (1.25, 10)],
|
| 149 |
+
['yulesimon', (11.0,)],
|
| 150 |
+
['nhypergeom', (20, 7, 1)]
|
| 151 |
+
]
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
invdistdiscrete = [
|
| 155 |
+
# In each of the following, at least one shape parameter is invalid
|
| 156 |
+
['hypergeom', (3, 3, 4)],
|
| 157 |
+
['nhypergeom', (5, 2, 8)],
|
| 158 |
+
['nchypergeom_fisher', (3, 3, 4, 1)],
|
| 159 |
+
['nchypergeom_wallenius', (3, 3, 4, 1)],
|
| 160 |
+
['bernoulli', (1.5, )],
|
| 161 |
+
['binom', (10, 1.5)],
|
| 162 |
+
['betabinom', (10, -0.4, -0.5)],
|
| 163 |
+
['betanbinom', (10, -0.4, -0.5)],
|
| 164 |
+
['boltzmann', (-1, 4)],
|
| 165 |
+
['dlaplace', (-0.5, )],
|
| 166 |
+
['geom', (1.5, )],
|
| 167 |
+
['logser', (1.5, )],
|
| 168 |
+
['nbinom', (10, 1.5)],
|
| 169 |
+
['planck', (-0.5, )],
|
| 170 |
+
['poisson', (-0.5, )],
|
| 171 |
+
['randint', (5, 2)],
|
| 172 |
+
['skellam', (-5, -2)],
|
| 173 |
+
['zipf', (-2, )],
|
| 174 |
+
['yulesimon', (-2, )],
|
| 175 |
+
['zipfian', (-0.75, 15)]
|
| 176 |
+
]
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
invdistcont = [
|
| 180 |
+
# In each of the following, at least one shape parameter is invalid
|
| 181 |
+
['alpha', (-1, )],
|
| 182 |
+
['anglit', ()],
|
| 183 |
+
['arcsine', ()],
|
| 184 |
+
['argus', (-1, )],
|
| 185 |
+
['beta', (-2, 2)],
|
| 186 |
+
['betaprime', (-2, 2)],
|
| 187 |
+
['bradford', (-1, )],
|
| 188 |
+
['burr', (-1, 1)],
|
| 189 |
+
['burr12', (-1, 1)],
|
| 190 |
+
['cauchy', ()],
|
| 191 |
+
['chi', (-1, )],
|
| 192 |
+
['chi2', (-1, )],
|
| 193 |
+
['cosine', ()],
|
| 194 |
+
['crystalball', (-1, 2)],
|
| 195 |
+
['dgamma', (-1, )],
|
| 196 |
+
['dweibull', (-1, )],
|
| 197 |
+
['erlang', (-1, )],
|
| 198 |
+
['expon', ()],
|
| 199 |
+
['exponnorm', (-1, )],
|
| 200 |
+
['exponweib', (1, -1)],
|
| 201 |
+
['exponpow', (-1, )],
|
| 202 |
+
['f', (10, -10)],
|
| 203 |
+
['fatiguelife', (-1, )],
|
| 204 |
+
['fisk', (-1, )],
|
| 205 |
+
['foldcauchy', (-1, )],
|
| 206 |
+
['foldnorm', (-1, )],
|
| 207 |
+
['genlogistic', (-1, )],
|
| 208 |
+
['gennorm', (-1, )],
|
| 209 |
+
['genpareto', (np.inf, )],
|
| 210 |
+
['genexpon', (1, 2, -3)],
|
| 211 |
+
['genextreme', (np.inf, )],
|
| 212 |
+
['genhyperbolic', (0.5, -0.5, -1.5,)],
|
| 213 |
+
['gausshyper', (1, 2, 3, -4)],
|
| 214 |
+
['gamma', (-1, )],
|
| 215 |
+
['gengamma', (-1, 0)],
|
| 216 |
+
['genhalflogistic', (-1, )],
|
| 217 |
+
['geninvgauss', (1, 0)],
|
| 218 |
+
['gibrat', ()],
|
| 219 |
+
['gompertz', (-1, )],
|
| 220 |
+
['gumbel_r', ()],
|
| 221 |
+
['gumbel_l', ()],
|
| 222 |
+
['halfcauchy', ()],
|
| 223 |
+
['halflogistic', ()],
|
| 224 |
+
['halfnorm', ()],
|
| 225 |
+
['halfgennorm', (-1, )],
|
| 226 |
+
['hypsecant', ()],
|
| 227 |
+
['invgamma', (-1, )],
|
| 228 |
+
['invgauss', (-1, )],
|
| 229 |
+
['invweibull', (-1, )],
|
| 230 |
+
['irwinhall', (-1,)],
|
| 231 |
+
['irwinhall', (0,)],
|
| 232 |
+
['irwinhall', (2.5,)],
|
| 233 |
+
['jf_skew_t', (-1, 0)],
|
| 234 |
+
['johnsonsb', (1, -2)],
|
| 235 |
+
['johnsonsu', (1, -2)],
|
| 236 |
+
['kappa4', (np.nan, 0)],
|
| 237 |
+
['kappa3', (-1, )],
|
| 238 |
+
['ksone', (-1, )],
|
| 239 |
+
['kstwo', (-1, )],
|
| 240 |
+
['kstwobign', ()],
|
| 241 |
+
['laplace', ()],
|
| 242 |
+
['laplace_asymmetric', (-1, )],
|
| 243 |
+
['levy', ()],
|
| 244 |
+
['levy_l', ()],
|
| 245 |
+
['levy_stable', (-1, 1)],
|
| 246 |
+
['logistic', ()],
|
| 247 |
+
['loggamma', (-1, )],
|
| 248 |
+
['loglaplace', (-1, )],
|
| 249 |
+
['lognorm', (-1, )],
|
| 250 |
+
['loguniform', (10, 5)],
|
| 251 |
+
['lomax', (-1, )],
|
| 252 |
+
['maxwell', ()],
|
| 253 |
+
['mielke', (1, -2)],
|
| 254 |
+
['moyal', ()],
|
| 255 |
+
['nakagami', (-1, )],
|
| 256 |
+
['ncx2', (-1, 2)],
|
| 257 |
+
['ncf', (10, 20, -1)],
|
| 258 |
+
['nct', (-1, 2)],
|
| 259 |
+
['norm', ()],
|
| 260 |
+
['norminvgauss', (5, -10)],
|
| 261 |
+
['pareto', (-1, )],
|
| 262 |
+
['pearson3', (np.nan, )],
|
| 263 |
+
['powerlaw', (-1, )],
|
| 264 |
+
['powerlognorm', (1, -2)],
|
| 265 |
+
['powernorm', (-1, )],
|
| 266 |
+
['rdist', (-1, )],
|
| 267 |
+
['rayleigh', ()],
|
| 268 |
+
['rice', (-1, )],
|
| 269 |
+
['recipinvgauss', (-1, )],
|
| 270 |
+
['semicircular', ()],
|
| 271 |
+
['skewnorm', (np.inf, )],
|
| 272 |
+
['studentized_range', (-1, 1)],
|
| 273 |
+
['rel_breitwigner', (-2, )],
|
| 274 |
+
['t', (-1, )],
|
| 275 |
+
['trapezoid', (0, 2)],
|
| 276 |
+
['triang', (2, )],
|
| 277 |
+
['truncexpon', (-1, )],
|
| 278 |
+
['truncnorm', (10, 5)],
|
| 279 |
+
['truncpareto', (-1, 5)],
|
| 280 |
+
['truncpareto', (1.8, .5)],
|
| 281 |
+
['truncweibull_min', (-2.5, 0.25, 1.75)],
|
| 282 |
+
['tukeylambda', (np.nan, )],
|
| 283 |
+
['uniform', ()],
|
| 284 |
+
['vonmises', (-1, )],
|
| 285 |
+
['vonmises_line', (-1, )],
|
| 286 |
+
['wald', ()],
|
| 287 |
+
['weibull_min', (-1, )],
|
| 288 |
+
['weibull_max', (-1, )],
|
| 289 |
+
['wrapcauchy', (2, )],
|
| 290 |
+
['reciprocal', (15, 10)],
|
| 291 |
+
['skewcauchy', (2, )]
|
| 292 |
+
]
|
parrot/lib/python3.10/site-packages/scipy/stats/_entropy.py
ADDED
|
@@ -0,0 +1,426 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Created on Fri Apr 2 09:06:05 2021
|
| 3 |
+
|
| 4 |
+
@author: matth
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
import math
|
| 9 |
+
import numpy as np
|
| 10 |
+
from scipy import special
|
| 11 |
+
from ._axis_nan_policy import _axis_nan_policy_factory, _broadcast_arrays
|
| 12 |
+
from scipy._lib._array_api import array_namespace
|
| 13 |
+
|
| 14 |
+
__all__ = ['entropy', 'differential_entropy']
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@_axis_nan_policy_factory(
|
| 18 |
+
lambda x: x,
|
| 19 |
+
n_samples=lambda kwgs: (
|
| 20 |
+
2 if ("qk" in kwgs and kwgs["qk"] is not None)
|
| 21 |
+
else 1
|
| 22 |
+
),
|
| 23 |
+
n_outputs=1, result_to_tuple=lambda x: (x,), paired=True,
|
| 24 |
+
too_small=-1 # entropy doesn't have too small inputs
|
| 25 |
+
)
|
| 26 |
+
def entropy(pk: np.typing.ArrayLike,
|
| 27 |
+
qk: np.typing.ArrayLike | None = None,
|
| 28 |
+
base: float | None = None,
|
| 29 |
+
axis: int = 0
|
| 30 |
+
) -> np.number | np.ndarray:
|
| 31 |
+
"""
|
| 32 |
+
Calculate the Shannon entropy/relative entropy of given distribution(s).
|
| 33 |
+
|
| 34 |
+
If only probabilities `pk` are given, the Shannon entropy is calculated as
|
| 35 |
+
``H = -sum(pk * log(pk))``.
|
| 36 |
+
|
| 37 |
+
If `qk` is not None, then compute the relative entropy
|
| 38 |
+
``D = sum(pk * log(pk / qk))``. This quantity is also known
|
| 39 |
+
as the Kullback-Leibler divergence.
|
| 40 |
+
|
| 41 |
+
This routine will normalize `pk` and `qk` if they don't sum to 1.
|
| 42 |
+
|
| 43 |
+
Parameters
|
| 44 |
+
----------
|
| 45 |
+
pk : array_like
|
| 46 |
+
Defines the (discrete) distribution. Along each axis-slice of ``pk``,
|
| 47 |
+
element ``i`` is the (possibly unnormalized) probability of event
|
| 48 |
+
``i``.
|
| 49 |
+
qk : array_like, optional
|
| 50 |
+
Sequence against which the relative entropy is computed. Should be in
|
| 51 |
+
the same format as `pk`.
|
| 52 |
+
base : float, optional
|
| 53 |
+
The logarithmic base to use, defaults to ``e`` (natural logarithm).
|
| 54 |
+
axis : int, optional
|
| 55 |
+
The axis along which the entropy is calculated. Default is 0.
|
| 56 |
+
|
| 57 |
+
Returns
|
| 58 |
+
-------
|
| 59 |
+
S : {float, array_like}
|
| 60 |
+
The calculated entropy.
|
| 61 |
+
|
| 62 |
+
Notes
|
| 63 |
+
-----
|
| 64 |
+
Informally, the Shannon entropy quantifies the expected uncertainty
|
| 65 |
+
inherent in the possible outcomes of a discrete random variable.
|
| 66 |
+
For example,
|
| 67 |
+
if messages consisting of sequences of symbols from a set are to be
|
| 68 |
+
encoded and transmitted over a noiseless channel, then the Shannon entropy
|
| 69 |
+
``H(pk)`` gives a tight lower bound for the average number of units of
|
| 70 |
+
information needed per symbol if the symbols occur with frequencies
|
| 71 |
+
governed by the discrete distribution `pk` [1]_. The choice of base
|
| 72 |
+
determines the choice of units; e.g., ``e`` for nats, ``2`` for bits, etc.
|
| 73 |
+
|
| 74 |
+
The relative entropy, ``D(pk|qk)``, quantifies the increase in the average
|
| 75 |
+
number of units of information needed per symbol if the encoding is
|
| 76 |
+
optimized for the probability distribution `qk` instead of the true
|
| 77 |
+
distribution `pk`. Informally, the relative entropy quantifies the expected
|
| 78 |
+
excess in surprise experienced if one believes the true distribution is
|
| 79 |
+
`qk` when it is actually `pk`.
|
| 80 |
+
|
| 81 |
+
A related quantity, the cross entropy ``CE(pk, qk)``, satisfies the
|
| 82 |
+
equation ``CE(pk, qk) = H(pk) + D(pk|qk)`` and can also be calculated with
|
| 83 |
+
the formula ``CE = -sum(pk * log(qk))``. It gives the average
|
| 84 |
+
number of units of information needed per symbol if an encoding is
|
| 85 |
+
optimized for the probability distribution `qk` when the true distribution
|
| 86 |
+
is `pk`. It is not computed directly by `entropy`, but it can be computed
|
| 87 |
+
using two calls to the function (see Examples).
|
| 88 |
+
|
| 89 |
+
See [2]_ for more information.
|
| 90 |
+
|
| 91 |
+
References
|
| 92 |
+
----------
|
| 93 |
+
.. [1] Shannon, C.E. (1948), A Mathematical Theory of Communication.
|
| 94 |
+
Bell System Technical Journal, 27: 379-423.
|
| 95 |
+
https://doi.org/10.1002/j.1538-7305.1948.tb01338.x
|
| 96 |
+
.. [2] Thomas M. Cover and Joy A. Thomas. 2006. Elements of Information
|
| 97 |
+
Theory (Wiley Series in Telecommunications and Signal Processing).
|
| 98 |
+
Wiley-Interscience, USA.
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
Examples
|
| 102 |
+
--------
|
| 103 |
+
The outcome of a fair coin is the most uncertain:
|
| 104 |
+
|
| 105 |
+
>>> import numpy as np
|
| 106 |
+
>>> from scipy.stats import entropy
|
| 107 |
+
>>> base = 2 # work in units of bits
|
| 108 |
+
>>> pk = np.array([1/2, 1/2]) # fair coin
|
| 109 |
+
>>> H = entropy(pk, base=base)
|
| 110 |
+
>>> H
|
| 111 |
+
1.0
|
| 112 |
+
>>> H == -np.sum(pk * np.log(pk)) / np.log(base)
|
| 113 |
+
True
|
| 114 |
+
|
| 115 |
+
The outcome of a biased coin is less uncertain:
|
| 116 |
+
|
| 117 |
+
>>> qk = np.array([9/10, 1/10]) # biased coin
|
| 118 |
+
>>> entropy(qk, base=base)
|
| 119 |
+
0.46899559358928117
|
| 120 |
+
|
| 121 |
+
The relative entropy between the fair coin and biased coin is calculated
|
| 122 |
+
as:
|
| 123 |
+
|
| 124 |
+
>>> D = entropy(pk, qk, base=base)
|
| 125 |
+
>>> D
|
| 126 |
+
0.7369655941662062
|
| 127 |
+
>>> D == np.sum(pk * np.log(pk/qk)) / np.log(base)
|
| 128 |
+
True
|
| 129 |
+
|
| 130 |
+
The cross entropy can be calculated as the sum of the entropy and
|
| 131 |
+
relative entropy`:
|
| 132 |
+
|
| 133 |
+
>>> CE = entropy(pk, base=base) + entropy(pk, qk, base=base)
|
| 134 |
+
>>> CE
|
| 135 |
+
1.736965594166206
|
| 136 |
+
>>> CE == -np.sum(pk * np.log(qk)) / np.log(base)
|
| 137 |
+
True
|
| 138 |
+
|
| 139 |
+
"""
|
| 140 |
+
if base is not None and base <= 0:
|
| 141 |
+
raise ValueError("`base` must be a positive number or `None`.")
|
| 142 |
+
|
| 143 |
+
xp = array_namespace(pk) if qk is None else array_namespace(pk, qk)
|
| 144 |
+
|
| 145 |
+
pk = xp.asarray(pk)
|
| 146 |
+
with np.errstate(invalid='ignore'):
|
| 147 |
+
pk = 1.0*pk / xp.sum(pk, axis=axis, keepdims=True) # type: ignore[operator]
|
| 148 |
+
if qk is None:
|
| 149 |
+
vec = special.entr(pk)
|
| 150 |
+
else:
|
| 151 |
+
qk = xp.asarray(qk)
|
| 152 |
+
pk, qk = _broadcast_arrays((pk, qk), axis=None, xp=xp) # don't ignore any axes
|
| 153 |
+
sum_kwargs = dict(axis=axis, keepdims=True)
|
| 154 |
+
qk = 1.0*qk / xp.sum(qk, **sum_kwargs) # type: ignore[operator, call-overload]
|
| 155 |
+
vec = special.rel_entr(pk, qk)
|
| 156 |
+
S = xp.sum(vec, axis=axis)
|
| 157 |
+
if base is not None:
|
| 158 |
+
S /= math.log(base)
|
| 159 |
+
return S
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _differential_entropy_is_too_small(samples, kwargs, axis=-1):
|
| 163 |
+
values = samples[0]
|
| 164 |
+
n = values.shape[axis]
|
| 165 |
+
window_length = kwargs.get("window_length",
|
| 166 |
+
math.floor(math.sqrt(n) + 0.5))
|
| 167 |
+
if not 2 <= 2 * window_length < n:
|
| 168 |
+
return True
|
| 169 |
+
return False
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@_axis_nan_policy_factory(
|
| 173 |
+
lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,),
|
| 174 |
+
too_small=_differential_entropy_is_too_small
|
| 175 |
+
)
|
| 176 |
+
def differential_entropy(
|
| 177 |
+
values: np.typing.ArrayLike,
|
| 178 |
+
*,
|
| 179 |
+
window_length: int | None = None,
|
| 180 |
+
base: float | None = None,
|
| 181 |
+
axis: int = 0,
|
| 182 |
+
method: str = "auto",
|
| 183 |
+
) -> np.number | np.ndarray:
|
| 184 |
+
r"""Given a sample of a distribution, estimate the differential entropy.
|
| 185 |
+
|
| 186 |
+
Several estimation methods are available using the `method` parameter. By
|
| 187 |
+
default, a method is selected based the size of the sample.
|
| 188 |
+
|
| 189 |
+
Parameters
|
| 190 |
+
----------
|
| 191 |
+
values : sequence
|
| 192 |
+
Sample from a continuous distribution.
|
| 193 |
+
window_length : int, optional
|
| 194 |
+
Window length for computing Vasicek estimate. Must be an integer
|
| 195 |
+
between 1 and half of the sample size. If ``None`` (the default), it
|
| 196 |
+
uses the heuristic value
|
| 197 |
+
|
| 198 |
+
.. math::
|
| 199 |
+
\left \lfloor \sqrt{n} + 0.5 \right \rfloor
|
| 200 |
+
|
| 201 |
+
where :math:`n` is the sample size. This heuristic was originally
|
| 202 |
+
proposed in [2]_ and has become common in the literature.
|
| 203 |
+
base : float, optional
|
| 204 |
+
The logarithmic base to use, defaults to ``e`` (natural logarithm).
|
| 205 |
+
axis : int, optional
|
| 206 |
+
The axis along which the differential entropy is calculated.
|
| 207 |
+
Default is 0.
|
| 208 |
+
method : {'vasicek', 'van es', 'ebrahimi', 'correa', 'auto'}, optional
|
| 209 |
+
The method used to estimate the differential entropy from the sample.
|
| 210 |
+
Default is ``'auto'``. See Notes for more information.
|
| 211 |
+
|
| 212 |
+
Returns
|
| 213 |
+
-------
|
| 214 |
+
entropy : float
|
| 215 |
+
The calculated differential entropy.
|
| 216 |
+
|
| 217 |
+
Notes
|
| 218 |
+
-----
|
| 219 |
+
This function will converge to the true differential entropy in the limit
|
| 220 |
+
|
| 221 |
+
.. math::
|
| 222 |
+
n \to \infty, \quad m \to \infty, \quad \frac{m}{n} \to 0
|
| 223 |
+
|
| 224 |
+
The optimal choice of ``window_length`` for a given sample size depends on
|
| 225 |
+
the (unknown) distribution. Typically, the smoother the density of the
|
| 226 |
+
distribution, the larger the optimal value of ``window_length`` [1]_.
|
| 227 |
+
|
| 228 |
+
The following options are available for the `method` parameter.
|
| 229 |
+
|
| 230 |
+
* ``'vasicek'`` uses the estimator presented in [1]_. This is
|
| 231 |
+
one of the first and most influential estimators of differential entropy.
|
| 232 |
+
* ``'van es'`` uses the bias-corrected estimator presented in [3]_, which
|
| 233 |
+
is not only consistent but, under some conditions, asymptotically normal.
|
| 234 |
+
* ``'ebrahimi'`` uses an estimator presented in [4]_, which was shown
|
| 235 |
+
in simulation to have smaller bias and mean squared error than
|
| 236 |
+
the Vasicek estimator.
|
| 237 |
+
* ``'correa'`` uses the estimator presented in [5]_ based on local linear
|
| 238 |
+
regression. In a simulation study, it had consistently smaller mean
|
| 239 |
+
square error than the Vasiceck estimator, but it is more expensive to
|
| 240 |
+
compute.
|
| 241 |
+
* ``'auto'`` selects the method automatically (default). Currently,
|
| 242 |
+
this selects ``'van es'`` for very small samples (<10), ``'ebrahimi'``
|
| 243 |
+
for moderate sample sizes (11-1000), and ``'vasicek'`` for larger
|
| 244 |
+
samples, but this behavior is subject to change in future versions.
|
| 245 |
+
|
| 246 |
+
All estimators are implemented as described in [6]_.
|
| 247 |
+
|
| 248 |
+
References
|
| 249 |
+
----------
|
| 250 |
+
.. [1] Vasicek, O. (1976). A test for normality based on sample entropy.
|
| 251 |
+
Journal of the Royal Statistical Society:
|
| 252 |
+
Series B (Methodological), 38(1), 54-59.
|
| 253 |
+
.. [2] Crzcgorzewski, P., & Wirczorkowski, R. (1999). Entropy-based
|
| 254 |
+
goodness-of-fit test for exponentiality. Communications in
|
| 255 |
+
Statistics-Theory and Methods, 28(5), 1183-1202.
|
| 256 |
+
.. [3] Van Es, B. (1992). Estimating functionals related to a density by a
|
| 257 |
+
class of statistics based on spacings. Scandinavian Journal of
|
| 258 |
+
Statistics, 61-72.
|
| 259 |
+
.. [4] Ebrahimi, N., Pflughoeft, K., & Soofi, E. S. (1994). Two measures
|
| 260 |
+
of sample entropy. Statistics & Probability Letters, 20(3), 225-234.
|
| 261 |
+
.. [5] Correa, J. C. (1995). A new estimator of entropy. Communications
|
| 262 |
+
in Statistics-Theory and Methods, 24(10), 2439-2449.
|
| 263 |
+
.. [6] Noughabi, H. A. (2015). Entropy Estimation Using Numerical Methods.
|
| 264 |
+
Annals of Data Science, 2(2), 231-241.
|
| 265 |
+
https://link.springer.com/article/10.1007/s40745-015-0045-9
|
| 266 |
+
|
| 267 |
+
Examples
|
| 268 |
+
--------
|
| 269 |
+
>>> import numpy as np
|
| 270 |
+
>>> from scipy.stats import differential_entropy, norm
|
| 271 |
+
|
| 272 |
+
Entropy of a standard normal distribution:
|
| 273 |
+
|
| 274 |
+
>>> rng = np.random.default_rng()
|
| 275 |
+
>>> values = rng.standard_normal(100)
|
| 276 |
+
>>> differential_entropy(values)
|
| 277 |
+
1.3407817436640392
|
| 278 |
+
|
| 279 |
+
Compare with the true entropy:
|
| 280 |
+
|
| 281 |
+
>>> float(norm.entropy())
|
| 282 |
+
1.4189385332046727
|
| 283 |
+
|
| 284 |
+
For several sample sizes between 5 and 1000, compare the accuracy of
|
| 285 |
+
the ``'vasicek'``, ``'van es'``, and ``'ebrahimi'`` methods. Specifically,
|
| 286 |
+
compare the root mean squared error (over 1000 trials) between the estimate
|
| 287 |
+
and the true differential entropy of the distribution.
|
| 288 |
+
|
| 289 |
+
>>> from scipy import stats
|
| 290 |
+
>>> import matplotlib.pyplot as plt
|
| 291 |
+
>>>
|
| 292 |
+
>>>
|
| 293 |
+
>>> def rmse(res, expected):
|
| 294 |
+
... '''Root mean squared error'''
|
| 295 |
+
... return np.sqrt(np.mean((res - expected)**2))
|
| 296 |
+
>>>
|
| 297 |
+
>>>
|
| 298 |
+
>>> a, b = np.log10(5), np.log10(1000)
|
| 299 |
+
>>> ns = np.round(np.logspace(a, b, 10)).astype(int)
|
| 300 |
+
>>> reps = 1000 # number of repetitions for each sample size
|
| 301 |
+
>>> expected = stats.expon.entropy()
|
| 302 |
+
>>>
|
| 303 |
+
>>> method_errors = {'vasicek': [], 'van es': [], 'ebrahimi': []}
|
| 304 |
+
>>> for method in method_errors:
|
| 305 |
+
... for n in ns:
|
| 306 |
+
... rvs = stats.expon.rvs(size=(reps, n), random_state=rng)
|
| 307 |
+
... res = stats.differential_entropy(rvs, method=method, axis=-1)
|
| 308 |
+
... error = rmse(res, expected)
|
| 309 |
+
... method_errors[method].append(error)
|
| 310 |
+
>>>
|
| 311 |
+
>>> for method, errors in method_errors.items():
|
| 312 |
+
... plt.loglog(ns, errors, label=method)
|
| 313 |
+
>>>
|
| 314 |
+
>>> plt.legend()
|
| 315 |
+
>>> plt.xlabel('sample size')
|
| 316 |
+
>>> plt.ylabel('RMSE (1000 trials)')
|
| 317 |
+
>>> plt.title('Entropy Estimator Error (Exponential Distribution)')
|
| 318 |
+
|
| 319 |
+
"""
|
| 320 |
+
values = np.asarray(values)
|
| 321 |
+
values = np.moveaxis(values, axis, -1)
|
| 322 |
+
n = values.shape[-1] # number of observations
|
| 323 |
+
|
| 324 |
+
if window_length is None:
|
| 325 |
+
window_length = math.floor(math.sqrt(n) + 0.5)
|
| 326 |
+
|
| 327 |
+
if not 2 <= 2 * window_length < n:
|
| 328 |
+
raise ValueError(
|
| 329 |
+
f"Window length ({window_length}) must be positive and less "
|
| 330 |
+
f"than half the sample size ({n}).",
|
| 331 |
+
)
|
| 332 |
+
|
| 333 |
+
if base is not None and base <= 0:
|
| 334 |
+
raise ValueError("`base` must be a positive number or `None`.")
|
| 335 |
+
|
| 336 |
+
sorted_data = np.sort(values, axis=-1)
|
| 337 |
+
|
| 338 |
+
methods = {"vasicek": _vasicek_entropy,
|
| 339 |
+
"van es": _van_es_entropy,
|
| 340 |
+
"correa": _correa_entropy,
|
| 341 |
+
"ebrahimi": _ebrahimi_entropy,
|
| 342 |
+
"auto": _vasicek_entropy}
|
| 343 |
+
method = method.lower()
|
| 344 |
+
if method not in methods:
|
| 345 |
+
message = f"`method` must be one of {set(methods)}"
|
| 346 |
+
raise ValueError(message)
|
| 347 |
+
|
| 348 |
+
if method == "auto":
|
| 349 |
+
if n <= 10:
|
| 350 |
+
method = 'van es'
|
| 351 |
+
elif n <= 1000:
|
| 352 |
+
method = 'ebrahimi'
|
| 353 |
+
else:
|
| 354 |
+
method = 'vasicek'
|
| 355 |
+
|
| 356 |
+
res = methods[method](sorted_data, window_length)
|
| 357 |
+
|
| 358 |
+
if base is not None:
|
| 359 |
+
res /= np.log(base)
|
| 360 |
+
|
| 361 |
+
return res
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def _pad_along_last_axis(X, m):
|
| 365 |
+
"""Pad the data for computing the rolling window difference."""
|
| 366 |
+
# scales a bit better than method in _vasicek_like_entropy
|
| 367 |
+
shape = np.array(X.shape)
|
| 368 |
+
shape[-1] = m
|
| 369 |
+
Xl = np.broadcast_to(X[..., [0]], shape) # [0] vs 0 to maintain shape
|
| 370 |
+
Xr = np.broadcast_to(X[..., [-1]], shape)
|
| 371 |
+
return np.concatenate((Xl, X, Xr), axis=-1)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def _vasicek_entropy(X, m):
    """Vasicek spacing estimator of differential entropy ([6] Eq. 1.3).

    `X` must already be sorted along its last axis; `m` is the window
    length. Returns the mean log of the scaled 2m-spacings.
    """
    n = X.shape[-1]
    # Replicate the boundary observations so every 2m-spacing is defined.
    padded = _pad_along_last_axis(X, m)
    spacings = padded[..., 2 * m:] - padded[..., :-2 * m]
    scaled = n / (2 * m) * spacings
    return np.log(scaled).mean(axis=-1)
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def _van_es_entropy(X, m):
|
| 384 |
+
"""Compute the van Es estimator as described in [6]."""
|
| 385 |
+
# No equation number, but referred to as HVE_mn.
|
| 386 |
+
# Typo: there should be a log within the summation.
|
| 387 |
+
n = X.shape[-1]
|
| 388 |
+
difference = X[..., m:] - X[..., :-m]
|
| 389 |
+
term1 = 1/(n-m) * np.sum(np.log((n+1)/m * difference), axis=-1)
|
| 390 |
+
k = np.arange(m, n+1)
|
| 391 |
+
return term1 + np.sum(1/k) + np.log(m) - np.log(n+1)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
def _ebrahimi_entropy(X, m):
    """Ebrahimi et al. spacing estimator (denoted HE_mn in [6]).

    A bias-corrected variant of the Vasicek estimator: spacings within
    `m` of either sample edge receive tapered weights. `X` must be
    sorted along its last axis. (No equation number in the reference.)
    """
    n = X.shape[-1]
    padded = _pad_along_last_axis(X, m)

    spacings = padded[..., 2 * m:] - padded[..., :-2 * m]

    # Weights c_i: 2 for interior points, linearly tapered toward 1 for
    # the first m and last m (1-indexed) positions.
    idx = np.arange(1, n + 1).astype(float)
    weights = np.full_like(idx, 2.0)
    near_start = idx <= m
    near_end = idx >= n - m + 1
    weights[near_start] = 1 + (idx[near_start] - 1) / m
    weights[near_end] = 1 + (n - idx[near_end]) / m

    return np.log(n * spacings / (weights * m)).mean(axis=-1)
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def _correa_entropy(X, m):
    """Correa local-linear-regression estimator (denoted HC_mn in [6]).

    Estimates the density near each order statistic from the slope of a
    least-squares line through the surrounding 2m+1 order statistics.
    `X` must be sorted along its last axis. (No equation number in the
    reference.)
    """
    n = X.shape[-1]
    padded = _pad_along_last_axis(X, m)

    # For each 1-indexed position i, the window covers offsets -m..m.
    i = np.arange(1, n + 1)
    offsets = np.arange(-m, m + 1)[:, None]
    window = i + offsets + m - 1  # 0-indexed positions into `padded`

    neighbors = padded[..., window]
    local_mean = neighbors.mean(axis=-2, keepdims=True)
    deviations = neighbors - local_mean
    # Least-squares slope components: offsets play the role of (j - i).
    num = np.sum(deviations * offsets, axis=-2)
    den = n * np.sum(deviations**2, axis=-2)
    return -np.log(num / den).mean(axis=-1)
|
parrot/lib/python3.10/site-packages/scipy/stats/_fit.py
ADDED
|
@@ -0,0 +1,1354 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
from collections import namedtuple
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy import optimize, stats
|
| 5 |
+
from scipy._lib._util import check_random_state
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def _combine_bounds(name, user_bounds, shape_domain, integral):
|
| 9 |
+
"""Intersection of user-defined bounds and distribution PDF/PMF domain"""
|
| 10 |
+
|
| 11 |
+
user_bounds = np.atleast_1d(user_bounds)
|
| 12 |
+
|
| 13 |
+
if user_bounds[0] > user_bounds[1]:
|
| 14 |
+
message = (f"There are no values for `{name}` on the interval "
|
| 15 |
+
f"{list(user_bounds)}.")
|
| 16 |
+
raise ValueError(message)
|
| 17 |
+
|
| 18 |
+
bounds = (max(user_bounds[0], shape_domain[0]),
|
| 19 |
+
min(user_bounds[1], shape_domain[1]))
|
| 20 |
+
|
| 21 |
+
if integral and (np.ceil(bounds[0]) > np.floor(bounds[1])):
|
| 22 |
+
message = (f"There are no integer values for `{name}` on the interval "
|
| 23 |
+
f"defined by the user-provided bounds and the domain "
|
| 24 |
+
"of the distribution.")
|
| 25 |
+
raise ValueError(message)
|
| 26 |
+
elif not integral and (bounds[0] > bounds[1]):
|
| 27 |
+
message = (f"There are no values for `{name}` on the interval "
|
| 28 |
+
f"defined by the user-provided bounds and the domain "
|
| 29 |
+
"of the distribution.")
|
| 30 |
+
raise ValueError(message)
|
| 31 |
+
|
| 32 |
+
if not np.all(np.isfinite(bounds)):
|
| 33 |
+
message = (f"The intersection of user-provided bounds for `{name}` "
|
| 34 |
+
f"and the domain of the distribution is not finite. Please "
|
| 35 |
+
f"provide finite bounds for shape `{name}` in `bounds`.")
|
| 36 |
+
raise ValueError(message)
|
| 37 |
+
|
| 38 |
+
return bounds
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class FitResult:
    r"""Result of fitting a discrete or continuous distribution to data

    Attributes
    ----------
    params : namedtuple
        A namedtuple containing the maximum likelihood estimates of the
        shape parameters, location, and (if applicable) scale of the
        distribution.
    success : bool or None
        Whether the optimizer considered the optimization to terminate
        successfully or not.
    message : str or None
        Any status message provided by the optimizer.

    """

    def __init__(self, dist, data, discrete, res):
        """Record the fitted distribution, data, and optimizer result.

        Parameters
        ----------
        dist : distribution object
            The distribution that was fit to the data.
        data : array_like
            The data to which the distribution was fit.
        discrete : bool
            True if `dist` is discrete (has a PMF rather than a PDF).
        res : optimizer result object
            Result returned by the optimizer; must have attribute ``x``
            holding the optimal parameter values, and may have
            ``success`` and ``message``.
        """
        self._dist = dist
        self._data = data
        self.discrete = discrete
        # Probability "x" function: PMF for discrete distributions, PDF
        # for continuous ones (whichever attribute the dist provides).
        self.pxf = getattr(dist, "pmf", None) or getattr(dist, "pdf", None)

        # `dist.shapes` is a comma-separated string of shape-parameter
        # names, or None when the distribution has no shape parameters.
        shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
        if not discrete:
            FitParams = namedtuple('FitParams', shape_names + ['loc', 'scale'])
        else:
            # Discrete distributions are fit without a scale parameter.
            FitParams = namedtuple('FitParams', shape_names + ['loc'])

        self.params = FitParams(*res.x)

        # Optimizer can report success even when nllf is infinite
        if res.success and not np.isfinite(self.nllf()):
            res.success = False
            res.message = ("Optimization converged to parameter values that "
                           "are inconsistent with the data.")
        self.success = getattr(res, "success", None)
        self.message = getattr(res, "message", None)

    def __repr__(self):
        """Return a column-aligned summary of params/success/message.

        Fields that are None (e.g. when the optimizer provided no
        ``success`` or ``message``) are omitted.
        """
        keys = ["params", "success", "message"]
        # Right-align the field names so the values line up in a column.
        m = max(map(len, keys)) + 1
        return '\n'.join([key.rjust(m) + ': ' + repr(getattr(self, key))
                          for key in keys if getattr(self, key) is not None])

    def nllf(self, params=None, data=None):
        """Negative log-likelihood function

        Evaluates the negative of the log-likelihood function of the provided
        data at the provided parameters.

        Parameters
        ----------
        params : tuple, optional
            The shape parameters, location, and (if applicable) scale of the
            distribution as a single tuple. Default is the maximum likelihood
            estimates (``self.params``).
        data : array_like, optional
            The data for which the log-likelihood function is to be evaluated.
            Default is the data to which the distribution was fit.

        Returns
        -------
        nllf : float
            The negative of the log-likelihood function.

        """
        params = params if params is not None else self.params
        data = data if data is not None else self._data
        # NOTE: scipy distribution objects spell this method `nnlf`.
        return self._dist.nnlf(theta=params, x=data)

    def plot(self, ax=None, *, plot_type="hist"):
        """Visually compare the data against the fitted distribution.

        Available only if `matplotlib` is installed.

        Parameters
        ----------
        ax : `matplotlib.axes.Axes`
            Axes object to draw the plot onto, otherwise uses the current Axes.
        plot_type : {"hist", "qq", "pp", "cdf"}
            Type of plot to draw. Options include:

            - "hist": Superposes the PDF/PMF of the fitted distribution
              over a normalized histogram of the data.
            - "qq": Scatter plot of theoretical quantiles against the
              empirical quantiles. Specifically, the x-coordinates are the
              values of the fitted distribution PPF evaluated at the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is the
              number of data points, and the y-coordinates are the sorted
              data points.
            - "pp": Scatter plot of theoretical percentiles against the
              observed percentiles. Specifically, the x-coordinates are the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
              the number of data points, and the y-coordinates are the values
              of the fitted distribution CDF evaluated at the sorted
              data points.
            - "cdf": Superposes the CDF of the fitted distribution over the
              empirical CDF. Specifically, the x-coordinates of the empirical
              CDF are the sorted data points, and the y-coordinates are the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
              the number of data points.

        Returns
        -------
        ax : `matplotlib.axes.Axes`
            The matplotlib Axes object on which the plot was drawn.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy import stats
        >>> import matplotlib.pyplot as plt  # matplotlib must be installed
        >>> rng = np.random.default_rng()
        >>> data = stats.nbinom(5, 0.5).rvs(size=1000, random_state=rng)
        >>> bounds = [(0, 30), (0, 1)]
        >>> res = stats.fit(stats.nbinom, data, bounds)
        >>> ax = res.plot()  # save matplotlib Axes object

        The `matplotlib.axes.Axes` object can be used to customize the plot.
        See `matplotlib.axes.Axes` documentation for details.

        >>> ax.set_xlabel('number of trials')  # customize axis label
        >>> ax.get_children()[0].set_linewidth(5)  # customize line widths
        >>> ax.legend()
        >>> plt.show()
        """
        # matplotlib is an optional dependency; fail with a clear message.
        try:
            import matplotlib  # noqa: F401
        except ModuleNotFoundError as exc:
            message = "matplotlib must be installed to use method `plot`."
            raise ModuleNotFoundError(message) from exc

        # 'histogram' is accepted as an undocumented alias of 'hist'.
        plots = {'histogram': self._hist_plot, 'qq': self._qq_plot,
                 'pp': self._pp_plot, 'cdf': self._cdf_plot,
                 'hist': self._hist_plot}
        if plot_type.lower() not in plots:
            message = f"`plot_type` must be one of {set(plots.keys())}"
            raise ValueError(message)
        plot = plots[plot_type.lower()]

        if ax is None:
            import matplotlib.pyplot as plt
            ax = plt.gca()

        fit_params = np.atleast_1d(self.params)

        return plot(ax=ax, fit_params=fit_params)

    def _hist_plot(self, ax, fit_params):
        """Plot the fitted PDF/PMF over a normalized histogram of the data."""
        from matplotlib.ticker import MaxNLocator

        # Plot over the distribution's support; fall back to the data
        # range when a support endpoint is infinite.
        support = self._dist.support(*fit_params)
        lb = support[0] if np.isfinite(support[0]) else min(self._data)
        ub = support[1] if np.isfinite(support[1]) else max(self._data)
        pxf = "PMF" if self.discrete else "PDF"

        if self.discrete:
            # Extra trailing point serves as the rightmost histogram bin
            # edge; it is excluded from the vlines via x[:-1].
            x = np.arange(lb, ub + 2)
            y = self.pxf(x, *fit_params)
            ax.vlines(x[:-1], 0, y[:-1], label='Fitted Distribution PMF',
                      color='C0')
            options = dict(density=True, bins=x, align='left', color='C1')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.set_xlabel('k')
            ax.set_ylabel('PMF')
        else:
            x = np.linspace(lb, ub, 200)
            y = self.pxf(x, *fit_params)
            ax.plot(x, y, '--', label='Fitted Distribution PDF', color='C0')
            options = dict(density=True, bins=50, align='mid', color='C1')
            ax.set_xlabel('x')
            ax.set_ylabel('PDF')

        # For small continuous samples a histogram is uninformative, so
        # show the raw observations on the x-axis instead.
        if len(self._data) > 50 or self.discrete:
            ax.hist(self._data, label="Histogram of Data", **options)
        else:
            ax.plot(self._data, np.zeros_like(self._data), "*",
                    label='Data', color='C1')

        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {pxf} and Histogram")
        ax.legend(*ax.get_legend_handles_labels())
        return ax

    def _qp_plot(self, ax, fit_params, qq):
        """Shared implementation of the Q-Q (``qq=True``) and P-P plots."""
        data = np.sort(self._data)
        ps = self._plotting_positions(len(self._data))

        if qq:
            qp = "Quantiles"
            plot_type = 'Q-Q'
            x = self._dist.ppf(ps, *fit_params)
            y = data
        else:
            qp = "Percentiles"
            plot_type = 'P-P'
            x = ps
            y = self._dist.cdf(data, *fit_params)

        ax.plot(x, y, '.', label=f'Fitted Distribution {plot_type}',
                color='C0', zorder=1)
        # Use a common square limit for both axes so the reference line
        # is the 45-degree diagonal.
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        lim = [min(xlim[0], ylim[0]), max(xlim[1], ylim[1])]
        if not qq:
            # Percentiles are confined to [0, 1].
            lim = max(lim[0], 0), min(lim[1], 1)

        if self.discrete and qq:
            # For a discrete distribution the ideal quantiles are integers.
            q_min, q_max = int(lim[0]), int(lim[1]+1)
            q_ideal = np.arange(q_min, q_max)
            # q_ideal = np.unique(self._dist.ppf(ps, *fit_params))
            ax.plot(q_ideal, q_ideal, 'o', label='Reference', color='k',
                    alpha=0.25, markerfacecolor='none', clip_on=True)
        elif self.discrete and not qq:
            # The intent of this is to match the plot that would be produced
            # if x were continuous on [0, 1] and y were cdf(ppf(x)).
            # It can be approximated by letting x = np.linspace(0, 1, 1000),
            # but this might not look great when zooming in. The vertical
            # portions are included to indicate where the transition occurs
            # where the data completely obscures the horizontal portions.
            p_min, p_max = lim
            a, b = self._dist.support(*fit_params)
            p_min = max(p_min, 0 if np.isfinite(a) else 1e-3)
            p_max = min(p_max, 1 if np.isfinite(b) else 1-1e-3)
            q_min, q_max = self._dist.ppf([p_min, p_max], *fit_params)
            qs = np.arange(q_min-1, q_max+1)
            ps = self._dist.cdf(qs, *fit_params)
            ax.step(ps, ps, '-', label='Reference', color='k', alpha=0.25,
                    clip_on=True)
        else:
            # Continuous case: the reference is the 45-degree line.
            ax.plot(lim, lim, '-', label='Reference', color='k', alpha=0.25,
                    clip_on=True)

        ax.set_xlim(lim)
        ax.set_ylim(lim)
        ax.set_xlabel(rf"Fitted $\tt {self._dist.name}$ Theoretical {qp}")
        ax.set_ylabel(f"Data {qp}")
        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {plot_type} Plot")
        ax.legend(*ax.get_legend_handles_labels())
        ax.set_aspect('equal')
        return ax

    def _qq_plot(self, **kwargs):
        """Quantile-quantile plot; see `_qp_plot`."""
        return self._qp_plot(qq=True, **kwargs)

    def _pp_plot(self, **kwargs):
        """Percentile-percentile plot; see `_qp_plot`."""
        return self._qp_plot(qq=False, **kwargs)

    def _plotting_positions(self, n, a=.5):
        """Return plotting positions ``(k - a)/(n + 1 - 2a)`` for k = 1..n."""
        # See https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_positions
        k = np.arange(1, n+1)
        return (k-a) / (n + 1 - 2*a)

    def _cdf_plot(self, ax, fit_params):
        """Plot the fitted CDF over the empirical CDF of the data."""
        data = np.sort(self._data)
        ecdf = self._plotting_positions(len(self._data))
        # With few distinct values, connected steps read better than dots.
        ls = '--' if len(np.unique(data)) < 30 else '.'
        xlabel = 'k' if self.discrete else 'x'
        ax.step(data, ecdf, ls, label='Empirical CDF', color='C1', zorder=0)

        # Evaluate the theoretical CDF across the visible x-range only.
        xlim = ax.get_xlim()
        q = np.linspace(*xlim, 300)
        tcdf = self._dist.cdf(q, *fit_params)

        ax.plot(q, tcdf, label='Fitted Distribution CDF', color='C0', zorder=1)
        ax.set_xlim(xlim)
        ax.set_ylim(0, 1)
        ax.set_xlabel(xlabel)
        ax.set_ylabel("CDF")
        ax.set_title(rf"Fitted $\tt {self._dist.name}$ and Empirical CDF")
        # Reverse legend order so the empirical CDF is listed last.
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1])
        return ax
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def fit(dist, data, bounds=None, *, guess=None, method='mle',
|
| 317 |
+
optimizer=optimize.differential_evolution):
|
| 318 |
+
r"""Fit a discrete or continuous distribution to data
|
| 319 |
+
|
| 320 |
+
Given a distribution, data, and bounds on the parameters of the
|
| 321 |
+
distribution, return maximum likelihood estimates of the parameters.
|
| 322 |
+
|
| 323 |
+
Parameters
|
| 324 |
+
----------
|
| 325 |
+
dist : `scipy.stats.rv_continuous` or `scipy.stats.rv_discrete`
|
| 326 |
+
The object representing the distribution to be fit to the data.
|
| 327 |
+
data : 1D array_like
|
| 328 |
+
The data to which the distribution is to be fit. If the data contain
|
| 329 |
+
any of ``np.nan``, ``np.inf``, or -``np.inf``, the fit method will
|
| 330 |
+
raise a ``ValueError``.
|
| 331 |
+
bounds : dict or sequence of tuples, optional
|
| 332 |
+
If a dictionary, each key is the name of a parameter of the
|
| 333 |
+
distribution, and the corresponding value is a tuple containing the
|
| 334 |
+
lower and upper bound on that parameter. If the distribution is
|
| 335 |
+
defined only for a finite range of values of that parameter, no entry
|
| 336 |
+
for that parameter is required; e.g., some distributions have
|
| 337 |
+
parameters which must be on the interval [0, 1]. Bounds for parameters
|
| 338 |
+
location (``loc``) and scale (``scale``) are optional; by default,
|
| 339 |
+
they are fixed to 0 and 1, respectively.
|
| 340 |
+
|
| 341 |
+
If a sequence, element *i* is a tuple containing the lower and upper
|
| 342 |
+
bound on the *i*\ th parameter of the distribution. In this case,
|
| 343 |
+
bounds for *all* distribution shape parameters must be provided.
|
| 344 |
+
Optionally, bounds for location and scale may follow the
|
| 345 |
+
distribution shape parameters.
|
| 346 |
+
|
| 347 |
+
If a shape is to be held fixed (e.g. if it is known), the
|
| 348 |
+
lower and upper bounds may be equal. If a user-provided lower or upper
|
| 349 |
+
bound is beyond a bound of the domain for which the distribution is
|
| 350 |
+
defined, the bound of the distribution's domain will replace the
|
| 351 |
+
user-provided value. Similarly, parameters which must be integral
|
| 352 |
+
will be constrained to integral values within the user-provided bounds.
|
| 353 |
+
guess : dict or array_like, optional
|
| 354 |
+
If a dictionary, each key is the name of a parameter of the
|
| 355 |
+
distribution, and the corresponding value is a guess for the value
|
| 356 |
+
of the parameter.
|
| 357 |
+
|
| 358 |
+
If a sequence, element *i* is a guess for the *i*\ th parameter of the
|
| 359 |
+
distribution. In this case, guesses for *all* distribution shape
|
| 360 |
+
parameters must be provided.
|
| 361 |
+
|
| 362 |
+
If `guess` is not provided, guesses for the decision variables will
|
| 363 |
+
not be passed to the optimizer. If `guess` is provided, guesses for
|
| 364 |
+
any missing parameters will be set at the mean of the lower and
|
| 365 |
+
upper bounds. Guesses for parameters which must be integral will be
|
| 366 |
+
rounded to integral values, and guesses that lie outside the
|
| 367 |
+
intersection of the user-provided bounds and the domain of the
|
| 368 |
+
distribution will be clipped.
|
| 369 |
+
method : {'mle', 'mse'}
|
| 370 |
+
With ``method="mle"`` (default), the fit is computed by minimizing
|
| 371 |
+
the negative log-likelihood function. A large, finite penalty
|
| 372 |
+
(rather than infinite negative log-likelihood) is applied for
|
| 373 |
+
observations beyond the support of the distribution.
|
| 374 |
+
With ``method="mse"``, the fit is computed by minimizing
|
| 375 |
+
the negative log-product spacing function. The same penalty is applied
|
| 376 |
+
for observations beyond the support. We follow the approach of [1]_,
|
| 377 |
+
which is generalized for samples with repeated observations.
|
| 378 |
+
optimizer : callable, optional
|
| 379 |
+
`optimizer` is a callable that accepts the following positional
|
| 380 |
+
argument.
|
| 381 |
+
|
| 382 |
+
fun : callable
|
| 383 |
+
The objective function to be optimized. `fun` accepts one argument
|
| 384 |
+
``x``, candidate shape parameters of the distribution, and returns
|
| 385 |
+
the objective function value given ``x``, `dist`, and the provided
|
| 386 |
+
`data`.
|
| 387 |
+
The job of `optimizer` is to find values of the decision variables
|
| 388 |
+
that minimizes `fun`.
|
| 389 |
+
|
| 390 |
+
`optimizer` must also accept the following keyword argument.
|
| 391 |
+
|
| 392 |
+
bounds : sequence of tuples
|
| 393 |
+
The bounds on values of the decision variables; each element will
|
| 394 |
+
be a tuple containing the lower and upper bound on a decision
|
| 395 |
+
variable.
|
| 396 |
+
|
| 397 |
+
If `guess` is provided, `optimizer` must also accept the following
|
| 398 |
+
keyword argument.
|
| 399 |
+
|
| 400 |
+
x0 : array_like
|
| 401 |
+
The guesses for each decision variable.
|
| 402 |
+
|
| 403 |
+
If the distribution has any shape parameters that must be integral or
|
| 404 |
+
if the distribution is discrete and the location parameter is not
|
| 405 |
+
fixed, `optimizer` must also accept the following keyword argument.
|
| 406 |
+
|
| 407 |
+
integrality : array_like of bools
|
| 408 |
+
For each decision variable, True if the decision variable
|
| 409 |
+
must be constrained to integer values and False if the decision
|
| 410 |
+
variable is continuous.
|
| 411 |
+
|
| 412 |
+
`optimizer` must return an object, such as an instance of
|
| 413 |
+
`scipy.optimize.OptimizeResult`, which holds the optimal values of
|
| 414 |
+
the decision variables in an attribute ``x``. If attributes
|
| 415 |
+
``fun``, ``status``, or ``message`` are provided, they will be
|
| 416 |
+
included in the result object returned by `fit`.
|
| 417 |
+
|
| 418 |
+
Returns
|
| 419 |
+
-------
|
| 420 |
+
result : `~scipy.stats._result_classes.FitResult`
|
| 421 |
+
An object with the following fields.
|
| 422 |
+
|
| 423 |
+
params : namedtuple
|
| 424 |
+
A namedtuple containing the maximum likelihood estimates of the
|
| 425 |
+
shape parameters, location, and (if applicable) scale of the
|
| 426 |
+
distribution.
|
| 427 |
+
success : bool or None
|
| 428 |
+
Whether the optimizer considered the optimization to terminate
|
| 429 |
+
successfully or not.
|
| 430 |
+
message : str or None
|
| 431 |
+
Any status message provided by the optimizer.
|
| 432 |
+
|
| 433 |
+
The object has the following method:
|
| 434 |
+
|
| 435 |
+
nllf(params=None, data=None)
|
| 436 |
+
By default, the negative log-likehood function at the fitted
|
| 437 |
+
`params` for the given `data`. Accepts a tuple containing
|
| 438 |
+
alternative shapes, location, and scale of the distribution and
|
| 439 |
+
an array of alternative data.
|
| 440 |
+
|
| 441 |
+
plot(ax=None)
|
| 442 |
+
Superposes the PDF/PMF of the fitted distribution over a normalized
|
| 443 |
+
histogram of the data.
|
| 444 |
+
|
| 445 |
+
See Also
|
| 446 |
+
--------
|
| 447 |
+
rv_continuous, rv_discrete
|
| 448 |
+
|
| 449 |
+
Notes
|
| 450 |
+
-----
|
| 451 |
+
Optimization is more likely to converge to the maximum likelihood estimate
|
| 452 |
+
when the user provides tight bounds containing the maximum likelihood
|
| 453 |
+
estimate. For example, when fitting a binomial distribution to data, the
|
| 454 |
+
number of experiments underlying each sample may be known, in which case
|
| 455 |
+
the corresponding shape parameter ``n`` can be fixed.
|
| 456 |
+
|
| 457 |
+
References
|
| 458 |
+
----------
|
| 459 |
+
.. [1] Shao, Yongzhao, and Marjorie G. Hahn. "Maximum product of spacings
|
| 460 |
+
method: a unified formulation with illustration of strong
|
| 461 |
+
consistency." Illinois Journal of Mathematics 43.3 (1999): 489-499.
|
| 462 |
+
|
| 463 |
+
Examples
|
| 464 |
+
--------
|
| 465 |
+
Suppose we wish to fit a distribution to the following data.
|
| 466 |
+
|
| 467 |
+
>>> import numpy as np
|
| 468 |
+
>>> from scipy import stats
|
| 469 |
+
>>> rng = np.random.default_rng()
|
| 470 |
+
>>> dist = stats.nbinom
|
| 471 |
+
>>> shapes = (5, 0.5)
|
| 472 |
+
>>> data = dist.rvs(*shapes, size=1000, random_state=rng)
|
| 473 |
+
|
| 474 |
+
Suppose we do not know how the data were generated, but we suspect that
|
| 475 |
+
it follows a negative binomial distribution with parameters *n* and *p*\.
|
| 476 |
+
(See `scipy.stats.nbinom`.) We believe that the parameter *n* was fewer
|
| 477 |
+
than 30, and we know that the parameter *p* must lie on the interval
|
| 478 |
+
[0, 1]. We record this information in a variable `bounds` and pass
|
| 479 |
+
this information to `fit`.
|
| 480 |
+
|
| 481 |
+
>>> bounds = [(0, 30), (0, 1)]
|
| 482 |
+
>>> res = stats.fit(dist, data, bounds)
|
| 483 |
+
|
| 484 |
+
`fit` searches within the user-specified `bounds` for the
|
| 485 |
+
values that best match the data (in the sense of maximum likelihood
|
| 486 |
+
estimation). In this case, it found shape values similar to those
|
| 487 |
+
from which the data were actually generated.
|
| 488 |
+
|
| 489 |
+
>>> res.params
|
| 490 |
+
FitParams(n=5.0, p=0.5028157644634368, loc=0.0) # may vary
|
| 491 |
+
|
| 492 |
+
We can visualize the results by superposing the probability mass function
|
| 493 |
+
of the distribution (with the shapes fit to the data) over a normalized
|
| 494 |
+
histogram of the data.
|
| 495 |
+
|
| 496 |
+
>>> import matplotlib.pyplot as plt # matplotlib must be installed to plot
|
| 497 |
+
>>> res.plot()
|
| 498 |
+
>>> plt.show()
|
| 499 |
+
|
| 500 |
+
Note that the estimate for *n* was exactly integral; this is because
|
| 501 |
+
the domain of the `nbinom` PMF includes only integral *n*, and the `nbinom`
|
| 502 |
+
object "knows" that. `nbinom` also knows that the shape *p* must be a
|
| 503 |
+
value between 0 and 1. In such a case - when the domain of the distribution
|
| 504 |
+
with respect to a parameter is finite - we are not required to specify
|
| 505 |
+
bounds for the parameter.
|
| 506 |
+
|
| 507 |
+
>>> bounds = {'n': (0, 30)} # omit parameter p using a `dict`
|
| 508 |
+
>>> res2 = stats.fit(dist, data, bounds)
|
| 509 |
+
>>> res2.params
|
| 510 |
+
FitParams(n=5.0, p=0.5016492009232932, loc=0.0) # may vary
|
| 511 |
+
|
| 512 |
+
If we wish to force the distribution to be fit with *n* fixed at 6, we can
|
| 513 |
+
set both the lower and upper bounds on *n* to 6. Note, however, that the
|
| 514 |
+
value of the objective function being optimized is typically worse (higher)
|
| 515 |
+
in this case.
|
| 516 |
+
|
| 517 |
+
>>> bounds = {'n': (6, 6)} # fix parameter `n`
|
| 518 |
+
>>> res3 = stats.fit(dist, data, bounds)
|
| 519 |
+
>>> res3.params
|
| 520 |
+
FitParams(n=6.0, p=0.5486556076755706, loc=0.0) # may vary
|
| 521 |
+
>>> res3.nllf() > res.nllf()
|
| 522 |
+
True # may vary
|
| 523 |
+
|
| 524 |
+
Note that the numerical results of the previous examples are typical, but
|
| 525 |
+
they may vary because the default optimizer used by `fit`,
|
| 526 |
+
`scipy.optimize.differential_evolution`, is stochastic. However, we can
|
| 527 |
+
customize the settings used by the optimizer to ensure reproducibility -
|
| 528 |
+
or even use a different optimizer entirely - using the `optimizer`
|
| 529 |
+
parameter.
|
| 530 |
+
|
| 531 |
+
>>> from scipy.optimize import differential_evolution
|
| 532 |
+
>>> rng = np.random.default_rng(767585560716548)
|
| 533 |
+
>>> def optimizer(fun, bounds, *, integrality):
|
| 534 |
+
... return differential_evolution(fun, bounds, strategy='best2bin',
|
| 535 |
+
... seed=rng, integrality=integrality)
|
| 536 |
+
>>> bounds = [(0, 30), (0, 1)]
|
| 537 |
+
>>> res4 = stats.fit(dist, data, bounds, optimizer=optimizer)
|
| 538 |
+
>>> res4.params
|
| 539 |
+
FitParams(n=5.0, p=0.5015183149259951, loc=0.0)
|
| 540 |
+
|
| 541 |
+
"""
|
| 542 |
+
# --- Input Validation / Standardization --- #
|
| 543 |
+
user_bounds = bounds
|
| 544 |
+
user_guess = guess
|
| 545 |
+
|
| 546 |
+
# distribution input validation and information collection
|
| 547 |
+
if hasattr(dist, "pdf"): # can't use isinstance for types
|
| 548 |
+
default_bounds = {'loc': (0, 0), 'scale': (1, 1)}
|
| 549 |
+
discrete = False
|
| 550 |
+
elif hasattr(dist, "pmf"):
|
| 551 |
+
default_bounds = {'loc': (0, 0)}
|
| 552 |
+
discrete = True
|
| 553 |
+
else:
|
| 554 |
+
message = ("`dist` must be an instance of `rv_continuous` "
|
| 555 |
+
"or `rv_discrete.`")
|
| 556 |
+
raise ValueError(message)
|
| 557 |
+
|
| 558 |
+
try:
|
| 559 |
+
param_info = dist._param_info()
|
| 560 |
+
except AttributeError as e:
|
| 561 |
+
message = (f"Distribution `{dist.name}` is not yet supported by "
|
| 562 |
+
"`scipy.stats.fit` because shape information has "
|
| 563 |
+
"not been defined.")
|
| 564 |
+
raise ValueError(message) from e
|
| 565 |
+
|
| 566 |
+
# data input validation
|
| 567 |
+
data = np.asarray(data)
|
| 568 |
+
if data.ndim != 1:
|
| 569 |
+
message = "`data` must be exactly one-dimensional."
|
| 570 |
+
raise ValueError(message)
|
| 571 |
+
if not (np.issubdtype(data.dtype, np.number)
|
| 572 |
+
and np.all(np.isfinite(data))):
|
| 573 |
+
message = "All elements of `data` must be finite numbers."
|
| 574 |
+
raise ValueError(message)
|
| 575 |
+
|
| 576 |
+
# bounds input validation and information collection
|
| 577 |
+
n_params = len(param_info)
|
| 578 |
+
n_shapes = n_params - (1 if discrete else 2)
|
| 579 |
+
param_list = [param.name for param in param_info]
|
| 580 |
+
param_names = ", ".join(param_list)
|
| 581 |
+
shape_names = ", ".join(param_list[:n_shapes])
|
| 582 |
+
|
| 583 |
+
if user_bounds is None:
|
| 584 |
+
user_bounds = {}
|
| 585 |
+
|
| 586 |
+
if isinstance(user_bounds, dict):
|
| 587 |
+
default_bounds.update(user_bounds)
|
| 588 |
+
user_bounds = default_bounds
|
| 589 |
+
user_bounds_array = np.empty((n_params, 2))
|
| 590 |
+
for i in range(n_params):
|
| 591 |
+
param_name = param_info[i].name
|
| 592 |
+
user_bound = user_bounds.pop(param_name, None)
|
| 593 |
+
if user_bound is None:
|
| 594 |
+
user_bound = param_info[i].domain
|
| 595 |
+
user_bounds_array[i] = user_bound
|
| 596 |
+
if user_bounds:
|
| 597 |
+
message = ("Bounds provided for the following unrecognized "
|
| 598 |
+
f"parameters will be ignored: {set(user_bounds)}")
|
| 599 |
+
warnings.warn(message, RuntimeWarning, stacklevel=2)
|
| 600 |
+
|
| 601 |
+
else:
|
| 602 |
+
try:
|
| 603 |
+
user_bounds = np.asarray(user_bounds, dtype=float)
|
| 604 |
+
if user_bounds.size == 0:
|
| 605 |
+
user_bounds = np.empty((0, 2))
|
| 606 |
+
except ValueError as e:
|
| 607 |
+
message = ("Each element of a `bounds` sequence must be a tuple "
|
| 608 |
+
"containing two elements: the lower and upper bound of "
|
| 609 |
+
"a distribution parameter.")
|
| 610 |
+
raise ValueError(message) from e
|
| 611 |
+
if (user_bounds.ndim != 2 or user_bounds.shape[1] != 2):
|
| 612 |
+
message = ("Each element of `bounds` must be a tuple specifying "
|
| 613 |
+
"the lower and upper bounds of a shape parameter")
|
| 614 |
+
raise ValueError(message)
|
| 615 |
+
if user_bounds.shape[0] < n_shapes:
|
| 616 |
+
message = (f"A `bounds` sequence must contain at least {n_shapes} "
|
| 617 |
+
"elements: tuples specifying the lower and upper "
|
| 618 |
+
f"bounds of all shape parameters {shape_names}.")
|
| 619 |
+
raise ValueError(message)
|
| 620 |
+
if user_bounds.shape[0] > n_params:
|
| 621 |
+
message = ("A `bounds` sequence may not contain more than "
|
| 622 |
+
f"{n_params} elements: tuples specifying the lower and "
|
| 623 |
+
"upper bounds of distribution parameters "
|
| 624 |
+
f"{param_names}.")
|
| 625 |
+
raise ValueError(message)
|
| 626 |
+
|
| 627 |
+
user_bounds_array = np.empty((n_params, 2))
|
| 628 |
+
user_bounds_array[n_shapes:] = list(default_bounds.values())
|
| 629 |
+
user_bounds_array[:len(user_bounds)] = user_bounds
|
| 630 |
+
|
| 631 |
+
user_bounds = user_bounds_array
|
| 632 |
+
validated_bounds = []
|
| 633 |
+
for i in range(n_params):
|
| 634 |
+
name = param_info[i].name
|
| 635 |
+
user_bound = user_bounds_array[i]
|
| 636 |
+
param_domain = param_info[i].domain
|
| 637 |
+
integral = param_info[i].integrality
|
| 638 |
+
combined = _combine_bounds(name, user_bound, param_domain, integral)
|
| 639 |
+
validated_bounds.append(combined)
|
| 640 |
+
|
| 641 |
+
bounds = np.asarray(validated_bounds)
|
| 642 |
+
integrality = [param.integrality for param in param_info]
|
| 643 |
+
|
| 644 |
+
# guess input validation
|
| 645 |
+
|
| 646 |
+
if user_guess is None:
|
| 647 |
+
guess_array = None
|
| 648 |
+
elif isinstance(user_guess, dict):
|
| 649 |
+
default_guess = {param.name: np.mean(bound)
|
| 650 |
+
for param, bound in zip(param_info, bounds)}
|
| 651 |
+
unrecognized = set(user_guess) - set(default_guess)
|
| 652 |
+
if unrecognized:
|
| 653 |
+
message = ("Guesses provided for the following unrecognized "
|
| 654 |
+
f"parameters will be ignored: {unrecognized}")
|
| 655 |
+
warnings.warn(message, RuntimeWarning, stacklevel=2)
|
| 656 |
+
default_guess.update(user_guess)
|
| 657 |
+
|
| 658 |
+
message = ("Each element of `guess` must be a scalar "
|
| 659 |
+
"guess for a distribution parameter.")
|
| 660 |
+
try:
|
| 661 |
+
guess_array = np.asarray([default_guess[param.name]
|
| 662 |
+
for param in param_info], dtype=float)
|
| 663 |
+
except ValueError as e:
|
| 664 |
+
raise ValueError(message) from e
|
| 665 |
+
|
| 666 |
+
else:
|
| 667 |
+
message = ("Each element of `guess` must be a scalar "
|
| 668 |
+
"guess for a distribution parameter.")
|
| 669 |
+
try:
|
| 670 |
+
user_guess = np.asarray(user_guess, dtype=float)
|
| 671 |
+
except ValueError as e:
|
| 672 |
+
raise ValueError(message) from e
|
| 673 |
+
if user_guess.ndim != 1:
|
| 674 |
+
raise ValueError(message)
|
| 675 |
+
if user_guess.shape[0] < n_shapes:
|
| 676 |
+
message = (f"A `guess` sequence must contain at least {n_shapes} "
|
| 677 |
+
"elements: scalar guesses for the distribution shape "
|
| 678 |
+
f"parameters {shape_names}.")
|
| 679 |
+
raise ValueError(message)
|
| 680 |
+
if user_guess.shape[0] > n_params:
|
| 681 |
+
message = ("A `guess` sequence may not contain more than "
|
| 682 |
+
f"{n_params} elements: scalar guesses for the "
|
| 683 |
+
f"distribution parameters {param_names}.")
|
| 684 |
+
raise ValueError(message)
|
| 685 |
+
|
| 686 |
+
guess_array = np.mean(bounds, axis=1)
|
| 687 |
+
guess_array[:len(user_guess)] = user_guess
|
| 688 |
+
|
| 689 |
+
if guess_array is not None:
|
| 690 |
+
guess_rounded = guess_array.copy()
|
| 691 |
+
|
| 692 |
+
guess_rounded[integrality] = np.round(guess_rounded[integrality])
|
| 693 |
+
rounded = np.where(guess_rounded != guess_array)[0]
|
| 694 |
+
for i in rounded:
|
| 695 |
+
message = (f"Guess for parameter `{param_info[i].name}` "
|
| 696 |
+
f"rounded from {guess_array[i]} to {guess_rounded[i]}.")
|
| 697 |
+
warnings.warn(message, RuntimeWarning, stacklevel=2)
|
| 698 |
+
|
| 699 |
+
guess_clipped = np.clip(guess_rounded, bounds[:, 0], bounds[:, 1])
|
| 700 |
+
clipped = np.where(guess_clipped != guess_rounded)[0]
|
| 701 |
+
for i in clipped:
|
| 702 |
+
message = (f"Guess for parameter `{param_info[i].name}` "
|
| 703 |
+
f"clipped from {guess_rounded[i]} to "
|
| 704 |
+
f"{guess_clipped[i]}.")
|
| 705 |
+
warnings.warn(message, RuntimeWarning, stacklevel=2)
|
| 706 |
+
|
| 707 |
+
guess = guess_clipped
|
| 708 |
+
else:
|
| 709 |
+
guess = None
|
| 710 |
+
|
| 711 |
+
# --- Fitting --- #
|
| 712 |
+
def nllf(free_params, data=data): # bind data NOW
|
| 713 |
+
with np.errstate(invalid='ignore', divide='ignore'):
|
| 714 |
+
return dist._penalized_nnlf(free_params, data)
|
| 715 |
+
|
| 716 |
+
def nlpsf(free_params, data=data): # bind data NOW
|
| 717 |
+
with np.errstate(invalid='ignore', divide='ignore'):
|
| 718 |
+
return dist._penalized_nlpsf(free_params, data)
|
| 719 |
+
|
| 720 |
+
methods = {'mle': nllf, 'mse': nlpsf}
|
| 721 |
+
objective = methods[method.lower()]
|
| 722 |
+
|
| 723 |
+
with np.errstate(invalid='ignore', divide='ignore'):
|
| 724 |
+
kwds = {}
|
| 725 |
+
if bounds is not None:
|
| 726 |
+
kwds['bounds'] = bounds
|
| 727 |
+
if np.any(integrality):
|
| 728 |
+
kwds['integrality'] = integrality
|
| 729 |
+
if guess is not None:
|
| 730 |
+
kwds['x0'] = guess
|
| 731 |
+
res = optimizer(objective, **kwds)
|
| 732 |
+
|
| 733 |
+
return FitResult(dist, data, discrete, res)
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
# Result container returned by `goodness_of_fit`: the fit of the
# null-hypothesized distribution to the data, the observed statistic and
# its Monte Carlo p-value, and the simulated null distribution of the
# statistic.
GoodnessOfFitResult = namedtuple(
    'GoodnessOfFitResult',
    ('fit_result', 'statistic', 'pvalue', 'null_distribution'),
)
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
def goodness_of_fit(dist, data, *, known_params=None, fit_params=None,
|
| 742 |
+
guessed_params=None, statistic='ad', n_mc_samples=9999,
|
| 743 |
+
random_state=None):
|
| 744 |
+
r"""
|
| 745 |
+
Perform a goodness of fit test comparing data to a distribution family.
|
| 746 |
+
|
| 747 |
+
Given a distribution family and data, perform a test of the null hypothesis
|
| 748 |
+
that the data were drawn from a distribution in that family. Any known
|
| 749 |
+
parameters of the distribution may be specified. Remaining parameters of
|
| 750 |
+
the distribution will be fit to the data, and the p-value of the test
|
| 751 |
+
is computed accordingly. Several statistics for comparing the distribution
|
| 752 |
+
to data are available.
|
| 753 |
+
|
| 754 |
+
Parameters
|
| 755 |
+
----------
|
| 756 |
+
dist : `scipy.stats.rv_continuous`
|
| 757 |
+
The object representing the distribution family under the null
|
| 758 |
+
hypothesis.
|
| 759 |
+
data : 1D array_like
|
| 760 |
+
Finite, uncensored data to be tested.
|
| 761 |
+
known_params : dict, optional
|
| 762 |
+
A dictionary containing name-value pairs of known distribution
|
| 763 |
+
parameters. Monte Carlo samples are randomly drawn from the
|
| 764 |
+
null-hypothesized distribution with these values of the parameters.
|
| 765 |
+
Before the statistic is evaluated for each Monte Carlo sample, only
|
| 766 |
+
remaining unknown parameters of the null-hypothesized distribution
|
| 767 |
+
family are fit to the samples; the known parameters are held fixed.
|
| 768 |
+
If all parameters of the distribution family are known, then the step
|
| 769 |
+
of fitting the distribution family to each sample is omitted.
|
| 770 |
+
fit_params : dict, optional
|
| 771 |
+
A dictionary containing name-value pairs of distribution parameters
|
| 772 |
+
that have already been fit to the data, e.g. using `scipy.stats.fit`
|
| 773 |
+
or the ``fit`` method of `dist`. Monte Carlo samples are drawn from the
|
| 774 |
+
null-hypothesized distribution with these specified values of the
|
| 775 |
+
parameter. On those Monte Carlo samples, however, these and all other
|
| 776 |
+
unknown parameters of the null-hypothesized distribution family are
|
| 777 |
+
fit before the statistic is evaluated.
|
| 778 |
+
guessed_params : dict, optional
|
| 779 |
+
A dictionary containing name-value pairs of distribution parameters
|
| 780 |
+
which have been guessed. These parameters are always considered as
|
| 781 |
+
free parameters and are fit both to the provided `data` as well as
|
| 782 |
+
to the Monte Carlo samples drawn from the null-hypothesized
|
| 783 |
+
distribution. The purpose of these `guessed_params` is to be used as
|
| 784 |
+
initial values for the numerical fitting procedure.
|
| 785 |
+
statistic : {"ad", "ks", "cvm", "filliben"} or callable, optional
|
| 786 |
+
The statistic used to compare data to a distribution after fitting
|
| 787 |
+
unknown parameters of the distribution family to the data. The
|
| 788 |
+
Anderson-Darling ("ad") [1]_, Kolmogorov-Smirnov ("ks") [1]_,
|
| 789 |
+
Cramer-von Mises ("cvm") [1]_, and Filliben ("filliben") [7]_
|
| 790 |
+
statistics are available. Alternatively, a callable with signature
|
| 791 |
+
``(dist, data, axis)`` may be supplied to compute the statistic. Here
|
| 792 |
+
``dist`` is a frozen distribution object (potentially with array
|
| 793 |
+
parameters), ``data`` is an array of Monte Carlo samples (of
|
| 794 |
+
compatible shape), and ``axis`` is the axis of ``data`` along which
|
| 795 |
+
the statistic must be computed.
|
| 796 |
+
n_mc_samples : int, default: 9999
|
| 797 |
+
The number of Monte Carlo samples drawn from the null hypothesized
|
| 798 |
+
distribution to form the null distribution of the statistic. The
|
| 799 |
+
sample size of each is the same as the given `data`.
|
| 800 |
+
random_state : {None, int, `numpy.random.Generator`,
|
| 801 |
+
`numpy.random.RandomState`}, optional
|
| 802 |
+
|
| 803 |
+
Pseudorandom number generator state used to generate the Monte Carlo
|
| 804 |
+
samples.
|
| 805 |
+
|
| 806 |
+
If `random_state` is ``None`` (default), the
|
| 807 |
+
`numpy.random.RandomState` singleton is used.
|
| 808 |
+
If `random_state` is an int, a new ``RandomState`` instance is used,
|
| 809 |
+
seeded with `random_state`.
|
| 810 |
+
If `random_state` is already a ``Generator`` or ``RandomState``
|
| 811 |
+
instance, then the provided instance is used.
|
| 812 |
+
|
| 813 |
+
Returns
|
| 814 |
+
-------
|
| 815 |
+
res : GoodnessOfFitResult
|
| 816 |
+
An object with the following attributes.
|
| 817 |
+
|
| 818 |
+
fit_result : `~scipy.stats._result_classes.FitResult`
|
| 819 |
+
An object representing the fit of the provided `dist` to `data`.
|
| 820 |
+
This object includes the values of distribution family parameters
|
| 821 |
+
that fully define the null-hypothesized distribution, that is,
|
| 822 |
+
the distribution from which Monte Carlo samples are drawn.
|
| 823 |
+
statistic : float
|
| 824 |
+
The value of the statistic comparing provided `data` to the
|
| 825 |
+
null-hypothesized distribution.
|
| 826 |
+
pvalue : float
|
| 827 |
+
The proportion of elements in the null distribution with
|
| 828 |
+
statistic values at least as extreme as the statistic value of the
|
| 829 |
+
provided `data`.
|
| 830 |
+
null_distribution : ndarray
|
| 831 |
+
The value of the statistic for each Monte Carlo sample
|
| 832 |
+
drawn from the null-hypothesized distribution.
|
| 833 |
+
|
| 834 |
+
Notes
|
| 835 |
+
-----
|
| 836 |
+
This is a generalized Monte Carlo goodness-of-fit procedure, special cases
|
| 837 |
+
of which correspond with various Anderson-Darling tests, Lilliefors' test,
|
| 838 |
+
etc. The test is described in [2]_, [3]_, and [4]_ as a parametric
|
| 839 |
+
bootstrap test. This is a Monte Carlo test in which parameters that
|
| 840 |
+
specify the distribution from which samples are drawn have been estimated
|
| 841 |
+
from the data. We describe the test using "Monte Carlo" rather than
|
| 842 |
+
"parametric bootstrap" throughout to avoid confusion with the more familiar
|
| 843 |
+
nonparametric bootstrap, and describe how the test is performed below.
|
| 844 |
+
|
| 845 |
+
*Traditional goodness of fit tests*
|
| 846 |
+
|
| 847 |
+
Traditionally, critical values corresponding with a fixed set of
|
| 848 |
+
significance levels are pre-calculated using Monte Carlo methods. Users
|
| 849 |
+
perform the test by calculating the value of the test statistic only for
|
| 850 |
+
their observed `data` and comparing this value to tabulated critical
|
| 851 |
+
values. This practice is not very flexible, as tables are not available for
|
| 852 |
+
all distributions and combinations of known and unknown parameter values.
|
| 853 |
+
Also, results can be inaccurate when critical values are interpolated from
|
| 854 |
+
limited tabulated data to correspond with the user's sample size and
|
| 855 |
+
fitted parameter values. To overcome these shortcomings, this function
|
| 856 |
+
allows the user to perform the Monte Carlo trials adapted to their
|
| 857 |
+
particular data.
|
| 858 |
+
|
| 859 |
+
*Algorithmic overview*
|
| 860 |
+
|
| 861 |
+
In brief, this routine executes the following steps:
|
| 862 |
+
|
| 863 |
+
1. Fit unknown parameters to the given `data`, thereby forming the
|
| 864 |
+
"null-hypothesized" distribution, and compute the statistic of
|
| 865 |
+
this pair of data and distribution.
|
| 866 |
+
2. Draw random samples from this null-hypothesized distribution.
|
| 867 |
+
3. Fit the unknown parameters to each random sample.
|
| 868 |
+
4. Calculate the statistic between each sample and the distribution that
|
| 869 |
+
has been fit to the sample.
|
| 870 |
+
5. Compare the value of the statistic corresponding with `data` from (1)
|
| 871 |
+
against the values of the statistic corresponding with the random
|
| 872 |
+
samples from (4). The p-value is the proportion of samples with a
|
| 873 |
+
statistic value greater than or equal to the statistic of the observed
|
| 874 |
+
data.
|
| 875 |
+
|
| 876 |
+
In more detail, the steps are as follows.
|
| 877 |
+
|
| 878 |
+
First, any unknown parameters of the distribution family specified by
|
| 879 |
+
`dist` are fit to the provided `data` using maximum likelihood estimation.
|
| 880 |
+
(One exception is the normal distribution with unknown location and scale:
|
| 881 |
+
we use the bias-corrected standard deviation ``np.std(data, ddof=1)`` for
|
| 882 |
+
the scale as recommended in [1]_.)
|
| 883 |
+
These values of the parameters specify a particular member of the
|
| 884 |
+
distribution family referred to as the "null-hypothesized distribution",
|
| 885 |
+
that is, the distribution from which the data were sampled under the null
|
| 886 |
+
hypothesis. The `statistic`, which compares data to a distribution, is
|
| 887 |
+
computed between `data` and the null-hypothesized distribution.
|
| 888 |
+
|
| 889 |
+
Next, many (specifically `n_mc_samples`) new samples, each containing the
|
| 890 |
+
same number of observations as `data`, are drawn from the
|
| 891 |
+
null-hypothesized distribution. All unknown parameters of the distribution
|
| 892 |
+
family `dist` are fit to *each resample*, and the `statistic` is computed
|
| 893 |
+
between each sample and its corresponding fitted distribution. These
|
| 894 |
+
values of the statistic form the Monte Carlo null distribution (not to be
|
| 895 |
+
confused with the "null-hypothesized distribution" above).
|
| 896 |
+
|
| 897 |
+
The p-value of the test is the proportion of statistic values in the Monte
|
| 898 |
+
Carlo null distribution that are at least as extreme as the statistic value
|
| 899 |
+
of the provided `data`. More precisely, the p-value is given by
|
| 900 |
+
|
| 901 |
+
.. math::
|
| 902 |
+
|
| 903 |
+
p = \frac{b + 1}
|
| 904 |
+
{m + 1}
|
| 905 |
+
|
| 906 |
+
where :math:`b` is the number of statistic values in the Monte Carlo null
|
| 907 |
+
distribution that are greater than or equal to the statistic value
|
| 908 |
+
calculated for `data`, and :math:`m` is the number of elements in the
|
| 909 |
+
Monte Carlo null distribution (`n_mc_samples`). The addition of :math:`1`
|
| 910 |
+
to the numerator and denominator can be thought of as including the
|
| 911 |
+
value of the statistic corresponding with `data` in the null distribution,
|
| 912 |
+
but a more formal explanation is given in [5]_.
|
| 913 |
+
|
| 914 |
+
*Limitations*
|
| 915 |
+
|
| 916 |
+
The test can be very slow for some distribution families because unknown
|
| 917 |
+
parameters of the distribution family must be fit to each of the Monte
|
| 918 |
+
Carlo samples, and for most distributions in SciPy, distribution fitting
|
| 919 |
+
performed via numerical optimization.
|
| 920 |
+
|
| 921 |
+
*Anti-Pattern*
|
| 922 |
+
|
| 923 |
+
For this reason, it may be tempting
|
| 924 |
+
to treat parameters of the distribution pre-fit to `data` (by the user)
|
| 925 |
+
as though they were `known_params`, as specification of all parameters of
|
| 926 |
+
the distribution precludes the need to fit the distribution to each Monte
|
| 927 |
+
Carlo sample. (This is essentially how the original Kolmogorov-Smirnov
|
| 928 |
+
test is performed.) Although such a test can provide evidence against the
|
| 929 |
+
null hypothesis, the test is conservative in the sense that small p-values
|
| 930 |
+
will tend to (greatly) *overestimate* the probability of making a type I
|
| 931 |
+
error (that is, rejecting the null hypothesis although it is true), and the
|
| 932 |
+
power of the test is low (that is, it is less likely to reject the null
|
| 933 |
+
hypothesis even when the null hypothesis is false).
|
| 934 |
+
This is because the Monte Carlo samples are less likely to agree with the
|
| 935 |
+
null-hypothesized distribution as well as `data`. This tends to increase
|
| 936 |
+
the values of the statistic recorded in the null distribution, so that a
|
| 937 |
+
larger number of them exceed the value of statistic for `data`, thereby
|
| 938 |
+
inflating the p-value.
|
| 939 |
+
|
| 940 |
+
References
|
| 941 |
+
----------
|
| 942 |
+
.. [1] M. A. Stephens (1974). "EDF Statistics for Goodness of Fit and
|
| 943 |
+
Some Comparisons." Journal of the American Statistical Association,
|
| 944 |
+
Vol. 69, pp. 730-737.
|
| 945 |
+
.. [2] W. Stute, W. G. Manteiga, and M. P. Quindimil (1993).
|
| 946 |
+
"Bootstrap based goodness-of-fit-tests." Metrika 40.1: 243-256.
|
| 947 |
+
.. [3] C. Genest, & B Rémillard. (2008). "Validity of the parametric
|
| 948 |
+
bootstrap for goodness-of-fit testing in semiparametric models."
|
| 949 |
+
Annales de l'IHP Probabilités et statistiques. Vol. 44. No. 6.
|
| 950 |
+
.. [4] I. Kojadinovic and J. Yan (2012). "Goodness-of-fit testing based on
|
| 951 |
+
a weighted bootstrap: A fast large-sample alternative to the
|
| 952 |
+
parametric bootstrap." Canadian Journal of Statistics 40.3: 480-500.
|
| 953 |
+
.. [5] B. Phipson and G. K. Smyth (2010). "Permutation P-values Should
|
| 954 |
+
Never Be Zero: Calculating Exact P-values When Permutations Are
|
| 955 |
+
Randomly Drawn." Statistical Applications in Genetics and Molecular
|
| 956 |
+
Biology 9.1.
|
| 957 |
+
.. [6] H. W. Lilliefors (1967). "On the Kolmogorov-Smirnov test for
|
| 958 |
+
normality with mean and variance unknown." Journal of the American
|
| 959 |
+
statistical Association 62.318: 399-402.
|
| 960 |
+
.. [7] Filliben, James J. "The probability plot correlation coefficient
|
| 961 |
+
test for normality." Technometrics 17.1 (1975): 111-117.
|
| 962 |
+
|
| 963 |
+
Examples
|
| 964 |
+
--------
|
| 965 |
+
A well-known test of the null hypothesis that data were drawn from a
|
| 966 |
+
given distribution is the Kolmogorov-Smirnov (KS) test, available in SciPy
|
| 967 |
+
as `scipy.stats.ks_1samp`. Suppose we wish to test whether the following
|
| 968 |
+
data:
|
| 969 |
+
|
| 970 |
+
>>> import numpy as np
|
| 971 |
+
>>> from scipy import stats
|
| 972 |
+
>>> rng = np.random.default_rng()
|
| 973 |
+
>>> x = stats.uniform.rvs(size=75, random_state=rng)
|
| 974 |
+
|
| 975 |
+
were sampled from a normal distribution. To perform a KS test, the
|
| 976 |
+
empirical distribution function of the observed data will be compared
|
| 977 |
+
against the (theoretical) cumulative distribution function of a normal
|
| 978 |
+
distribution. Of course, to do this, the normal distribution under the null
|
| 979 |
+
hypothesis must be fully specified. This is commonly done by first fitting
|
| 980 |
+
the ``loc`` and ``scale`` parameters of the distribution to the observed
|
| 981 |
+
data, then performing the test.
|
| 982 |
+
|
| 983 |
+
>>> loc, scale = np.mean(x), np.std(x, ddof=1)
|
| 984 |
+
>>> cdf = stats.norm(loc, scale).cdf
|
| 985 |
+
>>> stats.ks_1samp(x, cdf)
|
| 986 |
+
KstestResult(statistic=0.1119257570456813,
|
| 987 |
+
pvalue=0.2827756409939257,
|
| 988 |
+
statistic_location=0.7751845155861765,
|
| 989 |
+
statistic_sign=-1)
|
| 990 |
+
|
| 991 |
+
An advantage of the KS-test is that the p-value - the probability of
|
| 992 |
+
obtaining a value of the test statistic under the null hypothesis as
|
| 993 |
+
extreme as the value obtained from the observed data - can be calculated
|
| 994 |
+
exactly and efficiently. `goodness_of_fit` can only approximate these
|
| 995 |
+
results.
|
| 996 |
+
|
| 997 |
+
>>> known_params = {'loc': loc, 'scale': scale}
|
| 998 |
+
>>> res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
|
| 999 |
+
... statistic='ks', random_state=rng)
|
| 1000 |
+
>>> res.statistic, res.pvalue
|
| 1001 |
+
(0.1119257570456813, 0.2788)
|
| 1002 |
+
|
| 1003 |
+
The statistic matches exactly, but the p-value is estimated by forming
|
| 1004 |
+
a "Monte Carlo null distribution", that is, by explicitly drawing random
|
| 1005 |
+
samples from `scipy.stats.norm` with the provided parameters and
|
| 1006 |
+
calculating the statistic for each. The fraction of these statistic values
|
| 1007 |
+
at least as extreme as ``res.statistic`` approximates the exact p-value
|
| 1008 |
+
calculated by `scipy.stats.ks_1samp`.
|
| 1009 |
+
|
| 1010 |
+
However, in many cases, we would prefer to test only that the data were
|
| 1011 |
+
sampled from one of *any* member of the normal distribution family, not
|
| 1012 |
+
specifically from the normal distribution with the location and scale
|
| 1013 |
+
fitted to the observed sample. In this case, Lilliefors [6]_ argued that
|
| 1014 |
+
the KS test is far too conservative (that is, the p-value overstates
|
| 1015 |
+
the actual probability of rejecting a true null hypothesis) and thus lacks
|
| 1016 |
+
power - the ability to reject the null hypothesis when the null hypothesis
|
| 1017 |
+
is actually false.
|
| 1018 |
+
Indeed, our p-value above is approximately 0.28, which is far too large
|
| 1019 |
+
to reject the null hypothesis at any common significance level.
|
| 1020 |
+
|
| 1021 |
+
Consider why this might be. Note that in the KS test above, the statistic
|
| 1022 |
+
always compares data against the CDF of a normal distribution fitted to the
|
| 1023 |
+
*observed data*. This tends to reduce the value of the statistic for the
|
| 1024 |
+
observed data, but it is "unfair" when computing the statistic for other
|
| 1025 |
+
samples, such as those we randomly draw to form the Monte Carlo null
|
| 1026 |
+
distribution. It is easy to correct for this: whenever we compute the KS
|
| 1027 |
+
statistic of a sample, we use the CDF of a normal distribution fitted
|
| 1028 |
+
to *that sample*. The null distribution in this case has not been
|
| 1029 |
+
calculated exactly and is typically approximated using Monte Carlo methods
|
| 1030 |
+
as described above. This is where `goodness_of_fit` excels.
|
| 1031 |
+
|
| 1032 |
+
>>> res = stats.goodness_of_fit(stats.norm, x, statistic='ks',
|
| 1033 |
+
... random_state=rng)
|
| 1034 |
+
>>> res.statistic, res.pvalue
|
| 1035 |
+
(0.1119257570456813, 0.0196)
|
| 1036 |
+
|
| 1037 |
+
Indeed, this p-value is much smaller, and small enough to (correctly)
|
| 1038 |
+
reject the null hypothesis at common significance levels, including 5% and
|
| 1039 |
+
2.5%.
|
| 1040 |
+
|
| 1041 |
+
However, the KS statistic is not very sensitive to all deviations from
|
| 1042 |
+
normality. The original advantage of the KS statistic was the ability
|
| 1043 |
+
to compute the null distribution theoretically, but a more sensitive
|
| 1044 |
+
statistic - resulting in a higher test power - can be used now that we can
|
| 1045 |
+
approximate the null distribution
|
| 1046 |
+
computationally. The Anderson-Darling statistic [1]_ tends to be more
|
| 1047 |
+
sensitive, and critical values of the this statistic have been tabulated
|
| 1048 |
+
for various significance levels and sample sizes using Monte Carlo methods.
|
| 1049 |
+
|
| 1050 |
+
>>> res = stats.anderson(x, 'norm')
|
| 1051 |
+
>>> print(res.statistic)
|
| 1052 |
+
1.2139573337497467
|
| 1053 |
+
>>> print(res.critical_values)
|
| 1054 |
+
[0.549 0.625 0.75 0.875 1.041]
|
| 1055 |
+
>>> print(res.significance_level)
|
| 1056 |
+
[15. 10. 5. 2.5 1. ]
|
| 1057 |
+
|
| 1058 |
+
Here, the observed value of the statistic exceeds the critical value
|
| 1059 |
+
corresponding with a 1% significance level. This tells us that the p-value
|
| 1060 |
+
of the observed data is less than 1%, but what is it? We could interpolate
|
| 1061 |
+
from these (already-interpolated) values, but `goodness_of_fit` can
|
| 1062 |
+
estimate it directly.
|
| 1063 |
+
|
| 1064 |
+
>>> res = stats.goodness_of_fit(stats.norm, x, statistic='ad',
|
| 1065 |
+
... random_state=rng)
|
| 1066 |
+
>>> res.statistic, res.pvalue
|
| 1067 |
+
(1.2139573337497467, 0.0034)
|
| 1068 |
+
|
| 1069 |
+
A further advantage is that use of `goodness_of_fit` is not limited to
|
| 1070 |
+
a particular set of distributions or conditions on which parameters
|
| 1071 |
+
are known versus which must be estimated from data. Instead,
|
| 1072 |
+
`goodness_of_fit` can estimate p-values relatively quickly for any
|
| 1073 |
+
distribution with a sufficiently fast and reliable ``fit`` method. For
|
| 1074 |
+
instance, here we perform a goodness of fit test using the Cramer-von Mises
|
| 1075 |
+
statistic against the Rayleigh distribution with known location and unknown
|
| 1076 |
+
scale.
|
| 1077 |
+
|
| 1078 |
+
>>> rng = np.random.default_rng()
|
| 1079 |
+
>>> x = stats.chi(df=2.2, loc=0, scale=2).rvs(size=1000, random_state=rng)
|
| 1080 |
+
>>> res = stats.goodness_of_fit(stats.rayleigh, x, statistic='cvm',
|
| 1081 |
+
... known_params={'loc': 0}, random_state=rng)
|
| 1082 |
+
|
| 1083 |
+
This executes fairly quickly, but to check the reliability of the ``fit``
|
| 1084 |
+
method, we should inspect the fit result.
|
| 1085 |
+
|
| 1086 |
+
>>> res.fit_result # location is as specified, and scale is reasonable
|
| 1087 |
+
params: FitParams(loc=0.0, scale=2.1026719844231243)
|
| 1088 |
+
success: True
|
| 1089 |
+
message: 'The fit was performed successfully.'
|
| 1090 |
+
>>> import matplotlib.pyplot as plt # matplotlib must be installed to plot
|
| 1091 |
+
>>> res.fit_result.plot()
|
| 1092 |
+
>>> plt.show()
|
| 1093 |
+
|
| 1094 |
+
If the distribution is not fit to the observed data as well as possible,
|
| 1095 |
+
the test may not control the type I error rate, that is, the chance of
|
| 1096 |
+
rejecting the null hypothesis even when it is true.
|
| 1097 |
+
|
| 1098 |
+
We should also look for extreme outliers in the null distribution that
|
| 1099 |
+
may be caused by unreliable fitting. These do not necessarily invalidate
|
| 1100 |
+
the result, but they tend to reduce the test's power.
|
| 1101 |
+
|
| 1102 |
+
>>> _, ax = plt.subplots()
|
| 1103 |
+
>>> ax.hist(np.log10(res.null_distribution))
|
| 1104 |
+
>>> ax.set_xlabel("log10 of CVM statistic under the null hypothesis")
|
| 1105 |
+
>>> ax.set_ylabel("Frequency")
|
| 1106 |
+
>>> ax.set_title("Histogram of the Monte Carlo null distribution")
|
| 1107 |
+
>>> plt.show()
|
| 1108 |
+
|
| 1109 |
+
This plot seems reassuring.
|
| 1110 |
+
|
| 1111 |
+
If ``fit`` method is working reliably, and if the distribution of the test
|
| 1112 |
+
statistic is not particularly sensitive to the values of the fitted
|
| 1113 |
+
parameters, then the p-value provided by `goodness_of_fit` is expected to
|
| 1114 |
+
be a good approximation.
|
| 1115 |
+
|
| 1116 |
+
>>> res.statistic, res.pvalue
|
| 1117 |
+
(0.2231991510248692, 0.0525)
|
| 1118 |
+
|
| 1119 |
+
"""
|
| 1120 |
+
args = _gof_iv(dist, data, known_params, fit_params, guessed_params,
|
| 1121 |
+
statistic, n_mc_samples, random_state)
|
| 1122 |
+
(dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
|
| 1123 |
+
guessed_rfd_params, statistic, n_mc_samples_int, random_state) = args
|
| 1124 |
+
|
| 1125 |
+
# Fit null hypothesis distribution to data
|
| 1126 |
+
nhd_fit_fun = _get_fit_fun(dist, data, guessed_nhd_params,
|
| 1127 |
+
fixed_nhd_params)
|
| 1128 |
+
nhd_vals = nhd_fit_fun(data)
|
| 1129 |
+
nhd_dist = dist(*nhd_vals)
|
| 1130 |
+
|
| 1131 |
+
def rvs(size):
|
| 1132 |
+
return nhd_dist.rvs(size=size, random_state=random_state)
|
| 1133 |
+
|
| 1134 |
+
# Define statistic
|
| 1135 |
+
fit_fun = _get_fit_fun(dist, data, guessed_rfd_params, fixed_rfd_params)
|
| 1136 |
+
if callable(statistic):
|
| 1137 |
+
compare_fun = statistic
|
| 1138 |
+
else:
|
| 1139 |
+
compare_fun = _compare_dict[statistic]
|
| 1140 |
+
alternative = getattr(compare_fun, 'alternative', 'greater')
|
| 1141 |
+
|
| 1142 |
+
def statistic_fun(data, axis):
|
| 1143 |
+
# Make things simple by always working along the last axis.
|
| 1144 |
+
data = np.moveaxis(data, axis, -1)
|
| 1145 |
+
rfd_vals = fit_fun(data)
|
| 1146 |
+
rfd_dist = dist(*rfd_vals)
|
| 1147 |
+
return compare_fun(rfd_dist, data, axis=-1)
|
| 1148 |
+
|
| 1149 |
+
res = stats.monte_carlo_test(data, rvs, statistic_fun, vectorized=True,
|
| 1150 |
+
n_resamples=n_mc_samples, axis=-1,
|
| 1151 |
+
alternative=alternative)
|
| 1152 |
+
opt_res = optimize.OptimizeResult()
|
| 1153 |
+
opt_res.success = True
|
| 1154 |
+
opt_res.message = "The fit was performed successfully."
|
| 1155 |
+
opt_res.x = nhd_vals
|
| 1156 |
+
# Only continuous distributions for now, hence discrete=False
|
| 1157 |
+
# There's no fundamental limitation; it's just that we're not using
|
| 1158 |
+
# stats.fit, discrete distributions don't have `fit` method, and
|
| 1159 |
+
# we haven't written any vectorized fit functions for a discrete
|
| 1160 |
+
# distribution yet.
|
| 1161 |
+
return GoodnessOfFitResult(FitResult(dist, data, False, opt_res),
|
| 1162 |
+
res.statistic, res.pvalue,
|
| 1163 |
+
res.null_distribution)
|
| 1164 |
+
|
| 1165 |
+
|
| 1166 |
+
def _get_fit_fun(dist, data, guessed_params, fixed_params):
|
| 1167 |
+
|
| 1168 |
+
shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
|
| 1169 |
+
param_names = shape_names + ['loc', 'scale']
|
| 1170 |
+
fparam_names = ['f'+name for name in param_names]
|
| 1171 |
+
all_fixed = not set(fparam_names).difference(fixed_params)
|
| 1172 |
+
guessed_shapes = [guessed_params.pop(x, None)
|
| 1173 |
+
for x in shape_names if x in guessed_params]
|
| 1174 |
+
|
| 1175 |
+
if all_fixed:
|
| 1176 |
+
def fit_fun(data):
|
| 1177 |
+
return [fixed_params[name] for name in fparam_names]
|
| 1178 |
+
# Define statistic, including fitting distribution to data
|
| 1179 |
+
elif dist in _fit_funs:
|
| 1180 |
+
def fit_fun(data):
|
| 1181 |
+
params = _fit_funs[dist](data, **fixed_params)
|
| 1182 |
+
params = np.asarray(np.broadcast_arrays(*params))
|
| 1183 |
+
if params.ndim > 1:
|
| 1184 |
+
params = params[..., np.newaxis]
|
| 1185 |
+
return params
|
| 1186 |
+
else:
|
| 1187 |
+
def fit_fun_1d(data):
|
| 1188 |
+
return dist.fit(data, *guessed_shapes, **guessed_params,
|
| 1189 |
+
**fixed_params)
|
| 1190 |
+
|
| 1191 |
+
def fit_fun(data):
|
| 1192 |
+
params = np.apply_along_axis(fit_fun_1d, axis=-1, arr=data)
|
| 1193 |
+
if params.ndim > 1:
|
| 1194 |
+
params = params.T[..., np.newaxis]
|
| 1195 |
+
return params
|
| 1196 |
+
|
| 1197 |
+
return fit_fun
|
| 1198 |
+
|
| 1199 |
+
|
| 1200 |
+
# Vectorized fitting functions. These are to accept ND `data` in which each
|
| 1201 |
+
# row (slice along last axis) is a sample to fit and scalar fixed parameters.
|
| 1202 |
+
# They return a tuple of shape parameter arrays, each of shape data.shape[:-1].
|
| 1203 |
+
def _fit_norm(data, floc=None, fscale=None):
|
| 1204 |
+
loc = floc
|
| 1205 |
+
scale = fscale
|
| 1206 |
+
if loc is None and scale is None:
|
| 1207 |
+
loc = np.mean(data, axis=-1)
|
| 1208 |
+
scale = np.std(data, ddof=1, axis=-1)
|
| 1209 |
+
elif loc is None:
|
| 1210 |
+
loc = np.mean(data, axis=-1)
|
| 1211 |
+
elif scale is None:
|
| 1212 |
+
scale = np.sqrt(((data - loc)**2).mean(axis=-1))
|
| 1213 |
+
return loc, scale
|
| 1214 |
+
|
| 1215 |
+
|
| 1216 |
+
_fit_funs = {stats.norm: _fit_norm} # type: ignore[attr-defined]
|
| 1217 |
+
|
| 1218 |
+
|
| 1219 |
+
# Vectorized goodness of fit statistic functions. These accept a frozen
|
| 1220 |
+
# distribution object and `data` in which each row (slice along last axis) is
|
| 1221 |
+
# a sample.
|
| 1222 |
+
|
| 1223 |
+
|
| 1224 |
+
def _anderson_darling(dist, data, axis):
|
| 1225 |
+
x = np.sort(data, axis=-1)
|
| 1226 |
+
n = data.shape[-1]
|
| 1227 |
+
i = np.arange(1, n+1)
|
| 1228 |
+
Si = (2*i - 1)/n * (dist.logcdf(x) + dist.logsf(x[..., ::-1]))
|
| 1229 |
+
S = np.sum(Si, axis=-1)
|
| 1230 |
+
return -n - S
|
| 1231 |
+
|
| 1232 |
+
|
| 1233 |
+
def _compute_dplus(cdfvals): # adapted from _stats_py before gh-17062
|
| 1234 |
+
n = cdfvals.shape[-1]
|
| 1235 |
+
return (np.arange(1.0, n + 1) / n - cdfvals).max(axis=-1)
|
| 1236 |
+
|
| 1237 |
+
|
| 1238 |
+
def _compute_dminus(cdfvals):
|
| 1239 |
+
n = cdfvals.shape[-1]
|
| 1240 |
+
return (cdfvals - np.arange(0.0, n)/n).max(axis=-1)
|
| 1241 |
+
|
| 1242 |
+
|
| 1243 |
+
def _kolmogorov_smirnov(dist, data, axis):
|
| 1244 |
+
x = np.sort(data, axis=-1)
|
| 1245 |
+
cdfvals = dist.cdf(x)
|
| 1246 |
+
Dplus = _compute_dplus(cdfvals) # always works along last axis
|
| 1247 |
+
Dminus = _compute_dminus(cdfvals)
|
| 1248 |
+
return np.maximum(Dplus, Dminus)
|
| 1249 |
+
|
| 1250 |
+
|
| 1251 |
+
def _corr(X, M):
|
| 1252 |
+
# Correlation coefficient r, simplified and vectorized as we need it.
|
| 1253 |
+
# See [7] Equation (2). Lemma 1/2 are only for distributions symmetric
|
| 1254 |
+
# about 0.
|
| 1255 |
+
Xm = X.mean(axis=-1, keepdims=True)
|
| 1256 |
+
Mm = M.mean(axis=-1, keepdims=True)
|
| 1257 |
+
num = np.sum((X - Xm) * (M - Mm), axis=-1)
|
| 1258 |
+
den = np.sqrt(np.sum((X - Xm)**2, axis=-1) * np.sum((M - Mm)**2, axis=-1))
|
| 1259 |
+
return num/den
|
| 1260 |
+
|
| 1261 |
+
|
| 1262 |
+
def _filliben(dist, data, axis):
|
| 1263 |
+
# [7] Section 8 # 1
|
| 1264 |
+
X = np.sort(data, axis=-1)
|
| 1265 |
+
|
| 1266 |
+
# [7] Section 8 # 2
|
| 1267 |
+
n = data.shape[-1]
|
| 1268 |
+
k = np.arange(1, n+1)
|
| 1269 |
+
# Filliben used an approximation for the uniform distribution order
|
| 1270 |
+
# statistic medians.
|
| 1271 |
+
# m = (k - .3175)/(n + 0.365)
|
| 1272 |
+
# m[-1] = 0.5**(1/n)
|
| 1273 |
+
# m[0] = 1 - m[-1]
|
| 1274 |
+
# We can just as easily use the (theoretically) exact values. See e.g.
|
| 1275 |
+
# https://en.wikipedia.org/wiki/Order_statistic
|
| 1276 |
+
# "Order statistics sampled from a uniform distribution"
|
| 1277 |
+
m = stats.beta(k, n + 1 - k).median()
|
| 1278 |
+
|
| 1279 |
+
# [7] Section 8 # 3
|
| 1280 |
+
M = dist.ppf(m)
|
| 1281 |
+
|
| 1282 |
+
# [7] Section 8 # 4
|
| 1283 |
+
return _corr(X, M)
|
| 1284 |
+
_filliben.alternative = 'less' # type: ignore[attr-defined]
|
| 1285 |
+
|
| 1286 |
+
|
| 1287 |
+
def _cramer_von_mises(dist, data, axis):
|
| 1288 |
+
x = np.sort(data, axis=-1)
|
| 1289 |
+
n = data.shape[-1]
|
| 1290 |
+
cdfvals = dist.cdf(x)
|
| 1291 |
+
u = (2*np.arange(1, n+1) - 1)/(2*n)
|
| 1292 |
+
w = 1 / (12*n) + np.sum((u - cdfvals)**2, axis=-1)
|
| 1293 |
+
return w
|
| 1294 |
+
|
| 1295 |
+
|
| 1296 |
+
_compare_dict = {"ad": _anderson_darling, "ks": _kolmogorov_smirnov,
|
| 1297 |
+
"cvm": _cramer_von_mises, "filliben": _filliben}
|
| 1298 |
+
|
| 1299 |
+
|
| 1300 |
+
def _gof_iv(dist, data, known_params, fit_params, guessed_params, statistic,
|
| 1301 |
+
n_mc_samples, random_state):
|
| 1302 |
+
|
| 1303 |
+
if not isinstance(dist, stats.rv_continuous):
|
| 1304 |
+
message = ("`dist` must be a (non-frozen) instance of "
|
| 1305 |
+
"`stats.rv_continuous`.")
|
| 1306 |
+
raise TypeError(message)
|
| 1307 |
+
|
| 1308 |
+
data = np.asarray(data, dtype=float)
|
| 1309 |
+
if not data.ndim == 1:
|
| 1310 |
+
message = "`data` must be a one-dimensional array of numbers."
|
| 1311 |
+
raise ValueError(message)
|
| 1312 |
+
|
| 1313 |
+
# Leave validation of these key/value pairs to the `fit` method,
|
| 1314 |
+
# but collect these into dictionaries that will be used
|
| 1315 |
+
known_params = known_params or dict()
|
| 1316 |
+
fit_params = fit_params or dict()
|
| 1317 |
+
guessed_params = guessed_params or dict()
|
| 1318 |
+
|
| 1319 |
+
known_params_f = {("f"+key): val for key, val in known_params.items()}
|
| 1320 |
+
fit_params_f = {("f"+key): val for key, val in fit_params.items()}
|
| 1321 |
+
|
| 1322 |
+
# These are the values of parameters of the null distribution family
|
| 1323 |
+
# with which resamples are drawn
|
| 1324 |
+
fixed_nhd_params = known_params_f.copy()
|
| 1325 |
+
fixed_nhd_params.update(fit_params_f)
|
| 1326 |
+
|
| 1327 |
+
# These are fixed when fitting the distribution family to resamples
|
| 1328 |
+
fixed_rfd_params = known_params_f.copy()
|
| 1329 |
+
|
| 1330 |
+
# These are used as guesses when fitting the distribution family to
|
| 1331 |
+
# the original data
|
| 1332 |
+
guessed_nhd_params = guessed_params.copy()
|
| 1333 |
+
|
| 1334 |
+
# These are used as guesses when fitting the distribution family to
|
| 1335 |
+
# resamples
|
| 1336 |
+
guessed_rfd_params = fit_params.copy()
|
| 1337 |
+
guessed_rfd_params.update(guessed_params)
|
| 1338 |
+
|
| 1339 |
+
if not callable(statistic):
|
| 1340 |
+
statistic = statistic.lower()
|
| 1341 |
+
statistics = {'ad', 'ks', 'cvm', 'filliben'}
|
| 1342 |
+
if statistic not in statistics:
|
| 1343 |
+
message = f"`statistic` must be one of {statistics}."
|
| 1344 |
+
raise ValueError(message)
|
| 1345 |
+
|
| 1346 |
+
n_mc_samples_int = int(n_mc_samples)
|
| 1347 |
+
if n_mc_samples_int != n_mc_samples:
|
| 1348 |
+
message = "`n_mc_samples` must be an integer."
|
| 1349 |
+
raise TypeError(message)
|
| 1350 |
+
|
| 1351 |
+
random_state = check_random_state(random_state)
|
| 1352 |
+
|
| 1353 |
+
return (dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
|
| 1354 |
+
guessed_rfd_params, statistic, n_mc_samples_int, random_state)
|
parrot/lib/python3.10/site-packages/scipy/stats/_hypotests.py
ADDED
|
@@ -0,0 +1,2027 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import namedtuple
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from math import comb
|
| 4 |
+
import numpy as np
|
| 5 |
+
import warnings
|
| 6 |
+
from itertools import combinations
|
| 7 |
+
import scipy.stats
|
| 8 |
+
from scipy.optimize import shgo
|
| 9 |
+
from . import distributions
|
| 10 |
+
from ._common import ConfidenceInterval
|
| 11 |
+
from ._continuous_distns import norm
|
| 12 |
+
from scipy.special import gamma, kv, gammaln
|
| 13 |
+
from scipy.fft import ifft
|
| 14 |
+
from ._stats_pythran import _a_ij_Aij_Dij2
|
| 15 |
+
from ._stats_pythran import (
|
| 16 |
+
_concordant_pairs as _P, _discordant_pairs as _Q
|
| 17 |
+
)
|
| 18 |
+
from ._axis_nan_policy import _axis_nan_policy_factory
|
| 19 |
+
from scipy.stats import _stats_py
|
| 20 |
+
|
| 21 |
+
__all__ = ['epps_singleton_2samp', 'cramervonmises', 'somersd',
|
| 22 |
+
'barnard_exact', 'boschloo_exact', 'cramervonmises_2samp',
|
| 23 |
+
'tukey_hsd', 'poisson_means_test']
|
| 24 |
+
|
| 25 |
+
Epps_Singleton_2sampResult = namedtuple('Epps_Singleton_2sampResult',
|
| 26 |
+
('statistic', 'pvalue'))
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@_axis_nan_policy_factory(Epps_Singleton_2sampResult, n_samples=2, too_small=4)
|
| 30 |
+
def epps_singleton_2samp(x, y, t=(0.4, 0.8)):
|
| 31 |
+
"""Compute the Epps-Singleton (ES) test statistic.
|
| 32 |
+
|
| 33 |
+
Test the null hypothesis that two samples have the same underlying
|
| 34 |
+
probability distribution.
|
| 35 |
+
|
| 36 |
+
Parameters
|
| 37 |
+
----------
|
| 38 |
+
x, y : array-like
|
| 39 |
+
The two samples of observations to be tested. Input must not have more
|
| 40 |
+
than one dimension. Samples can have different lengths, but both
|
| 41 |
+
must have at least five observations.
|
| 42 |
+
t : array-like, optional
|
| 43 |
+
The points (t1, ..., tn) where the empirical characteristic function is
|
| 44 |
+
to be evaluated. It should be positive distinct numbers. The default
|
| 45 |
+
value (0.4, 0.8) is proposed in [1]_. Input must not have more than
|
| 46 |
+
one dimension.
|
| 47 |
+
|
| 48 |
+
Returns
|
| 49 |
+
-------
|
| 50 |
+
statistic : float
|
| 51 |
+
The test statistic.
|
| 52 |
+
pvalue : float
|
| 53 |
+
The associated p-value based on the asymptotic chi2-distribution.
|
| 54 |
+
|
| 55 |
+
See Also
|
| 56 |
+
--------
|
| 57 |
+
ks_2samp, anderson_ksamp
|
| 58 |
+
|
| 59 |
+
Notes
|
| 60 |
+
-----
|
| 61 |
+
Testing whether two samples are generated by the same underlying
|
| 62 |
+
distribution is a classical question in statistics. A widely used test is
|
| 63 |
+
the Kolmogorov-Smirnov (KS) test which relies on the empirical
|
| 64 |
+
distribution function. Epps and Singleton introduce a test based on the
|
| 65 |
+
empirical characteristic function in [1]_.
|
| 66 |
+
|
| 67 |
+
One advantage of the ES test compared to the KS test is that is does
|
| 68 |
+
not assume a continuous distribution. In [1]_, the authors conclude
|
| 69 |
+
that the test also has a higher power than the KS test in many
|
| 70 |
+
examples. They recommend the use of the ES test for discrete samples as
|
| 71 |
+
well as continuous samples with at least 25 observations each, whereas
|
| 72 |
+
`anderson_ksamp` is recommended for smaller sample sizes in the
|
| 73 |
+
continuous case.
|
| 74 |
+
|
| 75 |
+
The p-value is computed from the asymptotic distribution of the test
|
| 76 |
+
statistic which follows a `chi2` distribution. If the sample size of both
|
| 77 |
+
`x` and `y` is below 25, the small sample correction proposed in [1]_ is
|
| 78 |
+
applied to the test statistic.
|
| 79 |
+
|
| 80 |
+
The default values of `t` are determined in [1]_ by considering
|
| 81 |
+
various distributions and finding good values that lead to a high power
|
| 82 |
+
of the test in general. Table III in [1]_ gives the optimal values for
|
| 83 |
+
the distributions tested in that study. The values of `t` are scaled by
|
| 84 |
+
the semi-interquartile range in the implementation, see [1]_.
|
| 85 |
+
|
| 86 |
+
References
|
| 87 |
+
----------
|
| 88 |
+
.. [1] T. W. Epps and K. J. Singleton, "An omnibus test for the two-sample
|
| 89 |
+
problem using the empirical characteristic function", Journal of
|
| 90 |
+
Statistical Computation and Simulation 26, p. 177--203, 1986.
|
| 91 |
+
|
| 92 |
+
.. [2] S. J. Goerg and J. Kaiser, "Nonparametric testing of distributions
|
| 93 |
+
- the Epps-Singleton two-sample test using the empirical characteristic
|
| 94 |
+
function", The Stata Journal 9(3), p. 454--465, 2009.
|
| 95 |
+
|
| 96 |
+
"""
|
| 97 |
+
# x and y are converted to arrays by the decorator
|
| 98 |
+
t = np.asarray(t)
|
| 99 |
+
# check if x and y are valid inputs
|
| 100 |
+
nx, ny = len(x), len(y)
|
| 101 |
+
if (nx < 5) or (ny < 5):
|
| 102 |
+
raise ValueError('x and y should have at least 5 elements, but len(x) '
|
| 103 |
+
f'= {nx} and len(y) = {ny}.')
|
| 104 |
+
if not np.isfinite(x).all():
|
| 105 |
+
raise ValueError('x must not contain nonfinite values.')
|
| 106 |
+
if not np.isfinite(y).all():
|
| 107 |
+
raise ValueError('y must not contain nonfinite values.')
|
| 108 |
+
n = nx + ny
|
| 109 |
+
|
| 110 |
+
# check if t is valid
|
| 111 |
+
if t.ndim > 1:
|
| 112 |
+
raise ValueError(f't must be 1d, but t.ndim equals {t.ndim}.')
|
| 113 |
+
if np.less_equal(t, 0).any():
|
| 114 |
+
raise ValueError('t must contain positive elements only.')
|
| 115 |
+
|
| 116 |
+
# rescale t with semi-iqr as proposed in [1]; import iqr here to avoid
|
| 117 |
+
# circular import
|
| 118 |
+
from scipy.stats import iqr
|
| 119 |
+
sigma = iqr(np.hstack((x, y))) / 2
|
| 120 |
+
ts = np.reshape(t, (-1, 1)) / sigma
|
| 121 |
+
|
| 122 |
+
# covariance estimation of ES test
|
| 123 |
+
gx = np.vstack((np.cos(ts*x), np.sin(ts*x))).T # shape = (nx, 2*len(t))
|
| 124 |
+
gy = np.vstack((np.cos(ts*y), np.sin(ts*y))).T
|
| 125 |
+
cov_x = np.cov(gx.T, bias=True) # the test uses biased cov-estimate
|
| 126 |
+
cov_y = np.cov(gy.T, bias=True)
|
| 127 |
+
est_cov = (n/nx)*cov_x + (n/ny)*cov_y
|
| 128 |
+
est_cov_inv = np.linalg.pinv(est_cov)
|
| 129 |
+
r = np.linalg.matrix_rank(est_cov_inv)
|
| 130 |
+
if r < 2*len(t):
|
| 131 |
+
warnings.warn('Estimated covariance matrix does not have full rank. '
|
| 132 |
+
'This indicates a bad choice of the input t and the '
|
| 133 |
+
'test might not be consistent.', # see p. 183 in [1]_
|
| 134 |
+
stacklevel=2)
|
| 135 |
+
|
| 136 |
+
# compute test statistic w distributed asympt. as chisquare with df=r
|
| 137 |
+
g_diff = np.mean(gx, axis=0) - np.mean(gy, axis=0)
|
| 138 |
+
w = n*np.dot(g_diff.T, np.dot(est_cov_inv, g_diff))
|
| 139 |
+
|
| 140 |
+
# apply small-sample correction
|
| 141 |
+
if (max(nx, ny) < 25):
|
| 142 |
+
corr = 1.0/(1.0 + n**(-0.45) + 10.1*(nx**(-1.7) + ny**(-1.7)))
|
| 143 |
+
w = corr * w
|
| 144 |
+
|
| 145 |
+
chi2 = _stats_py._SimpleChi2(r)
|
| 146 |
+
p = _stats_py._get_pvalue(w, chi2, alternative='greater', symmetric=False, xp=np)
|
| 147 |
+
|
| 148 |
+
return Epps_Singleton_2sampResult(w, p)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def poisson_means_test(k1, n1, k2, n2, *, diff=0, alternative='two-sided'):
    r"""
    Performs the Poisson means test, AKA the "E-test".

    This is a test of the null hypothesis that the difference between means of
    two Poisson distributions is `diff`. The samples are provided as the
    number of events `k1` and `k2` observed within measurement intervals
    (e.g. of time, space, number of observations) of sizes `n1` and `n2`.

    Parameters
    ----------
    k1 : int
        Number of events observed from distribution 1.
    n1: float
        Size of sample from distribution 1.
    k2 : int
        Number of events observed from distribution 2.
    n2 : float
        Size of sample from distribution 2.
    diff : float, default=0
        The hypothesized difference in means between the distributions
        underlying the samples.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided': the difference between distribution means is not
          equal to `diff`
        * 'less': the difference between distribution means is less than
          `diff`
        * 'greater': the difference between distribution means is greater
          than `diff`

    Returns
    -------
    statistic : float
        The test statistic (see [1]_ equation 3.3).
    pvalue : float
        The probability of achieving such an extreme value of the test
        statistic under the null hypothesis.

    Notes
    -----

    Let:

    .. math:: X_1 \sim \mbox{Poisson}(\mathtt{n1}\lambda_1)

    be a random variable independent of

    .. math:: X_2 \sim \mbox{Poisson}(\mathtt{n2}\lambda_2)

    and let ``k1`` and ``k2`` be the observed values of :math:`X_1`
    and :math:`X_2`, respectively. Then `poisson_means_test` uses the number
    of observed events ``k1`` and ``k2`` from samples of size ``n1`` and
    ``n2``, respectively, to test the null hypothesis that

    .. math::
       H_0: \lambda_1 - \lambda_2 = \mathtt{diff}

    A benefit of the E-test is that it has good power for small sample sizes,
    which can reduce sampling costs [1]_. It has been evaluated and determined
    to be more powerful than the comparable C-test, sometimes referred to as
    the Poisson exact test.

    References
    ----------
    .. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
           comparing two Poisson means. Journal of Statistical Planning and
           Inference, 119(1), 23-35.

    .. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
           testing samples from Poisson series: With an application to testing
           clover seed for dodder. Biometrika, 31(3/4), 313-323.

    Examples
    --------

    Suppose that a gardener wishes to test the number of dodder (weed) seeds
    in a sack of clover seeds that they buy from a seed company. It has
    previously been established that the number of dodder seeds in clover
    follows the Poisson distribution.

    A 100 gram sample is drawn from the sack before being shipped to the
    gardener. The sample is analyzed, and it is found to contain no dodder
    seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
    another 100 gram sample from the sack. This time, three dodder seeds are
    found in the sample; that is, `k2` is 3. The gardener would like to
    know if the difference is significant and not due to chance. The
    null hypothesis is that the difference between the two samples is merely
    due to chance, or that :math:`\lambda_1 - \lambda_2 = \mathtt{diff}`
    where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
    difference is not due to chance, or :math:`\lambda_1 - \lambda_2 \ne 0`.
    The gardener selects a significance level of 5% to reject the null
    hypothesis in favor of the alternative [2]_.

    >>> import scipy.stats as stats
    >>> res = stats.poisson_means_test(0, 100, 3, 100)
    >>> res.statistic, res.pvalue
    (-1.7320508075688772, 0.08837900929018157)

    The p-value is .088, indicating a near 9% chance of observing a value of
    the test statistic under the null hypothesis. This exceeds 5%, so the
    gardener does not reject the null hypothesis as the difference cannot be
    regarded as significant at this level.
    """

    _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative)

    # "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4)
    lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))

    # "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
    # case the null hypothesis cannot be rejected ... [and] it is not necessary
    # to compute the p-value". [1] page 26 below eq. (3.6).
    if lmbd_hat2 <= 0:
        return _stats_py.SignificanceResult(0, 1)

    # The unbiased variance estimate [1] (3.2)
    var = k1 / (n1 ** 2) + k2 / (n2 ** 2)

    # The _observed_ pivot statistic from the input. It follows the
    # unnumbered equation following equation (3.3). This is used later in
    # comparison with the computed pivot statistics in an indicator function.
    t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)

    # Equation (3.5) of [1] is lengthy, so it is broken into several parts,
    # beginning here. Note that the probability mass function of poisson is
    # exp^(-\mu)*\mu^k/k!, and it is called with shape \mu, noted here as
    # nlmbd_hat*. The strategy for evaluating the double summation in
    # (3.5) is to create two arrays of the values of the two products inside
    # the summation and then broadcast them together into a matrix, and then
    # sum across the entire matrix.

    # Compute constants (as seen in the first and second separated products in
    # (3.5).). (This is the shape (\mu) parameter of the poisson distribution.)
    nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
    nlmbd_hat2 = n2 * lmbd_hat2

    # Determine summation bounds for tail ends of distribution rather than
    # summing to infinity. `x1*` is for the outer sum and `x2*` is the inner
    # sum.
    x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
    x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)

    # Construct arrays to function as the x_1 and x_2 counters on the summation
    # in (3.5). `x1` is in columns and `x2` is in rows to allow for
    # broadcasting.
    x1 = np.arange(x1_lb, x1_ub + 1)
    x2 = np.arange(x2_lb, x2_ub + 1)[:, None]

    # These are the two products in equation (3.5) with `prob_x1` being the
    # first (left side) and `prob_x2` being the second (right side). (To
    # make as clear as possible: the 1st contains a "+ d" term, the 2nd does
    # not.)
    prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
    prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)

    # compute constants for use in the "pivot statistic" per the
    # unnumbered equation following (3.3).
    lmbd_x1 = x1 / n1
    lmbd_x2 = x2 / n2
    lmbds_diff = lmbd_x1 - lmbd_x2 - diff
    var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2

    # This is the 'pivot statistic' for use in the indicator of the summation
    # (left side of "I[.]").  var_x1x2 can be 0 when x1 == x2 == 0; the
    # resulting nan/inf entries are handled by the comparisons below, so the
    # warnings are suppressed.
    with np.errstate(invalid='ignore', divide='ignore'):
        t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)

    # `[indicator]` implements the "I[.] ... the indicator function" per
    # the paragraph following equation (3.5).
    if alternative == 'two-sided':
        indicator = np.abs(t_x1x2) >= np.abs(t_k1k2)
    elif alternative == 'less':
        indicator = t_x1x2 <= t_k1k2
    else:
        indicator = t_x1x2 >= t_k1k2

    # Multiply all combinations of the products together, exclude terms
    # based on the `indicator` and then sum. (3.5)
    pvalue = np.sum((prob_x1 * prob_x2)[indicator])
    return _stats_py.SignificanceResult(t_k1k2, pvalue)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative):
|
| 337 |
+
# """check for valid types and values of input to `poisson_mean_test`."""
|
| 338 |
+
if k1 != int(k1) or k2 != int(k2):
|
| 339 |
+
raise TypeError('`k1` and `k2` must be integers.')
|
| 340 |
+
|
| 341 |
+
count_err = '`k1` and `k2` must be greater than or equal to 0.'
|
| 342 |
+
if k1 < 0 or k2 < 0:
|
| 343 |
+
raise ValueError(count_err)
|
| 344 |
+
|
| 345 |
+
if n1 <= 0 or n2 <= 0:
|
| 346 |
+
raise ValueError('`n1` and `n2` must be greater than 0.')
|
| 347 |
+
|
| 348 |
+
if diff < 0:
|
| 349 |
+
raise ValueError('diff must be greater than or equal to 0.')
|
| 350 |
+
|
| 351 |
+
alternatives = {'two-sided', 'less', 'greater'}
|
| 352 |
+
if alternative.lower() not in alternatives:
|
| 353 |
+
raise ValueError(f"Alternative must be one of '{alternatives}'.")
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
class CramerVonMisesResult:
    """Container for the result of a Cramér-von Mises test.

    Attributes
    ----------
    statistic : float
        The Cramér-von Mises test statistic.
    pvalue : float
        The associated p-value.
    """

    def __init__(self, statistic, pvalue):
        self.statistic = statistic
        self.pvalue = pvalue

    def __repr__(self):
        cls_name = self.__class__.__name__
        return f"{cls_name}(statistic={self.statistic}, pvalue={self.pvalue})"
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def _psi1_mod(x):
|
| 367 |
+
"""
|
| 368 |
+
psi1 is defined in equation 1.10 in Csörgő, S. and Faraway, J. (1996).
|
| 369 |
+
This implements a modified version by excluding the term V(x) / 12
|
| 370 |
+
(here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x)
|
| 371 |
+
twice in _cdf_cvm.
|
| 372 |
+
|
| 373 |
+
Implementation based on MAPLE code of Julian Faraway and R code of the
|
| 374 |
+
function pCvM in the package goftest (v1.1.1), permission granted
|
| 375 |
+
by Adrian Baddeley. Main difference in the implementation: the code
|
| 376 |
+
here keeps adding terms of the series until the terms are small enough.
|
| 377 |
+
"""
|
| 378 |
+
|
| 379 |
+
def _ed2(y):
|
| 380 |
+
z = y**2 / 4
|
| 381 |
+
b = kv(1/4, z) + kv(3/4, z)
|
| 382 |
+
return np.exp(-z) * (y/2)**(3/2) * b / np.sqrt(np.pi)
|
| 383 |
+
|
| 384 |
+
def _ed3(y):
|
| 385 |
+
z = y**2 / 4
|
| 386 |
+
c = np.exp(-z) / np.sqrt(np.pi)
|
| 387 |
+
return c * (y/2)**(5/2) * (2*kv(1/4, z) + 3*kv(3/4, z) - kv(5/4, z))
|
| 388 |
+
|
| 389 |
+
def _Ak(k, x):
|
| 390 |
+
m = 2*k + 1
|
| 391 |
+
sx = 2 * np.sqrt(x)
|
| 392 |
+
y1 = x**(3/4)
|
| 393 |
+
y2 = x**(5/4)
|
| 394 |
+
|
| 395 |
+
e1 = m * gamma(k + 1/2) * _ed2((4 * k + 3)/sx) / (9 * y1)
|
| 396 |
+
e2 = gamma(k + 1/2) * _ed3((4 * k + 1) / sx) / (72 * y2)
|
| 397 |
+
e3 = 2 * (m + 2) * gamma(k + 3/2) * _ed3((4 * k + 5) / sx) / (12 * y2)
|
| 398 |
+
e4 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 1) / sx) / (144 * y1)
|
| 399 |
+
e5 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 5) / sx) / (144 * y1)
|
| 400 |
+
|
| 401 |
+
return e1 + e2 + e3 + e4 + e5
|
| 402 |
+
|
| 403 |
+
x = np.asarray(x)
|
| 404 |
+
tot = np.zeros_like(x, dtype='float')
|
| 405 |
+
cond = np.ones_like(x, dtype='bool')
|
| 406 |
+
k = 0
|
| 407 |
+
while np.any(cond):
|
| 408 |
+
z = -_Ak(k, x[cond]) / (np.pi * gamma(k + 1))
|
| 409 |
+
tot[cond] = tot[cond] + z
|
| 410 |
+
cond[cond] = np.abs(z) >= 1e-7
|
| 411 |
+
k += 1
|
| 412 |
+
|
| 413 |
+
return tot
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def _cdf_cvm_inf(x):
|
| 417 |
+
"""
|
| 418 |
+
Calculate the cdf of the Cramér-von Mises statistic (infinite sample size).
|
| 419 |
+
|
| 420 |
+
See equation 1.2 in Csörgő, S. and Faraway, J. (1996).
|
| 421 |
+
|
| 422 |
+
Implementation based on MAPLE code of Julian Faraway and R code of the
|
| 423 |
+
function pCvM in the package goftest (v1.1.1), permission granted
|
| 424 |
+
by Adrian Baddeley. Main difference in the implementation: the code
|
| 425 |
+
here keeps adding terms of the series until the terms are small enough.
|
| 426 |
+
|
| 427 |
+
The function is not expected to be accurate for large values of x, say
|
| 428 |
+
x > 4, when the cdf is very close to 1.
|
| 429 |
+
"""
|
| 430 |
+
x = np.asarray(x)
|
| 431 |
+
|
| 432 |
+
def term(x, k):
|
| 433 |
+
# this expression can be found in [2], second line of (1.3)
|
| 434 |
+
u = np.exp(gammaln(k + 0.5) - gammaln(k+1)) / (np.pi**1.5 * np.sqrt(x))
|
| 435 |
+
y = 4*k + 1
|
| 436 |
+
q = y**2 / (16*x)
|
| 437 |
+
b = kv(0.25, q)
|
| 438 |
+
return u * np.sqrt(y) * np.exp(-q) * b
|
| 439 |
+
|
| 440 |
+
tot = np.zeros_like(x, dtype='float')
|
| 441 |
+
cond = np.ones_like(x, dtype='bool')
|
| 442 |
+
k = 0
|
| 443 |
+
while np.any(cond):
|
| 444 |
+
z = term(x[cond], k)
|
| 445 |
+
tot[cond] = tot[cond] + z
|
| 446 |
+
cond[cond] = np.abs(z) >= 1e-7
|
| 447 |
+
k += 1
|
| 448 |
+
|
| 449 |
+
return tot
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def _cdf_cvm(x, n=None):
    """
    Calculate the cdf of the Cramér-von Mises statistic for a finite sample
    size n. If n is None, use the asymptotic cdf (n=inf).

    See equation 1.8 in Csörgő, S. and Faraway, J. (1996) for finite samples,
    1.2 for the asymptotic cdf.

    The function is not expected to be accurate for large values of x, say
    x > 2, when the cdf is very close to 1 and it might return values > 1
    in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846. Moreover, it
    is not accurate for small values of n, especially close to the bounds of
    the distribution's domain, [1/(12*n), n/3], where the value jumps to 0
    and 1, respectively. These are limitations of the approximation by Csörgő
    and Faraway (1996) implemented in this function.
    """
    x = np.asarray(x)
    if n is None:
        y = _cdf_cvm_inf(x)
    else:
        # support of the test statistic is [1/(12*n), n/3], see 1.1 in [2];
        # entries below the lower bound keep cdf value 0
        y = np.zeros_like(x, dtype='float')
        sup = (1./(12*n) < x) & (x < n/3.)
        # note: _psi1_mod does not include the term _cdf_cvm_inf(x) / 12
        # therefore, we need to add it here
        y[sup] = _cdf_cvm_inf(x[sup]) * (1 + 1./(12*n)) + _psi1_mod(x[sup]) / n
        y[x >= n/3] = 1

    if y.ndim == 0:
        # scalar input -> return a scalar, not a 0-d array
        return y[()]
    return y
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def _cvm_result_to_tuple(res):
|
| 486 |
+
return res.statistic, res.pvalue
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
@_axis_nan_policy_factory(CramerVonMisesResult, n_samples=1, too_small=1,
                          result_to_tuple=_cvm_result_to_tuple)
def cramervonmises(rvs, cdf, args=()):
    """Perform the one-sample Cramér-von Mises test for goodness of fit.

    This performs a test of the goodness of fit of a cumulative distribution
    function (cdf) :math:`F` compared to the empirical distribution function
    :math:`F_n` of observed random variates :math:`X_1, ..., X_n` that are
    assumed to be independent and identically distributed ([1]_).
    The null hypothesis is that the :math:`X_i` have cumulative distribution
    :math:`F`.

    Parameters
    ----------
    rvs : array_like
        A 1-D array of observed values of the random variables :math:`X_i`.
        The sample must contain at least two observations.
    cdf : str or callable
        The cumulative distribution function :math:`F` to test the
        observations against. If a string, it should be the name of a
        distribution in `scipy.stats`. If a callable, that callable is used
        to calculate the cdf: ``cdf(x, *args) -> float``.
    args : tuple, optional
        Distribution parameters. These are assumed to be known; see Notes.

    Returns
    -------
    res : object with attributes
        statistic : float
            Cramér-von Mises statistic.
        pvalue : float
            The p-value.

    See Also
    --------
    kstest, cramervonmises_2samp

    Notes
    -----
    .. versionadded:: 1.6.0

    The p-value relies on the approximation given by equation 1.8 in [2]_.
    It is important to keep in mind that the p-value is only accurate if
    one tests a simple hypothesis, i.e. the parameters of the reference
    distribution are known. If the parameters are estimated from the data
    (composite hypothesis), the computed p-value is not reliable.

    References
    ----------
    .. [1] Cramér-von Mises criterion, Wikipedia,
           https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion
    .. [2] Csörgő, S. and Faraway, J. (1996). The Exact and Asymptotic
           Distribution of Cramér-von Mises Statistics. Journal of the
           Royal Statistical Society, pp. 221-234.

    Examples
    --------

    Suppose we wish to test whether data generated by ``scipy.stats.norm.rvs``
    were, in fact, drawn from the standard normal distribution. We choose a
    significance level of ``alpha=0.05``.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng(165417232101553420507139617764912913465)
    >>> x = stats.norm.rvs(size=500, random_state=rng)
    >>> res = stats.cramervonmises(x, 'norm')
    >>> res.statistic, res.pvalue
    (0.1072085112565724, 0.5508482238203407)

    The p-value exceeds our chosen significance level, so we do not
    reject the null hypothesis that the observed sample is drawn from the
    standard normal distribution.

    Now suppose we wish to check whether the same samples shifted by 2.1 is
    consistent with being drawn from a normal distribution with a mean of 2.

    >>> y = x + 2.1
    >>> res = stats.cramervonmises(y, 'norm', args=(2,))
    >>> res.statistic, res.pvalue
    (0.8364446265294695, 0.00596286797008283)

    Here we have used the `args` keyword to specify the mean (``loc``)
    of the normal distribution to test the data against. This is equivalent
    to the following, in which we create a frozen normal distribution with
    mean 2.1, then pass its ``cdf`` method as an argument.

    >>> frozen_dist = stats.norm(loc=2)
    >>> res = stats.cramervonmises(y, frozen_dist.cdf)
    >>> res.statistic, res.pvalue
    (0.8364446265294695, 0.00596286797008283)

    In either case, we would reject the null hypothesis that the observed
    sample is drawn from a normal distribution with a mean of 2 (and default
    variance of 1) because the p-value is less than our chosen
    significance level.

    """
    # resolve a distribution name like 'norm' to its cdf callable
    if isinstance(cdf, str):
        cdf = getattr(distributions, cdf).cdf

    vals = np.sort(np.asarray(rvs))

    if vals.size <= 1:
        raise ValueError('The sample must contain at least two observations.')

    n = len(vals)
    cdfvals = cdf(vals, *args)

    # statistic: compare cdf values at the sorted sample against the
    # midpoints u_i = (2i - 1)/(2n), i = 1..n
    u = (2*np.arange(1, n+1) - 1)/(2*n)
    w = 1/(12*n) + np.sum((u - cdfvals)**2)

    # avoid small negative values that can occur due to the approximation
    p = max(0, 1. - _cdf_cvm(w, n))

    return CramerVonMisesResult(statistic=w, pvalue=p)
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
def _get_wilcoxon_distr(n):
|
| 608 |
+
"""
|
| 609 |
+
Distribution of probability of the Wilcoxon ranksum statistic r_plus (sum
|
| 610 |
+
of ranks of positive differences).
|
| 611 |
+
Returns an array with the probabilities of all the possible ranks
|
| 612 |
+
r = 0, ..., n*(n+1)/2
|
| 613 |
+
"""
|
| 614 |
+
c = np.ones(1, dtype=np.float64)
|
| 615 |
+
for k in range(1, n + 1):
|
| 616 |
+
prev_c = c
|
| 617 |
+
c = np.zeros(k * (k + 1) // 2 + 1, dtype=np.float64)
|
| 618 |
+
m = len(prev_c)
|
| 619 |
+
c[:m] = prev_c * 0.5
|
| 620 |
+
c[-m:] += prev_c * 0.5
|
| 621 |
+
return c
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
def _get_wilcoxon_distr2(n):
|
| 625 |
+
"""
|
| 626 |
+
Distribution of probability of the Wilcoxon ranksum statistic r_plus (sum
|
| 627 |
+
of ranks of positive differences).
|
| 628 |
+
Returns an array with the probabilities of all the possible ranks
|
| 629 |
+
r = 0, ..., n*(n+1)/2
|
| 630 |
+
This is a slower reference function
|
| 631 |
+
References
|
| 632 |
+
----------
|
| 633 |
+
.. [1] 1. Harris T, Hardin JW. Exact Wilcoxon Signed-Rank and Wilcoxon
|
| 634 |
+
Mann-Whitney Ranksum Tests. The Stata Journal. 2013;13(2):337-343.
|
| 635 |
+
"""
|
| 636 |
+
ai = np.arange(1, n+1)[:, None]
|
| 637 |
+
t = n*(n+1)/2
|
| 638 |
+
q = 2*t
|
| 639 |
+
j = np.arange(q)
|
| 640 |
+
theta = 2*np.pi/q*j
|
| 641 |
+
phi_sp = np.prod(np.cos(theta*ai), axis=0)
|
| 642 |
+
phi_s = np.exp(1j*theta*t) * phi_sp
|
| 643 |
+
p = np.real(ifft(phi_s))
|
| 644 |
+
res = np.zeros(int(t)+1)
|
| 645 |
+
res[:-1:] = p[::2]
|
| 646 |
+
res[0] /= 2
|
| 647 |
+
res[-1] = res[0]
|
| 648 |
+
return res
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
def _tau_b(A):
    """Calculate Kendall's tau-b and p-value from contingency table."""
    # See [2] 2.2 and 4.2

    # a table with a single row or a single column is not truly 2D and
    # carries no ordinal association
    if A.shape[0] == 1 or A.shape[1] == 1:
        return np.nan, np.nan

    total = A.sum()
    p_stat = _P(A)
    q_stat = _Q(A)
    row_sq_sum = (A.sum(axis=1)**2).sum()
    col_sq_sum = (A.sum(axis=0)**2).sum()
    denominator = (total**2 - row_sq_sum) * (total**2 - col_sq_sum)

    tau = (p_stat - q_stat) / denominator**0.5

    numerator = 4*(_a_ij_Aij_Dij2(A) - (p_stat - q_stat)**2 / total)
    var_tau_b = numerator / denominator
    if var_tau_b == 0:  # Avoid divide by zero
        return tau, 0
    Z = tau / var_tau_b**0.5
    p = 2*norm.sf(abs(Z))  # 2-sided p-value

    return tau, p
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
def _somers_d(A, alternative='two-sided'):
    """Calculate Somers' D and p-value from contingency table."""
    # See [3] page 1740

    # a table with a single row or column is not truly 2D; no association
    if A.shape[0] <= 1 or A.shape[1] <= 1:
        return np.nan, np.nan

    total = A.sum()
    total_sq = total**2
    p_stat = _P(A)
    q_stat = _Q(A)
    row_sq_sum = (A.sum(axis=1)**2).sum()

    d = (p_stat - q_stat) / (total_sq - row_sq_sum)

    S = _a_ij_Aij_Dij2(A) - (p_stat - q_stat)**2 / total

    # S can be zero; suppress the divide-by-zero warning and let Z be inf
    with np.errstate(divide='ignore'):
        Z = (p_stat - q_stat) / (4*S)**0.5

    std_normal = _stats_py._SimpleNormal()
    p = _stats_py._get_pvalue(Z, std_normal, alternative, xp=np)

    return d, p
|
| 703 |
+
|
| 704 |
+
|
| 705 |
+
@dataclass
class SomersDResult:
    """Result object returned by `somersd`."""
    # Somers' D statistic
    statistic: float
    # p-value of the associated hypothesis test
    pvalue: float
    # contingency table from which the statistic was computed
    table: np.ndarray
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
def somersd(x, y=None, alternative='two-sided'):
|
| 713 |
+
r"""Calculates Somers' D, an asymmetric measure of ordinal association.
|
| 714 |
+
|
| 715 |
+
Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the
|
| 716 |
+
correspondence between two rankings. Both statistics consider the
|
| 717 |
+
difference between the number of concordant and discordant pairs in two
|
| 718 |
+
rankings :math:`X` and :math:`Y`, and both are normalized such that values
|
| 719 |
+
close to 1 indicate strong agreement and values close to -1 indicate
|
| 720 |
+
strong disagreement. They differ in how they are normalized. To show the
|
| 721 |
+
relationship, Somers' :math:`D` can be defined in terms of Kendall's
|
| 722 |
+
:math:`\tau_a`:
|
| 723 |
+
|
| 724 |
+
.. math::
|
| 725 |
+
D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)}
|
| 726 |
+
|
| 727 |
+
Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the
|
| 728 |
+
second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of
|
| 729 |
+
:math:`n` rankings can also be viewed as an :math:`r \times s` contingency
|
| 730 |
+
table in which element :math:`i, j` is the number of rank pairs with rank
|
| 731 |
+
:math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`.
|
| 732 |
+
Accordingly, `somersd` also allows the input data to be supplied as a
|
| 733 |
+
single, 2D contingency table instead of as two separate, 1D rankings.
|
| 734 |
+
|
| 735 |
+
Note that the definition of Somers' :math:`D` is asymmetric: in general,
|
| 736 |
+
:math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers'
|
| 737 |
+
:math:`D(Y|X)`: the "row" variable :math:`X` is treated as an independent
|
| 738 |
+
variable, and the "column" variable :math:`Y` is dependent. For Somers'
|
| 739 |
+
:math:`D(X|Y)`, swap the input lists or transpose the input table.
|
| 740 |
+
|
| 741 |
+
Parameters
|
| 742 |
+
----------
|
| 743 |
+
x : array_like
|
| 744 |
+
1D array of rankings, treated as the (row) independent variable.
|
| 745 |
+
Alternatively, a 2D contingency table.
|
| 746 |
+
y : array_like, optional
|
| 747 |
+
If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the
|
| 748 |
+
same length, treated as the (column) dependent variable.
|
| 749 |
+
If `x` is 2D, `y` is ignored.
|
| 750 |
+
alternative : {'two-sided', 'less', 'greater'}, optional
|
| 751 |
+
Defines the alternative hypothesis. Default is 'two-sided'.
|
| 752 |
+
The following options are available:
|
| 753 |
+
* 'two-sided': the rank correlation is nonzero
|
| 754 |
+
* 'less': the rank correlation is negative (less than zero)
|
| 755 |
+
* 'greater': the rank correlation is positive (greater than zero)
|
| 756 |
+
|
| 757 |
+
Returns
|
| 758 |
+
-------
|
| 759 |
+
res : SomersDResult
|
| 760 |
+
A `SomersDResult` object with the following fields:
|
| 761 |
+
|
| 762 |
+
statistic : float
|
| 763 |
+
The Somers' :math:`D` statistic.
|
| 764 |
+
pvalue : float
|
| 765 |
+
The p-value for a hypothesis test whose null
|
| 766 |
+
hypothesis is an absence of association, :math:`D=0`.
|
| 767 |
+
See notes for more information.
|
| 768 |
+
table : 2D array
|
| 769 |
+
The contingency table formed from rankings `x` and `y` (or the
|
| 770 |
+
provided contingency table, if `x` is a 2D array)
|
| 771 |
+
|
| 772 |
+
See Also
|
| 773 |
+
--------
|
| 774 |
+
kendalltau : Calculates Kendall's tau, another correlation measure.
|
| 775 |
+
weightedtau : Computes a weighted version of Kendall's tau.
|
| 776 |
+
spearmanr : Calculates a Spearman rank-order correlation coefficient.
|
| 777 |
+
pearsonr : Calculates a Pearson correlation coefficient.
|
| 778 |
+
|
| 779 |
+
Notes
|
| 780 |
+
-----
|
| 781 |
+
This function follows the contingency table approach of [2]_ and
|
| 782 |
+
[3]_. *p*-values are computed based on an asymptotic approximation of
|
| 783 |
+
the test statistic distribution under the null hypothesis :math:`D=0`.
|
| 784 |
+
|
| 785 |
+
Theoretically, hypothesis tests based on Kendall's :math:`tau` and Somers'
|
| 786 |
+
:math:`D` should be identical.
|
| 787 |
+
However, the *p*-values returned by `kendalltau` are based
|
| 788 |
+
on the null hypothesis of *independence* between :math:`X` and :math:`Y`
|
| 789 |
+
(i.e. the population from which pairs in :math:`X` and :math:`Y` are
|
| 790 |
+
sampled contains equal numbers of all possible pairs), which is more
|
| 791 |
+
specific than the null hypothesis :math:`D=0` used here. If the null
|
| 792 |
+
hypothesis of independence is desired, it is acceptable to use the
|
| 793 |
+
*p*-value returned by `kendalltau` with the statistic returned by
|
| 794 |
+
`somersd` and vice versa. For more information, see [2]_.
|
| 795 |
+
|
| 796 |
+
Contingency tables are formatted according to the convention used by
|
| 797 |
+
SAS and R: the first ranking supplied (``x``) is the "row" variable, and
|
| 798 |
+
the second ranking supplied (``y``) is the "column" variable. This is
|
| 799 |
+
opposite the convention of Somers' original paper [1]_.
|
| 800 |
+
|
| 801 |
+
References
|
| 802 |
+
----------
|
| 803 |
+
.. [1] Robert H. Somers, "A New Asymmetric Measure of Association for
|
| 804 |
+
Ordinal Variables", *American Sociological Review*, Vol. 27, No. 6,
|
| 805 |
+
pp. 799--811, 1962.
|
| 806 |
+
|
| 807 |
+
.. [2] Morton B. Brown and Jacqueline K. Benedetti, "Sampling Behavior of
|
| 808 |
+
Tests for Correlation in Two-Way Contingency Tables", *Journal of
|
| 809 |
+
the American Statistical Association* Vol. 72, No. 358, pp.
|
| 810 |
+
309--315, 1977.
|
| 811 |
+
|
| 812 |
+
.. [3] SAS Institute, Inc., "The FREQ Procedure (Book Excerpt)",
|
| 813 |
+
*SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009.
|
| 814 |
+
|
| 815 |
+
.. [4] Laerd Statistics, "Somers' d using SPSS Statistics", *SPSS
|
| 816 |
+
Statistics Tutorials and Statistical Guides*,
|
| 817 |
+
https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php,
|
| 818 |
+
Accessed July 31, 2020.
|
| 819 |
+
|
| 820 |
+
Examples
|
| 821 |
+
--------
|
| 822 |
+
We calculate Somers' D for the example given in [4]_, in which a hotel
|
| 823 |
+
chain owner seeks to determine the association between hotel room
|
| 824 |
+
cleanliness and customer satisfaction. The independent variable, hotel
|
| 825 |
+
room cleanliness, is ranked on an ordinal scale: "below average (1)",
|
| 826 |
+
"average (2)", or "above average (3)". The dependent variable, customer
|
| 827 |
+
satisfaction, is ranked on a second scale: "very dissatisfied (1)",
|
| 828 |
+
"moderately dissatisfied (2)", "neither dissatisfied nor satisfied (3)",
|
| 829 |
+
"moderately satisfied (4)", or "very satisfied (5)". 189 customers
|
| 830 |
+
respond to the survey, and the results are cast into a contingency table
|
| 831 |
+
with the hotel room cleanliness as the "row" variable and customer
|
| 832 |
+
satisfaction as the "column" variable.
|
| 833 |
+
|
| 834 |
+
+-----+-----+-----+-----+-----+-----+
|
| 835 |
+
| | (1) | (2) | (3) | (4) | (5) |
|
| 836 |
+
+=====+=====+=====+=====+=====+=====+
|
| 837 |
+
| (1) | 27 | 25 | 14 | 7 | 0 |
|
| 838 |
+
+-----+-----+-----+-----+-----+-----+
|
| 839 |
+
| (2) | 7 | 14 | 18 | 35 | 12 |
|
| 840 |
+
+-----+-----+-----+-----+-----+-----+
|
| 841 |
+
| (3) | 1 | 3 | 2 | 7 | 17 |
|
| 842 |
+
+-----+-----+-----+-----+-----+-----+
|
| 843 |
+
|
| 844 |
+
For example, 27 customers assigned their room a cleanliness ranking of
|
| 845 |
+
"below average (1)" and a corresponding satisfaction of "very
|
| 846 |
+
dissatisfied (1)". We perform the analysis as follows.
|
| 847 |
+
|
| 848 |
+
>>> from scipy.stats import somersd
|
| 849 |
+
>>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
|
| 850 |
+
>>> res = somersd(table)
|
| 851 |
+
>>> res.statistic
|
| 852 |
+
0.6032766111513396
|
| 853 |
+
>>> res.pvalue
|
| 854 |
+
1.0007091191074533e-27
|
| 855 |
+
|
| 856 |
+
The value of the Somers' D statistic is approximately 0.6, indicating
|
| 857 |
+
a positive correlation between room cleanliness and customer satisfaction
|
| 858 |
+
in the sample.
|
| 859 |
+
The *p*-value is very small, indicating a very small probability of
|
| 860 |
+
observing such an extreme value of the statistic under the null
|
| 861 |
+
hypothesis that the statistic of the entire population (from which
|
| 862 |
+
our sample of 189 customers is drawn) is zero. This supports the
|
| 863 |
+
alternative hypothesis that the true value of Somers' D for the population
|
| 864 |
+
is nonzero.
|
| 865 |
+
|
| 866 |
+
"""
|
| 867 |
+
x, y = np.array(x), np.array(y)
|
| 868 |
+
if x.ndim == 1:
|
| 869 |
+
if x.size != y.size:
|
| 870 |
+
raise ValueError("Rankings must be of equal length.")
|
| 871 |
+
table = scipy.stats.contingency.crosstab(x, y)[1]
|
| 872 |
+
elif x.ndim == 2:
|
| 873 |
+
if np.any(x < 0):
|
| 874 |
+
raise ValueError("All elements of the contingency table must be "
|
| 875 |
+
"non-negative.")
|
| 876 |
+
if np.any(x != x.astype(int)):
|
| 877 |
+
raise ValueError("All elements of the contingency table must be "
|
| 878 |
+
"integer.")
|
| 879 |
+
if x.nonzero()[0].size < 2:
|
| 880 |
+
raise ValueError("At least two elements of the contingency table "
|
| 881 |
+
"must be nonzero.")
|
| 882 |
+
table = x
|
| 883 |
+
else:
|
| 884 |
+
raise ValueError("x must be either a 1D or 2D array")
|
| 885 |
+
# The table type is converted to a float to avoid an integer overflow
|
| 886 |
+
d, p = _somers_d(table.astype(float), alternative)
|
| 887 |
+
|
| 888 |
+
# add alias for consistency with other correlation functions
|
| 889 |
+
res = SomersDResult(d, p, table)
|
| 890 |
+
res.correlation = d
|
| 891 |
+
return res
|
| 892 |
+
|
| 893 |
+
|
| 894 |
+
# This could be combined with `_all_partitions` in `_resampling.py`
|
| 895 |
+
def _all_partitions(nx, ny):
|
| 896 |
+
"""
|
| 897 |
+
Partition a set of indices into two fixed-length sets in all possible ways
|
| 898 |
+
|
| 899 |
+
Partition a set of indices 0 ... nx + ny - 1 into two sets of length nx and
|
| 900 |
+
ny in all possible ways (ignoring order of elements).
|
| 901 |
+
"""
|
| 902 |
+
z = np.arange(nx+ny)
|
| 903 |
+
for c in combinations(z, nx):
|
| 904 |
+
x = np.array(c)
|
| 905 |
+
mask = np.ones(nx+ny, bool)
|
| 906 |
+
mask[x] = False
|
| 907 |
+
y = z[mask]
|
| 908 |
+
yield x, y
|
| 909 |
+
|
| 910 |
+
|
| 911 |
+
def _compute_log_combinations(n):
|
| 912 |
+
"""Compute all log combination of C(n, k)."""
|
| 913 |
+
gammaln_arr = gammaln(np.arange(n + 1) + 1)
|
| 914 |
+
return gammaln(n + 1) - gammaln_arr - gammaln_arr[::-1]
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
@dataclass
class BarnardExactResult:
    """Result of `barnard_exact`.

    Holds the Wald statistic of the observed 2x2 table and the exact
    p-value maximized over the nuisance parameter.
    """
    # Wald statistic (pooled or unpooled variance) of the observed table;
    # NaN when a column total is zero.
    statistic: float
    # Exact p-value; 1.0 when a column total is zero.
    pvalue: float
|
| 921 |
+
|
| 922 |
+
|
| 923 |
+
def barnard_exact(table, alternative="two-sided", pooled=True, n=32):
    r"""Perform a Barnard exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes section below.

    pooled : bool, optional
        Whether to compute score statistic with pooled variance (as in
        Student's t-test, for example) or unpooled variance (as in Welch's
        t-test). Default is ``True``.

    n : int, optional
        Number of sampling points used in the construction of the sampling
        method. Note that this argument will automatically be converted to
        the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
        select sample points. Default is 32. Must be positive. In most cases,
        32 points is enough to reach good precision. More points comes at
        performance cost.

    Returns
    -------
    ber : BarnardExactResult
        A result object with the following attributes.

        statistic : float
            The Wald statistic with pooled or unpooled variance, depending
            on the user choice of `pooled`.

        pvalue : float
            P-value, the probability of obtaining a distribution at least as
            extreme as the one that was actually observed, assuming that the
            null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    fisher_exact : Fisher exact test on a 2x2 contingency table.
    boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
        which is an uniformly more powerful alternative to Fisher's exact test.

    Notes
    -----
    Barnard's test is an exact test used in the analysis of contingency
    tables. It examines the association of two categorical variables, and
    is a more powerful alternative than Fisher's exact test
    for 2x2 contingency tables.

    Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
    where each column stores the binomial experiment, as in the example
    below. Let's also define :math:`p_1, p_2` the theoretical binomial
    probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
    Barnard exact test, we can assert three different null hypotheses :

    - :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
      with `alternative` = "less"

    - :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
      with `alternative` = "greater"

    - :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
      with `alternative` = "two-sided" (default one)

    In order to compute Barnard's exact test, we are using the Wald
    statistic [3]_ with pooled or unpooled variance.
    Under the default assumption that both variances are equal
    (``pooled = True``), the statistic is computed as:

    .. math::

        T(X) = \frac{
            \hat{p}_1 - \hat{p}_2
        }{
            \sqrt{
                \hat{p}(1 - \hat{p})
                (\frac{1}{c_1} +
                \frac{1}{c_2})
            }
        }

    with :math:`\hat{p}_1, \hat{p}_2` and :math:`\hat{p}` the estimator of
    :math:`p_1, p_2` and :math:`p`, the latter being the combined probability,
    given the assumption that :math:`p_1 = p_2`.

    If this assumption is invalid (``pooled = False``), the statistic is:

    .. math::

        T(X) = \frac{
            \hat{p}_1 - \hat{p}_2
        }{
            \sqrt{
                \frac{\hat{p}_1 (1 - \hat{p}_1)}{c_1} +
                \frac{\hat{p}_2 (1 - \hat{p}_2)}{c_2}
            }
        }

    The p-value is then computed as:

    .. math::

        \sum
            \binom{c_1}{x_{11}}
            \binom{c_2}{x_{12}}
            \pi^{x_{11} + x_{12}}
            (1 - \pi)^{t - x_{11} - x_{12}}

    where the sum is over all 2x2 contingency tables :math:`X` such that:
    * :math:`T(X) \leq T(X_0)` when `alternative` = "less",
    * :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
    * :math:`T(X) \geq |T(X_0)|` when `alternative` = "two-sided".
    Above, :math:`c_1, c_2` are the sum of the columns 1 and 2,
    and :math:`t` the total (sum of the 4 sample's element).

    The returned p-value is the maximum p-value taken over the nuisance
    parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.

    This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
    number of sample points.

    References
    ----------
    .. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
           34.1/2 (1947): 123-138. :doi:`dpgkg3`

    .. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
           unconditional exact tests for comparing two binomials."
           *Cytel Software Corporation* 675 (2003): 1-5.

    .. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test

    Examples
    --------
    An example use of Barnard's test is presented in [2]_.

    Consider the following example of a vaccine efficacy study
    (Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
    inoculated with a recombinant DNA influenza vaccine and the 15 were
    inoculated with a placebo. Twelve of the 15 subjects in the placebo
    group (80%) eventually became infected with influenza whereas for the
    vaccine group, only 7 of the 15 subjects (47%) became infected. The
    data are tabulated as a 2 x 2 table::

                Vaccine  Placebo
        Yes        7        12
        No         8         3

    When working with statistical hypothesis testing, we usually use a
    threshold probability or significance level upon which we decide
    to reject the null hypothesis :math:`H_0`. Suppose we choose the common
    significance level of 5%.

    Our alternative hypothesis is that the vaccine will lower the chance of
    becoming infected with the virus; that is, the probability :math:`p_1` of
    catching the virus with the vaccine will be *less than* the probability
    :math:`p_2` of catching the virus without the vaccine. Therefore, we call
    `barnard_exact` with the ``alternative="less"`` option:

    >>> import scipy.stats as stats
    >>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
    >>> res.statistic
    -1.894
    >>> res.pvalue
    0.03407

    Under the null hypothesis that the vaccine will not lower the chance of
    becoming infected, the probability of obtaining test results at least as
    extreme as the observed data is approximately 3.4%. Since this p-value is
    less than our chosen significance level, we have evidence to reject
    :math:`H_0` in favor of the alternative.

    Suppose we had used Fisher's exact test instead:

    >>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
    >>> pvalue
    0.0640

    With the same threshold significance of 5%, we would not have been able
    to reject the null hypothesis in favor of the alternative. As stated in
    [2]_, Barnard's test is uniformly more powerful than Fisher's exact test
    because Barnard's test does not condition on any margin. Fisher's test
    should only be used when both sets of marginals are fixed.

    """
    if n <= 0:
        raise ValueError(
            "Number of points `n` must be strictly positive, "
            f"found {n!r}"
        )

    table = np.asarray(table, dtype=np.int64)

    if not table.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(table < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in table.sum(axis=0):
        # If both values in a column are zero, the p-value is 1 and
        # the score's statistic is NaN.
        return BarnardExactResult(np.nan, 1.0)

    total_col_1, total_col_2 = table.sum(axis=0)

    # Every possible first-row count for each column; the column/row vector
    # orientation makes the arithmetic below broadcast over the whole
    # (total_col_1 + 1) x (total_col_2 + 1) sample space at once.
    x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(-1, 1)
    x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(1, -1)

    # We need to calculate the wald statistics for each combination of x1 and
    # x2.
    p1, p2 = x1 / total_col_1, x2 / total_col_2

    if pooled:
        # Pooled estimate: assumes p_1 == p_2 under the null hypothesis.
        p = (x1 + x2) / (total_col_1 + total_col_2)
        variances = p * (1 - p) * (1 / total_col_1 + 1 / total_col_2)
    else:
        variances = p1 * (1 - p1) / total_col_1 + p2 * (1 - p2) / total_col_2

    # To avoid warning when dividing by 0
    with np.errstate(divide="ignore", invalid="ignore"):
        wald_statistic = np.divide((p1 - p2), np.sqrt(variances))

    # Where p1 == p2 both numerator and variance can be 0 (0/0 -> NaN);
    # define the statistic as 0 there.
    wald_statistic[p1 == p2] = 0  # Removing NaN values

    # Statistic of the observed table, indexed by its first-row entries.
    wald_stat_obs = wald_statistic[table[0, 0], table[0, 1]]

    # Boolean mask of tables at least as extreme as the observed one,
    # according to the chosen alternative.
    if alternative == "two-sided":
        index_arr = np.abs(wald_statistic) >= abs(wald_stat_obs)
    elif alternative == "less":
        index_arr = wald_statistic <= wald_stat_obs
    elif alternative == "greater":
        index_arr = wald_statistic >= wald_stat_obs
    else:
        msg = (
            "`alternative` should be one of {'two-sided', 'less', 'greater'},"
            f" found {alternative!r}"
        )
        raise ValueError(msg)

    x1_sum_x2 = x1 + x2

    # Log binomial coefficients: summing logs avoids overflow for large
    # column totals (see _compute_log_combinations).
    x1_log_comb = _compute_log_combinations(total_col_1)
    x2_log_comb = _compute_log_combinations(total_col_2)
    x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]

    # Maximize the p-value over the nuisance parameter pi in [0, 1] by
    # globally minimizing the negative log p-value with `shgo`.
    result = shgo(
        _get_binomial_log_p_value_with_nuisance_param,
        args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
        bounds=((0, 1),),
        n=n,
        sampling_method="sobol",
    )

    # result.fun is the negative log pvalue and therefore needs to be
    # changed before return
    p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
    return BarnardExactResult(wald_stat_obs, p_value)
|
| 1186 |
+
|
| 1187 |
+
|
| 1188 |
+
@dataclass
class BoschlooExactResult:
    """Result of `boschloo_exact`.

    Holds the Fisher-exact-test p-value used as Boschloo's statistic, and
    Boschloo's exact p-value.
    """
    # Fisher's exact test p-value of the observed table (Boschloo's statistic);
    # NaN when a column total is zero.
    statistic: float
    # Exact p-value maximized over the nuisance parameter; NaN when a column
    # total is zero.
    pvalue: float
|
| 1192 |
+
|
| 1193 |
+
|
| 1194 |
+
def boschloo_exact(table, alternative="two-sided", n=32):
    r"""Perform Boschloo's exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes section below.

    n : int, optional
        Number of sampling points used in the construction of the sampling
        method. Note that this argument will automatically be converted to
        the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
        select sample points. Default is 32. Must be positive. In most cases,
        32 points is enough to reach good precision. More points comes at
        performance cost.

    Returns
    -------
    ber : BoschlooExactResult
        A result object with the following attributes.

        statistic : float
            The statistic used in Boschloo's test; that is, the p-value
            from Fisher's exact test.

        pvalue : float
            P-value, the probability of obtaining a distribution at least as
            extreme as the one that was actually observed, assuming that the
            null hypothesis is true.

    Raises
    ------
    ValueError
        If `n` is not strictly positive, `table` is not of shape (2, 2),
        `table` contains negative values, or `alternative` is not one of
        'two-sided', 'less' or 'greater'.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    fisher_exact : Fisher exact test on a 2x2 contingency table.
    barnard_exact : Barnard's exact test, which is a more powerful alternative
        than Fisher's exact test for 2x2 contingency tables.

    Notes
    -----
    Boschloo's test is an exact test used in the analysis of contingency
    tables. It examines the association of two categorical variables, and
    is a uniformly more powerful alternative to Fisher's exact test
    for 2x2 contingency tables.

    Boschloo's exact test uses the p-value of Fisher's exact test as a
    statistic, and Boschloo's p-value is the probability under the null
    hypothesis of observing such an extreme value of this statistic.

    Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
    where each column stores the binomial experiment, as in the example
    below. Let's also define :math:`p_1, p_2` the theoretical binomial
    probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
    Boschloo exact test, we can assert three different alternative hypotheses:

    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 < p_2`,
      with `alternative` = "less"

    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 > p_2`,
      with `alternative` = "greater"

    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 \neq p_2`,
      with `alternative` = "two-sided" (default)

    There are multiple conventions for computing a two-sided p-value when the
    null distribution is asymmetric. Here, we apply the convention that the
    p-value of a two-sided test is twice the minimum of the p-values of the
    one-sided tests (clipped to 1.0). Note that `fisher_exact` follows a
    different convention, so for a given `table`, the statistic reported by
    `boschloo_exact` may differ from the p-value reported by `fisher_exact`
    when ``alternative='two-sided'``.

    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] R.D. Boschloo. "Raised conditional level of significance for the
           2 x 2-table when testing the equality of two probabilities",
           Statistica Neerlandica, 24(1), 1970

    .. [2] "Boschloo's test", Wikipedia,
           https://en.wikipedia.org/wiki/Boschloo%27s_test

    .. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction",
           Human Resource Management, 43(4), 395-407, 2004,
           :doi:`10.1002/hrm.20032`.

    Examples
    --------
    In the following example, we consider the article "Employee
    attitudes and job satisfaction" [3]_
    which reports the results of a survey from 63 scientists and 117 college
    professors. Of the 63 scientists, 31 said they were very satisfied with
    their jobs, whereas 74 of the college professors were very satisfied
    with their work. Is this significant evidence that college
    professors are happier with their work than scientists?
    The following table summarizes the data mentioned above::

                         college professors   scientists
        Very Satisfied          74                31
        Dissatisfied            43                32

    When working with statistical hypothesis testing, we usually use a
    threshold probability or significance level upon which we decide
    to reject the null hypothesis :math:`H_0`. Suppose we choose the common
    significance level of 5%.

    Our alternative hypothesis is that college professors are truly more
    satisfied with their work than scientists. Therefore, we expect
    :math:`p_1` the proportion of very satisfied college professors to be
    greater than :math:`p_2`, the proportion of very satisfied scientists.
    We thus call `boschloo_exact` with the ``alternative="greater"`` option:

    >>> import scipy.stats as stats
    >>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater")
    >>> res.statistic
    0.0483
    >>> res.pvalue
    0.0355

    Under the null hypothesis that scientists are happier in their work than
    college professors, the probability of obtaining test
    results at least as extreme as the observed data is approximately 3.55%.
    Since this p-value is less than our chosen significance level, we have
    evidence to reject :math:`H_0` in favor of the alternative hypothesis.

    """
    hypergeom = distributions.hypergeom

    if n <= 0:
        raise ValueError(
            "Number of points `n` must be strictly positive,"
            f" found {n!r}"
        )

    table = np.asarray(table, dtype=np.int64)

    if not table.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(table < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in table.sum(axis=0):
        # If both values in a column are zero, the statistic is undefined,
        # so both fields are returned as NaN.
        return BoschlooExactResult(np.nan, np.nan)

    total_col_1, total_col_2 = table.sum(axis=0)
    total = total_col_1 + total_col_2
    # Every possible first-row count for each column; broadcasting below
    # evaluates Fisher's p-value over the whole sample space at once.
    x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
    x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
    x1_sum_x2 = x1 + x2

    if alternative == 'less':
        pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
    elif alternative == 'two-sided':
        boschloo_less = boschloo_exact(table, alternative="less", n=n)
        boschloo_greater = boschloo_exact(table, alternative="greater", n=n)

        res = (
            boschloo_less if boschloo_less.pvalue < boschloo_greater.pvalue
            else boschloo_greater
        )

        # Two-sided p-value is defined as twice the minimum of the one-sided
        # p-values
        pvalue = np.clip(2 * res.pvalue, a_min=0, a_max=1)
        return BoschlooExactResult(res.statistic, pvalue)
    else:
        # NOTE: this must NOT be an f-string.  With the `f` prefix (as
        # previously written) the braces are evaluated as a *set display*,
        # interpolating the three options in arbitrary order instead of
        # printing them literally, and making the message inconsistent with
        # the equivalent one in `barnard_exact`.
        msg = (
            "`alternative` should be one of {'two-sided', 'less', 'greater'},"
            f" found {alternative!r}"
        )
        raise ValueError(msg)

    # Fisher's exact p-value of the observed table: Boschloo's statistic.
    fisher_stat = pvalues[table[0, 0], table[0, 1]]

    # fisher_stat * (1+1e-13) guards us from small numerical error. It is
    # equivalent to np.isclose with relative tol of 1e-13 and absolute tol of
    # 0. For more thorough explanations, see gh-14178.
    index_arr = pvalues <= fisher_stat * (1+1e-13)

    # Transpose back so the orientation matches the log-combination arrays.
    x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
    # Log binomial coefficients; summing logs avoids overflow for large
    # column totals (see _compute_log_combinations).
    x1_log_comb = _compute_log_combinations(total_col_1)
    x2_log_comb = _compute_log_combinations(total_col_2)
    x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]

    # Maximize the p-value over the nuisance parameter pi in [0, 1] by
    # globally minimizing the negative log p-value with `shgo`.
    result = shgo(
        _get_binomial_log_p_value_with_nuisance_param,
        args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
        bounds=((0, 1),),
        n=n,
        sampling_method="sobol",
    )

    # result.fun is the negative log pvalue and therefore needs to be
    # changed before return
    p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
    return BoschlooExactResult(fisher_stat, p_value)
|
| 1401 |
+
|
| 1402 |
+
|
| 1403 |
+
def _get_binomial_log_p_value_with_nuisance_param(
    nuisance_param, x1_sum_x2, x1_sum_x2_log_comb, index_arr
):
    r"""
    Compute the negative log p-value for a given nuisance parameter,
    considering a 2x2 sample space.

    Parameters
    ----------
    nuisance_param : ndarray of float
        Nuisance parameter used in the computation of the maximization of
        the p-value. Must be between 0 and 1.

    x1_sum_x2 : ndarray
        Sum of x1 and x2 computed inside `barnard_exact` / `boschloo_exact`.

    x1_sum_x2_log_comb : ndarray
        Sum of the log binomial coefficients of x1 and x2.

    index_arr : ndarray of bool
        Marks the tables at least as extreme as the observed one; only
        those contribute to the p-value sum.

    Returns
    -------
    p_value : float
        Negative log p-value for the given nuisance parameter. Minimizing
        this over the nuisance parameter yields the maximum p-value over
        all nuisance parameters between 0 and 1.

    Notes
    -----

    Both Barnard's test and Boschloo's test iterate over a nuisance parameter
    :math:`\pi \in [0, 1]` to find the maximum p-value. To search for this
    maximum, this function returns the negative log p-value with respect to
    the nuisance parameter passed in `nuisance_param`. This negative log
    p-value is then used in `shgo` to find the minimum negative p-value,
    which is our maximum p-value.

    Also, to compute the different combinations used in the
    p-values' computation formula, this function uses `gammaln` which is
    more tolerant of large values than `scipy.special.comb`. `gammaln` gives
    a log combination. For the little precision loss, performance is
    improved a lot.
    """
    # Shapes are (c1 + 1, c2 + 1), so the total number of trials is
    # t1 + t2 - 2.
    t1, t2 = x1_sum_x2.shape
    n = t1 + t2 - 2
    # log(pi) and log(1 - pi); the `where` masks keep the boundary values
    # pi == 0 and pi == 1 from raising, with `out` supplying 0 there.
    with np.errstate(divide="ignore", invalid="ignore"):
        log_nuisance = np.log(
            nuisance_param,
            out=np.zeros_like(nuisance_param),
            where=nuisance_param >= 0,
        )
        log_1_minus_nuisance = np.log(
            1 - nuisance_param,
            out=np.zeros_like(nuisance_param),
            where=1 - nuisance_param >= 0,
        )

    # pi**(x1 + x2) in log space; force an exact 0 where the exponent is 0
    # so that 0 * -inf does not produce NaN.
    nuisance_power_x1_x2 = log_nuisance * x1_sum_x2
    nuisance_power_x1_x2[(x1_sum_x2 == 0)[:, :]] = 0

    # (1 - pi)**(n - x1 - x2) in log space, with the same boundary handling.
    nuisance_power_n_minus_x1_x2 = log_1_minus_nuisance * (n - x1_sum_x2)
    nuisance_power_n_minus_x1_x2[(x1_sum_x2 == n)[:, :]] = 0

    # Log of each table's probability term: binomial coefficients times the
    # two power factors, all summed in log space.
    tmp_log_values_arr = (
        x1_sum_x2_log_comb
        + nuisance_power_x1_x2
        + nuisance_power_n_minus_x1_x2
    )

    # Keep only the tables at least as extreme as the observed one.
    tmp_values_from_index = tmp_log_values_arr[index_arr]

    # To avoid dividing by zero in log function and getting inf value,
    # values are centered according to the max
    max_value = tmp_values_from_index.max()

    # To have better result's precision, the log pvalue is taken here.
    # Indeed, pvalue is included inside [0, 1] interval. Passing the
    # pvalue to log makes the interval a lot bigger ([-inf, 0]), and thus
    # help us to achieve better precision
    with np.errstate(divide="ignore", invalid="ignore"):
        log_probs = np.exp(tmp_values_from_index - max_value).sum()
        log_pvalue = max_value + np.log(
            log_probs,
            out=np.full_like(log_probs, -np.inf),
            where=log_probs > 0,
        )

    # Since shgo find the minima, minus log pvalue is returned
    return -log_pvalue
|
| 1492 |
+
|
| 1493 |
+
|
| 1494 |
+
def _pval_cvm_2samp_exact(s, m, n):
    """
    Compute the exact p-value of the Cramer-von Mises two-sample test
    for a given value s of the test statistic.
    m and n are the sizes of the samples.

    Parameters
    ----------
    s : int
        Value of the statistic ``U`` (eq. 10 in [2]); an integer because
        it is computed from integer ranks scaled by the sample sizes.
    m, n : int
        Sizes of the two samples.

    Returns
    -------
    float
        Exact p-value ``P(U >= s)`` under the null hypothesis.

    [1] Y. Xiao, A. Gordon, and A. Yakovlev, "A C++ Program for
        the Cramér-Von Mises Two-Sample Test", J. Stat. Soft.,
        vol. 17, no. 8, pp. 1-15, Dec. 2006.
    [2] T. W. Anderson "On the Distribution of the Two-Sample Cramer-von Mises
        Criterion," The Annals of Mathematical Statistics, Ann. Math. Statist.
        33(3), 1148-1159, (September, 1962)
    """
    # [1, p. 3]
    lcm = np.lcm(m, n)
    # [1, p. 4], below eq. 3
    a = lcm // m
    b = lcm // n
    # Combine Eq. 9 in [2] with Eq. 2 in [1] and solve for $\zeta$
    # Hint: `s` is $U$ in [2], and $T_2$ in [1] is $T$ in [2]
    mn = m * n
    zeta = lcm ** 2 * (m + n) * (6 * s - mn * (4 * mn - 1)) // (6 * mn ** 2)

    # bound maximum value that may appear in `gs` (remember both rows!)
    # so the smallest integer dtype that cannot overflow can be chosen.
    zeta_bound = lcm**2 * (m + n)  # bound elements in row 1
    combinations = comb(m + n, m)  # sum of row 2
    max_gs = max(zeta_bound, combinations)
    dtype = np.min_scalar_type(max_gs)

    # the frequency table of $g_{u, v}^+$ defined in [1, p. 6]
    # Each entry of `gs` is a 2-row array: row 0 holds distinct statistic
    # values, row 1 holds their frequencies (counts of lattice paths).
    gs = ([np.array([[0], [1]], dtype=dtype)]
          + [np.empty((2, 0), dtype=dtype) for _ in range(m)])
    for u in range(n + 1):
        next_gs = []
        tmp = np.empty((2, 0), dtype=dtype)
        for v, g in enumerate(gs):
            # Calculate g recursively with eq. 11 in [1]. Even though it
            # doesn't look like it, this also does 12/13 (all of Algorithm 1).
            # Merge the two frequency tables: values present in both have
            # their frequencies added; values unique to either are appended.
            vi, i0, i1 = np.intersect1d(tmp[0], g[0], return_indices=True)
            tmp = np.concatenate([
                np.stack([vi, tmp[1, i0] + g[1, i1]]),
                np.delete(tmp, i0, 1),
                np.delete(g, i1, 1)
            ], 1)
            # Shift every statistic value by the squared deviation of the
            # current lattice point (a*v - b*u)^2, per eq. 11 in [1].
            res = (a * v - b * u) ** 2
            tmp[0] += res.astype(dtype)
            next_gs.append(tmp)
        gs = next_gs
    # gs[m] is the table for the full path (all m x's and n y's consumed).
    value, freq = gs[m]
    # p-value: fraction of orderings whose statistic is at least `zeta`.
    return np.float64(np.sum(freq[value >= zeta]) / combinations)
|
| 1545 |
+
|
| 1546 |
+
|
| 1547 |
+
@_axis_nan_policy_factory(CramerVonMisesResult, n_samples=2, too_small=1,
                          result_to_tuple=_cvm_result_to_tuple)
def cramervonmises_2samp(x, y, method='auto'):
    """Perform the two-sample Cramér-von Mises test for goodness of fit.

    This is the two-sample version of the Cramér-von Mises test ([1]_):
    for two independent samples :math:`X_1, ..., X_n` and
    :math:`Y_1, ..., Y_m`, the null hypothesis is that the samples
    come from the same (unspecified) continuous distribution.

    Parameters
    ----------
    x : array_like
        A 1-D array of observed values of the random variables :math:`X_i`.
        Must contain at least two observations.
    y : array_like
        A 1-D array of observed values of the random variables :math:`Y_i`.
        Must contain at least two observations.
    method : {'auto', 'asymptotic', 'exact'}, optional
        The method used to compute the p-value.  ``'exact'`` enumerates
        all possible combinations of the test statistic (see [2]_);
        ``'asymptotic'`` uses the limiting distribution of the statistic.
        ``'auto'`` (default) picks ``'exact'`` when both samples contain
        at most 20 observations and ``'asymptotic'`` otherwise.

    Returns
    -------
    res : object with attributes
        statistic : float
            Cramér-von Mises statistic.
        pvalue : float
            The p-value.

    See Also
    --------
    cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp

    Notes
    -----
    .. versionadded:: 1.7.0

    The statistic is computed according to equation 9 in [2]_.  If the
    underlying distribution is not continuous, the p-value is likely to
    be conservative (Section 6.2 in [3]_).  When ranking the data to
    compute the test statistic, midranks are used if there are ties.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion
    .. [2] Anderson, T.W. (1962). On the distribution of the two-sample
           Cramer-von-Mises criterion. The Annals of Mathematical
           Statistics, pp. 1148-1159.
    .. [3] Conover, W.J., Practical Nonparametric Statistics, 1971.
    """
    xa = np.sort(np.asarray(x))
    ya = np.sort(np.asarray(y))

    if min(xa.size, ya.size) <= 1:
        raise ValueError('x and y must contain at least two observations.')
    if method not in ('auto', 'exact', 'asymptotic'):
        raise ValueError('method must be either auto, exact or asymptotic.')

    nx, ny = len(xa), len(ya)

    if method == 'auto':
        method = 'exact' if max(nx, ny) <= 20 else 'asymptotic'

    # Rank the pooled sample; midranks resolve ties (see [1]).
    pooled = np.concatenate([xa, ya])
    ranks = scipy.stats.rankdata(pooled, method='average')
    rx, ry = ranks[:nx], ranks[nx:]

    # U statistic (eq. 10 in [2])
    u = (nx * np.sum((rx - np.arange(1, nx + 1)) ** 2)
         + ny * np.sum((ry - np.arange(1, ny + 1)) ** 2))

    # T statistic (eq. 9 in [2])
    k, N = nx * ny, nx + ny
    t = u / (k * N) - (4 * k - 1) / (6 * N)

    if method == 'exact':
        p = _pval_cvm_2samp_exact(u, nx, ny)
    else:
        # Expected value and variance of T (eq. 11 and 14 in [2]).
        et = (1 + 1 / N) / 6
        vt = (N + 1) * (4 * k * N - 3 * (nx ** 2 + ny ** 2) - 2 * k)
        vt = vt / (45 * N ** 2 * 4 * k)

        # Normalized statistic (eq. 15 in [2]).
        tn = 1 / 6 + (t - et) / np.sqrt(45 * vt)

        # Approximate the distribution of tn with the limiting
        # distribution of the one-sample statistic.  For tn < 0.003,
        # _cdf_cvm_inf(tn) < 1.28e-18, so return 1.0 directly.
        if tn < 0.003:
            p = 1.0
        else:
            p = max(0, 1. - _cdf_cvm_inf(tn))

    return CramerVonMisesResult(statistic=t, pvalue=p)
|
| 1700 |
+
|
| 1701 |
+
|
| 1702 |
+
class TukeyHSDResult:
    """Result of `scipy.stats.tukey_hsd`.

    Attributes
    ----------
    statistic : float ndarray
        The computed statistic of the test for each comparison. The element
        at index ``(i, j)`` is the statistic for the comparison between groups
        ``i`` and ``j``.
    pvalue : float ndarray
        The associated p-value from the studentized range distribution. The
        element at index ``(i, j)`` is the p-value for the comparison
        between groups ``i`` and ``j``.

    Notes
    -----
    The string representation of this object displays the most recently
    calculated confidence interval, and if none have been previously
    calculated, it will evaluate ``confidence_interval()``.

    References
    ----------
    .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
           Method."
           https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
           28 November 2020.
    """

    def __init__(self, statistic, pvalue, _nobs, _ntreatments, _stand_err):
        # NOTE(review): the call site in `tukey_hsd` passes the number of
        # treatments as `_nobs` and the total observation count as
        # `_ntreatments`; `confidence_interval` uses the attributes in the
        # same swapped sense, so behavior is consistent -- confirm before
        # renaming either side.
        self.statistic = statistic
        self.pvalue = pvalue
        self._ntreatments = _ntreatments
        self._nobs = _nobs
        self._stand_err = _stand_err
        # Cached confidence interval and the level it was computed at.
        self._ci = None
        self._ci_cl = None

    def __str__(self):
        # Note: `__str__` prints the confidence intervals from the most
        # recent call to `confidence_interval`. If it has not been called,
        # it will be called with the default CL of .95.
        if self._ci is None:
            self.confidence_interval(confidence_level=.95)
        s = ("Tukey's HSD Pairwise Group Comparisons"
             f" ({self._ci_cl*100:.1f}% Confidence Interval)\n")
        s += "Comparison Statistic p-value Lower CI Upper CI\n"
        # One row per ordered pair of distinct groups (both (i, j) and
        # (j, i) are printed; they differ only in sign of the statistic).
        for i in range(self.pvalue.shape[0]):
            for j in range(self.pvalue.shape[0]):
                if i != j:
                    s += (f" ({i} - {j}) {self.statistic[i, j]:>10.3f}"
                          f"{self.pvalue[i, j]:>10.3f}"
                          f"{self._ci.low[i, j]:>10.3f}"
                          f"{self._ci.high[i, j]:>10.3f}\n")
        return s

    def confidence_interval(self, confidence_level=.95):
        """Compute the confidence interval for the specified confidence level.

        Parameters
        ----------
        confidence_level : float, optional
            Confidence level for the computed confidence interval
            of the estimated proportion. Default is .95.

        Returns
        -------
        ci : ``ConfidenceInterval`` object
            The object has attributes ``low`` and ``high`` that hold the
            lower and upper bounds of the confidence intervals for each
            comparison. The high and low values are accessible for each
            comparison at index ``(i, j)`` between groups ``i`` and ``j``.

        References
        ----------
        .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1.
               Tukey's Method."
               https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
               28 November 2020.

        Examples
        --------
        >>> from scipy.stats import tukey_hsd
        >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
        >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
        >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
        >>> result = tukey_hsd(group0, group1, group2)
        >>> ci = result.confidence_interval()
        >>> ci.low
        array([[-3.649159, -8.249159, -3.909159],
               [ 0.950841, -3.649159,  0.690841],
               [-3.389159, -7.989159, -3.649159]])
        >>> ci.high
        array([[ 3.649159, -0.950841,  3.389159],
               [ 8.249159,  3.649159,  7.989159],
               [ 3.909159, -0.690841,  3.649159]])
        """
        # check to see if the supplied confidence level matches that of the
        # previously computed CI.
        if (self._ci is not None and self._ci_cl is not None and
                confidence_level == self._ci_cl):
            return self._ci

        if not 0 < confidence_level < 1:
            raise ValueError("Confidence level must be between 0 and 1.")
        # determine the critical value of the studentized range using the
        # appropriate confidence level, number of treatments, and degrees
        # of freedom as determined by the number of data less the number of
        # treatments. ("Confidence limits for Tukey's method")[1]. Note that
        # in the cases of unequal sample sizes there will be a criterion for
        # each group comparison.
        # NOTE(review): given the swapped constructor arguments (see
        # __init__), self._nobs holds the treatment count and
        # self._ntreatments the total observation count, so this evaluates
        # ppf(confidence_level, k, N - k) as intended -- confirm.
        params = (confidence_level, self._nobs, self._ntreatments - self._nobs)
        srd = distributions.studentized_range.ppf(*params)
        # also called maximum critical value, the Tukey criterion is the
        # studentized range critical value * the square root of mean square
        # error over the sample size.
        tukey_criterion = srd * self._stand_err
        # the confidence levels are determined by the
        # `mean_differences` +- `tukey_criterion`
        upper_conf = self.statistic + tukey_criterion
        lower_conf = self.statistic - tukey_criterion
        self._ci = ConfidenceInterval(low=lower_conf, high=upper_conf)
        self._ci_cl = confidence_level
        return self._ci
|
| 1825 |
+
|
| 1826 |
+
|
| 1827 |
+
def _tukey_hsd_iv(args):
|
| 1828 |
+
if (len(args)) < 2:
|
| 1829 |
+
raise ValueError("There must be more than 1 treatment.")
|
| 1830 |
+
args = [np.asarray(arg) for arg in args]
|
| 1831 |
+
for arg in args:
|
| 1832 |
+
if arg.ndim != 1:
|
| 1833 |
+
raise ValueError("Input samples must be one-dimensional.")
|
| 1834 |
+
if arg.size <= 1:
|
| 1835 |
+
raise ValueError("Input sample size must be greater than one.")
|
| 1836 |
+
if np.isinf(arg).any():
|
| 1837 |
+
raise ValueError("Input samples must be finite.")
|
| 1838 |
+
return args
|
| 1839 |
+
|
| 1840 |
+
|
| 1841 |
+
def tukey_hsd(*args):
    """Perform Tukey's HSD test for equality of means over multiple treatments.

    Tukey's honestly significant difference (HSD) test performs pairwise
    comparison of means for a set of samples.  Whereas ANOVA (e.g.
    `f_oneway`) assesses whether the true means underlying each sample are
    identical, Tukey's HSD is a post hoc test used to compare the mean of
    each sample to the mean of each other sample.

    The null hypothesis is that the distributions underlying the samples
    all have the same mean.  The test statistic, computed for every
    possible pairing of samples, is simply the difference between the
    sample means.  For each pair, the p-value is the probability under the
    null hypothesis (and further assumptions; see Notes) of observing such
    an extreme value of the statistic, considering that many pairwise
    comparisons are being performed.  Confidence intervals for the
    difference between each pair of means are also available via the
    result object's ``confidence_interval`` method.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.  There must be at least
        two arguments.

    Returns
    -------
    result : `~scipy.stats._result_classes.TukeyHSDResult` instance
        Object with attributes ``statistic`` and ``pvalue``, both float
        ndarrays: the element at index ``(i, j)`` corresponds to the
        comparison between groups ``i`` and ``j``.  The object also has a
        ``confidence_interval(confidence_level=0.95)`` method.

    See Also
    --------
    dunnett : performs comparison of means against a control group.

    Notes
    -----
    The use of this test relies on several assumptions.

    1. The observations are independent within and among groups.
    2. The observations within each group are normally distributed.
    3. The distributions from which the samples are drawn have the same
       finite variance.

    The original formulation of the test was for samples of equal size
    [6]_.  In case of unequal sample sizes, the test uses the Tukey-Kramer
    method [4]_.

    References
    ----------
    .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
           Method."
           https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
           28 November 2020.
    .. [2] Abdi, Herve & Williams, Lynne. (2021). "Tukey's Honestly Significant
           Difference (HSD) Test."
           https://personal.utdallas.edu/~herve/abdi-HSD2010-pretty.pdf
    .. [3] "One-Way ANOVA Using SAS PROC ANOVA & PROC GLM." SAS
           Tutorials, 2007, www.stattutorials.com/SAS/TUTORIAL-PROC-GLM.htm.
    .. [4] Kramer, Clyde Young. "Extension of Multiple Range Tests to Group
           Means with Unequal Numbers of Replications." Biometrics, vol. 12,
           no. 3, 1956, pp. 307-310. JSTOR, www.jstor.org/stable/3001469.
           Accessed 25 May 2021.
    .. [5] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.3.3.
           The ANOVA table and tests of hypotheses about means"
           https://www.itl.nist.gov/div898/handbook/prc/section4/prc433.htm,
           2 June 2021.
    .. [6] Tukey, John W. "Comparing Individual Means in the Analysis of
           Variance." Biometrics, vol. 5, no. 2, 1949, pp. 99-114. JSTOR,
           www.jstor.org/stable/3001913. Accessed 14 June 2021.

    Examples
    --------
    >>> from scipy.stats import tukey_hsd
    >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
    >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
    >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
    >>> res = tukey_hsd(group0, group1, group2)
    >>> res.pvalue[0, 1] < 0.05  # group0 vs. group1 differ at alpha=.05
    True
    """
    samples = _tukey_hsd_iv(args)
    ntreatments = len(samples)
    group_means = np.asarray([np.mean(sample) for sample in samples])
    group_sizes = np.asarray([sample.size for sample in samples])
    nobs = np.sum(group_sizes)

    # Mean square error (a.k.a. mean square within) [5]: weighted average
    # of the per-group sample variances.
    sse = np.sum([np.var(sample, ddof=1) for sample in samples] *
                 (group_sizes - 1))
    mse = sse / (nobs - ntreatments)

    # The standard-error calculation differs when treatments differ in
    # size ("Unequal sample sizes" [1]).
    if np.unique(group_sizes).size == 1:
        # Equal group sizes: a single scalar suffices [1].
        normalize = 2 / group_sizes[0]
    else:
        # Unequal group sizes: one value per pairwise comparison, obtained
        # by broadcasting [3], verified against [4] (page 308).
        normalize = 1 / group_sizes + 1 / group_sizes[None].T

    # Standard error, used both for the Tukey criterion and the p-values.
    stand_err = np.sqrt(normalize * mse / 2)

    # The pairwise mean difference is the test statistic.
    mean_differences = group_means[None].T - group_means

    # t-statistic fed to the survival function of the studentized range
    # distribution to obtain the p-values.
    t_stat = np.abs(mean_differences) / stand_err
    pvalues = distributions.studentized_range.sf(t_stat, ntreatments,
                                                 nobs - ntreatments)

    return TukeyHSDResult(mean_differences, pvalues, ntreatments,
                          nobs, stand_err)
|
parrot/lib/python3.10/site-packages/scipy/stats/_kde.py
ADDED
|
@@ -0,0 +1,725 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#-------------------------------------------------------------------------------
|
| 2 |
+
#
|
| 3 |
+
# Define classes for (uni/multi)-variate kernel density estimation.
|
| 4 |
+
#
|
| 5 |
+
# Currently, only Gaussian kernels are implemented.
|
| 6 |
+
#
|
| 7 |
+
# Written by: Robert Kern
|
| 8 |
+
#
|
| 9 |
+
# Date: 2004-08-09
|
| 10 |
+
#
|
| 11 |
+
# Modified: 2005-02-10 by Robert Kern.
|
| 12 |
+
# Contributed to SciPy
|
| 13 |
+
# 2005-10-07 by Robert Kern.
|
| 14 |
+
# Some fixes to match the new scipy_core
|
| 15 |
+
#
|
| 16 |
+
# Copyright 2004-2005 by Enthought, Inc.
|
| 17 |
+
#
|
| 18 |
+
#-------------------------------------------------------------------------------
|
| 19 |
+
|
| 20 |
+
# Standard library imports.
|
| 21 |
+
import warnings
|
| 22 |
+
|
| 23 |
+
# SciPy imports.
|
| 24 |
+
from scipy import linalg, special
|
| 25 |
+
from scipy._lib._util import check_random_state
|
| 26 |
+
|
| 27 |
+
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, exp, pi,
|
| 28 |
+
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
|
| 29 |
+
ones, cov)
|
| 30 |
+
import numpy as np
|
| 31 |
+
|
| 32 |
+
# Local imports.
|
| 33 |
+
from . import _mvn
|
| 34 |
+
from ._stats import gaussian_kernel_estimate, gaussian_kernel_estimate_log
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
__all__ = ['gaussian_kde']
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class gaussian_kde:
|
| 41 |
+
"""Representation of a kernel-density estimate using Gaussian kernels.
|
| 42 |
+
|
| 43 |
+
Kernel density estimation is a way to estimate the probability density
|
| 44 |
+
function (PDF) of a random variable in a non-parametric way.
|
| 45 |
+
`gaussian_kde` works for both uni-variate and multi-variate data. It
|
| 46 |
+
includes automatic bandwidth determination. The estimation works best for
|
| 47 |
+
a unimodal distribution; bimodal or multi-modal distributions tend to be
|
| 48 |
+
oversmoothed.
|
| 49 |
+
|
| 50 |
+
Parameters
|
| 51 |
+
----------
|
| 52 |
+
dataset : array_like
|
| 53 |
+
Datapoints to estimate from. In case of univariate data this is a 1-D
|
| 54 |
+
array, otherwise a 2-D array with shape (# of dims, # of data).
|
| 55 |
+
bw_method : str, scalar or callable, optional
|
| 56 |
+
The method used to calculate the estimator bandwidth. This can be
|
| 57 |
+
'scott', 'silverman', a scalar constant or a callable. If a scalar,
|
| 58 |
+
this will be used directly as `kde.factor`. If a callable, it should
|
| 59 |
+
take a `gaussian_kde` instance as only parameter and return a scalar.
|
| 60 |
+
If None (default), 'scott' is used. See Notes for more details.
|
| 61 |
+
weights : array_like, optional
|
| 62 |
+
weights of datapoints. This must be the same shape as dataset.
|
| 63 |
+
If None (default), the samples are assumed to be equally weighted
|
| 64 |
+
|
| 65 |
+
Attributes
|
| 66 |
+
----------
|
| 67 |
+
dataset : ndarray
|
| 68 |
+
The dataset with which `gaussian_kde` was initialized.
|
| 69 |
+
d : int
|
| 70 |
+
Number of dimensions.
|
| 71 |
+
n : int
|
| 72 |
+
Number of datapoints.
|
| 73 |
+
neff : int
|
| 74 |
+
Effective number of datapoints.
|
| 75 |
+
|
| 76 |
+
.. versionadded:: 1.2.0
|
| 77 |
+
factor : float
|
| 78 |
+
The bandwidth factor, obtained from `kde.covariance_factor`. The square
|
| 79 |
+
of `kde.factor` multiplies the covariance matrix of the data in the kde
|
| 80 |
+
estimation.
|
| 81 |
+
covariance : ndarray
|
| 82 |
+
The covariance matrix of `dataset`, scaled by the calculated bandwidth
|
| 83 |
+
(`kde.factor`).
|
| 84 |
+
inv_cov : ndarray
|
| 85 |
+
The inverse of `covariance`.
|
| 86 |
+
|
| 87 |
+
Methods
|
| 88 |
+
-------
|
| 89 |
+
evaluate
|
| 90 |
+
__call__
|
| 91 |
+
integrate_gaussian
|
| 92 |
+
integrate_box_1d
|
| 93 |
+
integrate_box
|
| 94 |
+
integrate_kde
|
| 95 |
+
pdf
|
| 96 |
+
logpdf
|
| 97 |
+
resample
|
| 98 |
+
set_bandwidth
|
| 99 |
+
covariance_factor
|
| 100 |
+
|
| 101 |
+
Notes
|
| 102 |
+
-----
|
| 103 |
+
Bandwidth selection strongly influences the estimate obtained from the KDE
|
| 104 |
+
(much more so than the actual shape of the kernel). Bandwidth selection
|
| 105 |
+
can be done by a "rule of thumb", by cross-validation, by "plug-in
|
| 106 |
+
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
|
| 107 |
+
uses a rule of thumb, the default is Scott's Rule.
|
| 108 |
+
|
| 109 |
+
Scott's Rule [1]_, implemented as `scotts_factor`, is::
|
| 110 |
+
|
| 111 |
+
n**(-1./(d+4)),
|
| 112 |
+
|
| 113 |
+
with ``n`` the number of data points and ``d`` the number of dimensions.
|
| 114 |
+
In the case of unequally weighted points, `scotts_factor` becomes::
|
| 115 |
+
|
| 116 |
+
neff**(-1./(d+4)),
|
| 117 |
+
|
| 118 |
+
with ``neff`` the effective number of datapoints.
|
| 119 |
+
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
|
| 120 |
+
|
| 121 |
+
(n * (d + 2) / 4.)**(-1. / (d + 4)).
|
| 122 |
+
|
| 123 |
+
or in the case of unequally weighted points::
|
| 124 |
+
|
| 125 |
+
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
|
| 126 |
+
|
| 127 |
+
Good general descriptions of kernel density estimation can be found in [1]_
|
| 128 |
+
and [2]_, the mathematics for this multi-dimensional implementation can be
|
| 129 |
+
found in [1]_.
|
| 130 |
+
|
| 131 |
+
With a set of weighted samples, the effective number of datapoints ``neff``
|
| 132 |
+
is defined by::
|
| 133 |
+
|
| 134 |
+
neff = sum(weights)^2 / sum(weights^2)
|
| 135 |
+
|
| 136 |
+
as detailed in [5]_.
|
| 137 |
+
|
| 138 |
+
`gaussian_kde` does not currently support data that lies in a
|
| 139 |
+
lower-dimensional subspace of the space in which it is expressed. For such
|
| 140 |
+
data, consider performing principal component analysis / dimensionality
|
| 141 |
+
reduction and using `gaussian_kde` with the transformed data.
|
| 142 |
+
|
| 143 |
+
References
|
| 144 |
+
----------
|
| 145 |
+
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
|
| 146 |
+
Visualization", John Wiley & Sons, New York, Chicester, 1992.
|
| 147 |
+
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
|
| 148 |
+
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
|
| 149 |
+
Chapman and Hall, London, 1986.
|
| 150 |
+
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
|
| 151 |
+
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
|
| 152 |
+
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
|
| 153 |
+
conditional density estimation", Computational Statistics & Data
|
| 154 |
+
Analysis, Vol. 36, pp. 279-298, 2001.
|
| 155 |
+
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
|
| 156 |
+
Series A (General), 132, 272
|
| 157 |
+
|
| 158 |
+
Examples
|
| 159 |
+
--------
|
| 160 |
+
Generate some random two-dimensional data:
|
| 161 |
+
|
| 162 |
+
>>> import numpy as np
|
| 163 |
+
>>> from scipy import stats
|
| 164 |
+
>>> def measure(n):
|
| 165 |
+
... "Measurement model, return two coupled measurements."
|
| 166 |
+
... m1 = np.random.normal(size=n)
|
| 167 |
+
... m2 = np.random.normal(scale=0.5, size=n)
|
| 168 |
+
... return m1+m2, m1-m2
|
| 169 |
+
|
| 170 |
+
>>> m1, m2 = measure(2000)
|
| 171 |
+
>>> xmin = m1.min()
|
| 172 |
+
>>> xmax = m1.max()
|
| 173 |
+
>>> ymin = m2.min()
|
| 174 |
+
>>> ymax = m2.max()
|
| 175 |
+
|
| 176 |
+
Perform a kernel density estimate on the data:
|
| 177 |
+
|
| 178 |
+
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
|
| 179 |
+
>>> positions = np.vstack([X.ravel(), Y.ravel()])
|
| 180 |
+
>>> values = np.vstack([m1, m2])
|
| 181 |
+
>>> kernel = stats.gaussian_kde(values)
|
| 182 |
+
>>> Z = np.reshape(kernel(positions).T, X.shape)
|
| 183 |
+
|
| 184 |
+
Plot the results:
|
| 185 |
+
|
| 186 |
+
>>> import matplotlib.pyplot as plt
|
| 187 |
+
>>> fig, ax = plt.subplots()
|
| 188 |
+
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
|
| 189 |
+
... extent=[xmin, xmax, ymin, ymax])
|
| 190 |
+
>>> ax.plot(m1, m2, 'k.', markersize=2)
|
| 191 |
+
>>> ax.set_xlim([xmin, xmax])
|
| 192 |
+
>>> ax.set_ylim([ymin, ymax])
|
| 193 |
+
>>> plt.show()
|
| 194 |
+
|
| 195 |
+
"""
|
| 196 |
+
def __init__(self, dataset, bw_method=None, weights=None):
|
| 197 |
+
self.dataset = atleast_2d(asarray(dataset))
|
| 198 |
+
if not self.dataset.size > 1:
|
| 199 |
+
raise ValueError("`dataset` input should have multiple elements.")
|
| 200 |
+
|
| 201 |
+
self.d, self.n = self.dataset.shape
|
| 202 |
+
|
| 203 |
+
if weights is not None:
|
| 204 |
+
self._weights = atleast_1d(weights).astype(float)
|
| 205 |
+
self._weights /= sum(self._weights)
|
| 206 |
+
if self.weights.ndim != 1:
|
| 207 |
+
raise ValueError("`weights` input should be one-dimensional.")
|
| 208 |
+
if len(self._weights) != self.n:
|
| 209 |
+
raise ValueError("`weights` input should be of length n")
|
| 210 |
+
self._neff = 1/sum(self._weights**2)
|
| 211 |
+
|
| 212 |
+
# This can be converted to a warning once gh-10205 is resolved
|
| 213 |
+
if self.d > self.n:
|
| 214 |
+
msg = ("Number of dimensions is greater than number of samples. "
|
| 215 |
+
"This results in a singular data covariance matrix, which "
|
| 216 |
+
"cannot be treated using the algorithms implemented in "
|
| 217 |
+
"`gaussian_kde`. Note that `gaussian_kde` interprets each "
|
| 218 |
+
"*column* of `dataset` to be a point; consider transposing "
|
| 219 |
+
"the input to `dataset`.")
|
| 220 |
+
raise ValueError(msg)
|
| 221 |
+
|
| 222 |
+
try:
|
| 223 |
+
self.set_bandwidth(bw_method=bw_method)
|
| 224 |
+
except linalg.LinAlgError as e:
|
| 225 |
+
msg = ("The data appears to lie in a lower-dimensional subspace "
|
| 226 |
+
"of the space in which it is expressed. This has resulted "
|
| 227 |
+
"in a singular data covariance matrix, which cannot be "
|
| 228 |
+
"treated using the algorithms implemented in "
|
| 229 |
+
"`gaussian_kde`. Consider performing principle component "
|
| 230 |
+
"analysis / dimensionality reduction and using "
|
| 231 |
+
"`gaussian_kde` with the transformed data.")
|
| 232 |
+
raise linalg.LinAlgError(msg) from e
|
| 233 |
+
|
| 234 |
+
def evaluate(self, points):
|
| 235 |
+
"""Evaluate the estimated pdf on a set of points.
|
| 236 |
+
|
| 237 |
+
Parameters
|
| 238 |
+
----------
|
| 239 |
+
points : (# of dimensions, # of points)-array
|
| 240 |
+
Alternatively, a (# of dimensions,) vector can be passed in and
|
| 241 |
+
treated as a single point.
|
| 242 |
+
|
| 243 |
+
Returns
|
| 244 |
+
-------
|
| 245 |
+
values : (# of points,)-array
|
| 246 |
+
The values at each point.
|
| 247 |
+
|
| 248 |
+
Raises
|
| 249 |
+
------
|
| 250 |
+
ValueError : if the dimensionality of the input points is different than
|
| 251 |
+
the dimensionality of the KDE.
|
| 252 |
+
|
| 253 |
+
"""
|
| 254 |
+
points = atleast_2d(asarray(points))
|
| 255 |
+
|
| 256 |
+
d, m = points.shape
|
| 257 |
+
if d != self.d:
|
| 258 |
+
if d == 1 and m == self.d:
|
| 259 |
+
# points was passed in as a row vector
|
| 260 |
+
points = reshape(points, (self.d, 1))
|
| 261 |
+
m = 1
|
| 262 |
+
else:
|
| 263 |
+
msg = (f"points have dimension {d}, "
|
| 264 |
+
f"dataset has dimension {self.d}")
|
| 265 |
+
raise ValueError(msg)
|
| 266 |
+
|
| 267 |
+
output_dtype, spec = _get_output_dtype(self.covariance, points)
|
| 268 |
+
result = gaussian_kernel_estimate[spec](
|
| 269 |
+
self.dataset.T, self.weights[:, None],
|
| 270 |
+
points.T, self.cho_cov, output_dtype)
|
| 271 |
+
|
| 272 |
+
return result[:, 0]
|
| 273 |
+
|
| 274 |
+
__call__ = evaluate
|
| 275 |
+
|
| 276 |
+
    def integrate_gaussian(self, mean, cov):
        """
        Multiply estimated density by a multivariate Gaussian and integrate
        over the whole space.

        Parameters
        ----------
        mean : array_like
            A 1-D array, specifying the mean of the Gaussian.
        cov : array_like
            A 2-D array, specifying the covariance matrix of the Gaussian.

        Returns
        -------
        result : scalar
            The value of the integral.

        Raises
        ------
        ValueError
            If the mean or covariance of the input Gaussian differs from
            the KDE's dimensionality.

        """
        mean = atleast_1d(squeeze(mean))
        cov = atleast_2d(cov)

        if mean.shape != (self.d,):
            raise ValueError("mean does not have dimension %s" % self.d)
        if cov.shape != (self.d, self.d):
            raise ValueError("covariance does not have dimension %s" % self.d)

        # make mean a column vector
        mean = mean[:, newaxis]

        # The integral of the product of two Gaussians is a Gaussian in the
        # difference of means, with the sum of the two covariances.
        sum_cov = self.covariance + cov

        # This will raise LinAlgError if the new cov matrix is not s.p.d
        # cho_factor returns (ndarray, bool) where bool is a flag for whether
        # or not ndarray is upper or lower triangular
        sum_cov_chol = linalg.cho_factor(sum_cov)

        diff = self.dataset - mean
        tdiff = linalg.cho_solve(sum_cov_chol, diff)

        # det(sum_cov) is the squared product of the Cholesky factor's
        # diagonal, so this product is sqrt(det(sum_cov)).
        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        # Per-kernel Mahalanobis energies, then the weighted sum of the
        # corresponding Gaussian values.
        energies = sum(diff * tdiff, axis=0) / 2.0
        result = sum(exp(-energies)*self.weights, axis=0) / norm_const

        return result
|
| 328 |
+
|
| 329 |
+
def integrate_box_1d(self, low, high):
|
| 330 |
+
"""
|
| 331 |
+
Computes the integral of a 1D pdf between two bounds.
|
| 332 |
+
|
| 333 |
+
Parameters
|
| 334 |
+
----------
|
| 335 |
+
low : scalar
|
| 336 |
+
Lower bound of integration.
|
| 337 |
+
high : scalar
|
| 338 |
+
Upper bound of integration.
|
| 339 |
+
|
| 340 |
+
Returns
|
| 341 |
+
-------
|
| 342 |
+
value : scalar
|
| 343 |
+
The result of the integral.
|
| 344 |
+
|
| 345 |
+
Raises
|
| 346 |
+
------
|
| 347 |
+
ValueError
|
| 348 |
+
If the KDE is over more than one dimension.
|
| 349 |
+
|
| 350 |
+
"""
|
| 351 |
+
if self.d != 1:
|
| 352 |
+
raise ValueError("integrate_box_1d() only handles 1D pdfs")
|
| 353 |
+
|
| 354 |
+
stdev = ravel(sqrt(self.covariance))[0]
|
| 355 |
+
|
| 356 |
+
normalized_low = ravel((low - self.dataset) / stdev)
|
| 357 |
+
normalized_high = ravel((high - self.dataset) / stdev)
|
| 358 |
+
|
| 359 |
+
value = np.sum(self.weights*(
|
| 360 |
+
special.ndtr(normalized_high) -
|
| 361 |
+
special.ndtr(normalized_low)))
|
| 362 |
+
return value
|
| 363 |
+
|
| 364 |
+
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
|
| 365 |
+
"""Computes the integral of a pdf over a rectangular interval.
|
| 366 |
+
|
| 367 |
+
Parameters
|
| 368 |
+
----------
|
| 369 |
+
low_bounds : array_like
|
| 370 |
+
A 1-D array containing the lower bounds of integration.
|
| 371 |
+
high_bounds : array_like
|
| 372 |
+
A 1-D array containing the upper bounds of integration.
|
| 373 |
+
maxpts : int, optional
|
| 374 |
+
The maximum number of points to use for integration.
|
| 375 |
+
|
| 376 |
+
Returns
|
| 377 |
+
-------
|
| 378 |
+
value : scalar
|
| 379 |
+
The result of the integral.
|
| 380 |
+
|
| 381 |
+
"""
|
| 382 |
+
if maxpts is not None:
|
| 383 |
+
extra_kwds = {'maxpts': maxpts}
|
| 384 |
+
else:
|
| 385 |
+
extra_kwds = {}
|
| 386 |
+
|
| 387 |
+
value, inform = _mvn.mvnun_weighted(low_bounds, high_bounds,
|
| 388 |
+
self.dataset, self.weights,
|
| 389 |
+
self.covariance, **extra_kwds)
|
| 390 |
+
if inform:
|
| 391 |
+
msg = ('An integral in _mvn.mvnun requires more points than %s' %
|
| 392 |
+
(self.d * 1000))
|
| 393 |
+
warnings.warn(msg, stacklevel=2)
|
| 394 |
+
|
| 395 |
+
return value
|
| 396 |
+
|
| 397 |
+
    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.

        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")

        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other

        # The product of two Gaussian kernels integrates to a Gaussian with
        # the summed covariance; factor it once and reuse inside the loop.
        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)

            # Mahalanobis energies of each large-KDE point w.r.t. this
            # small-KDE center, combined with both sets of weights.
            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]

        # sqrt(det(sum_cov)) from the Cholesky factor's diagonal.
        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        result /= norm_const

        return result
|
| 446 |
+
|
| 447 |
+
def resample(self, size=None, seed=None):
|
| 448 |
+
"""Randomly sample a dataset from the estimated pdf.
|
| 449 |
+
|
| 450 |
+
Parameters
|
| 451 |
+
----------
|
| 452 |
+
size : int, optional
|
| 453 |
+
The number of samples to draw. If not provided, then the size is
|
| 454 |
+
the same as the effective number of samples in the underlying
|
| 455 |
+
dataset.
|
| 456 |
+
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
|
| 457 |
+
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
|
| 458 |
+
singleton is used.
|
| 459 |
+
If `seed` is an int, a new ``RandomState`` instance is used,
|
| 460 |
+
seeded with `seed`.
|
| 461 |
+
If `seed` is already a ``Generator`` or ``RandomState`` instance then
|
| 462 |
+
that instance is used.
|
| 463 |
+
|
| 464 |
+
Returns
|
| 465 |
+
-------
|
| 466 |
+
resample : (self.d, `size`) ndarray
|
| 467 |
+
The sampled dataset.
|
| 468 |
+
|
| 469 |
+
""" # numpy/numpydoc#87 # noqa: E501
|
| 470 |
+
if size is None:
|
| 471 |
+
size = int(self.neff)
|
| 472 |
+
|
| 473 |
+
random_state = check_random_state(seed)
|
| 474 |
+
norm = transpose(random_state.multivariate_normal(
|
| 475 |
+
zeros((self.d,), float), self.covariance, size=size
|
| 476 |
+
))
|
| 477 |
+
indices = random_state.choice(self.n, size=size, p=self.weights)
|
| 478 |
+
means = self.dataset[:, indices]
|
| 479 |
+
|
| 480 |
+
return means + norm
|
| 481 |
+
|
| 482 |
+
def scotts_factor(self):
|
| 483 |
+
"""Compute Scott's factor.
|
| 484 |
+
|
| 485 |
+
Returns
|
| 486 |
+
-------
|
| 487 |
+
s : float
|
| 488 |
+
Scott's factor.
|
| 489 |
+
"""
|
| 490 |
+
return power(self.neff, -1./(self.d+4))
|
| 491 |
+
|
| 492 |
+
def silverman_factor(self):
|
| 493 |
+
"""Compute the Silverman factor.
|
| 494 |
+
|
| 495 |
+
Returns
|
| 496 |
+
-------
|
| 497 |
+
s : float
|
| 498 |
+
The silverman factor.
|
| 499 |
+
"""
|
| 500 |
+
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
|
| 501 |
+
|
| 502 |
+
    # Default method to calculate bandwidth, can be overwritten by subclass
    covariance_factor = scotts_factor
    covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
    multiplies the data covariance matrix to obtain the kernel covariance
    matrix. The default is `scotts_factor`. A subclass can overwrite this
    method to provide a different method, or set it through a call to
    `kde.set_bandwidth`."""
|
| 509 |
+
|
| 510 |
+
def set_bandwidth(self, bw_method=None):
|
| 511 |
+
"""Compute the estimator bandwidth with given method.
|
| 512 |
+
|
| 513 |
+
The new bandwidth calculated after a call to `set_bandwidth` is used
|
| 514 |
+
for subsequent evaluations of the estimated density.
|
| 515 |
+
|
| 516 |
+
Parameters
|
| 517 |
+
----------
|
| 518 |
+
bw_method : str, scalar or callable, optional
|
| 519 |
+
The method used to calculate the estimator bandwidth. This can be
|
| 520 |
+
'scott', 'silverman', a scalar constant or a callable. If a
|
| 521 |
+
scalar, this will be used directly as `kde.factor`. If a callable,
|
| 522 |
+
it should take a `gaussian_kde` instance as only parameter and
|
| 523 |
+
return a scalar. If None (default), nothing happens; the current
|
| 524 |
+
`kde.covariance_factor` method is kept.
|
| 525 |
+
|
| 526 |
+
Notes
|
| 527 |
+
-----
|
| 528 |
+
.. versionadded:: 0.11
|
| 529 |
+
|
| 530 |
+
Examples
|
| 531 |
+
--------
|
| 532 |
+
>>> import numpy as np
|
| 533 |
+
>>> import scipy.stats as stats
|
| 534 |
+
>>> x1 = np.array([-7, -5, 1, 4, 5.])
|
| 535 |
+
>>> kde = stats.gaussian_kde(x1)
|
| 536 |
+
>>> xs = np.linspace(-10, 10, num=50)
|
| 537 |
+
>>> y1 = kde(xs)
|
| 538 |
+
>>> kde.set_bandwidth(bw_method='silverman')
|
| 539 |
+
>>> y2 = kde(xs)
|
| 540 |
+
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
|
| 541 |
+
>>> y3 = kde(xs)
|
| 542 |
+
|
| 543 |
+
>>> import matplotlib.pyplot as plt
|
| 544 |
+
>>> fig, ax = plt.subplots()
|
| 545 |
+
>>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
|
| 546 |
+
... label='Data points (rescaled)')
|
| 547 |
+
>>> ax.plot(xs, y1, label='Scott (default)')
|
| 548 |
+
>>> ax.plot(xs, y2, label='Silverman')
|
| 549 |
+
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
|
| 550 |
+
>>> ax.legend()
|
| 551 |
+
>>> plt.show()
|
| 552 |
+
|
| 553 |
+
"""
|
| 554 |
+
if bw_method is None:
|
| 555 |
+
pass
|
| 556 |
+
elif bw_method == 'scott':
|
| 557 |
+
self.covariance_factor = self.scotts_factor
|
| 558 |
+
elif bw_method == 'silverman':
|
| 559 |
+
self.covariance_factor = self.silverman_factor
|
| 560 |
+
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
|
| 561 |
+
self._bw_method = 'use constant'
|
| 562 |
+
self.covariance_factor = lambda: bw_method
|
| 563 |
+
elif callable(bw_method):
|
| 564 |
+
self._bw_method = bw_method
|
| 565 |
+
self.covariance_factor = lambda: self._bw_method(self)
|
| 566 |
+
else:
|
| 567 |
+
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
|
| 568 |
+
"or a callable."
|
| 569 |
+
raise ValueError(msg)
|
| 570 |
+
|
| 571 |
+
self._compute_covariance()
|
| 572 |
+
|
| 573 |
+
    def _compute_covariance(self):
        """Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        self.factor = self.covariance_factor()
        # Cache covariance and Cholesky decomp of covariance
        if not hasattr(self, '_data_cho_cov'):
            self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                               bias=False,
                                               aweights=self.weights))
            self._data_cho_cov = linalg.cholesky(self._data_covariance,
                                                 lower=True)

        # Kernel covariance is the (cached) data covariance scaled by the
        # squared bandwidth factor; the Cholesky factor scales linearly.
        self.covariance = self._data_covariance * self.factor**2
        self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)
        # log(det(2*pi*covariance)), computed from the Cholesky diagonal.
        self.log_det = 2*np.log(np.diag(self.cho_cov
                                        * np.sqrt(2*pi))).sum()
|
| 590 |
+
|
| 591 |
+
    @property
    def inv_cov(self):
        # Re-compute from scratch each time because I'm not sure how this is
        # used in the wild. (Perhaps users change the `dataset`, since it's
        # not a private attribute?) `_compute_covariance` used to recalculate
        # all these, so we'll recalculate everything now that this is a
        # a property.
        self.factor = self.covariance_factor()
        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                           bias=False, aweights=self.weights))
        # covariance = data_covariance * factor**2, so the inverse is
        # inv(data_covariance) / factor**2.
        return linalg.inv(self._data_covariance) / self.factor**2
|
| 602 |
+
|
| 603 |
+
def pdf(self, x):
|
| 604 |
+
"""
|
| 605 |
+
Evaluate the estimated pdf on a provided set of points.
|
| 606 |
+
|
| 607 |
+
Notes
|
| 608 |
+
-----
|
| 609 |
+
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
|
| 610 |
+
docstring for more details.
|
| 611 |
+
|
| 612 |
+
"""
|
| 613 |
+
return self.evaluate(x)
|
| 614 |
+
|
| 615 |
+
def logpdf(self, x):
|
| 616 |
+
"""
|
| 617 |
+
Evaluate the log of the estimated pdf on a provided set of points.
|
| 618 |
+
"""
|
| 619 |
+
points = atleast_2d(x)
|
| 620 |
+
|
| 621 |
+
d, m = points.shape
|
| 622 |
+
if d != self.d:
|
| 623 |
+
if d == 1 and m == self.d:
|
| 624 |
+
# points was passed in as a row vector
|
| 625 |
+
points = reshape(points, (self.d, 1))
|
| 626 |
+
m = 1
|
| 627 |
+
else:
|
| 628 |
+
msg = (f"points have dimension {d}, "
|
| 629 |
+
f"dataset has dimension {self.d}")
|
| 630 |
+
raise ValueError(msg)
|
| 631 |
+
|
| 632 |
+
output_dtype, spec = _get_output_dtype(self.covariance, points)
|
| 633 |
+
result = gaussian_kernel_estimate_log[spec](
|
| 634 |
+
self.dataset.T, self.weights[:, None],
|
| 635 |
+
points.T, self.cho_cov, output_dtype)
|
| 636 |
+
|
| 637 |
+
return result[:, 0]
|
| 638 |
+
|
| 639 |
+
def marginal(self, dimensions):
|
| 640 |
+
"""Return a marginal KDE distribution
|
| 641 |
+
|
| 642 |
+
Parameters
|
| 643 |
+
----------
|
| 644 |
+
dimensions : int or 1-d array_like
|
| 645 |
+
The dimensions of the multivariate distribution corresponding
|
| 646 |
+
with the marginal variables, that is, the indices of the dimensions
|
| 647 |
+
that are being retained. The other dimensions are marginalized out.
|
| 648 |
+
|
| 649 |
+
Returns
|
| 650 |
+
-------
|
| 651 |
+
marginal_kde : gaussian_kde
|
| 652 |
+
An object representing the marginal distribution.
|
| 653 |
+
|
| 654 |
+
Notes
|
| 655 |
+
-----
|
| 656 |
+
.. versionadded:: 1.10.0
|
| 657 |
+
|
| 658 |
+
"""
|
| 659 |
+
|
| 660 |
+
dims = np.atleast_1d(dimensions)
|
| 661 |
+
|
| 662 |
+
if not np.issubdtype(dims.dtype, np.integer):
|
| 663 |
+
msg = ("Elements of `dimensions` must be integers - the indices "
|
| 664 |
+
"of the marginal variables being retained.")
|
| 665 |
+
raise ValueError(msg)
|
| 666 |
+
|
| 667 |
+
n = len(self.dataset) # number of dimensions
|
| 668 |
+
original_dims = dims.copy()
|
| 669 |
+
|
| 670 |
+
dims[dims < 0] = n + dims[dims < 0]
|
| 671 |
+
|
| 672 |
+
if len(np.unique(dims)) != len(dims):
|
| 673 |
+
msg = ("All elements of `dimensions` must be unique.")
|
| 674 |
+
raise ValueError(msg)
|
| 675 |
+
|
| 676 |
+
i_invalid = (dims < 0) | (dims >= n)
|
| 677 |
+
if np.any(i_invalid):
|
| 678 |
+
msg = (f"Dimensions {original_dims[i_invalid]} are invalid "
|
| 679 |
+
f"for a distribution in {n} dimensions.")
|
| 680 |
+
raise ValueError(msg)
|
| 681 |
+
|
| 682 |
+
dataset = self.dataset[dims]
|
| 683 |
+
weights = self.weights
|
| 684 |
+
|
| 685 |
+
return gaussian_kde(dataset, bw_method=self.covariance_factor(),
|
| 686 |
+
weights=weights)
|
| 687 |
+
|
| 688 |
+
@property
|
| 689 |
+
def weights(self):
|
| 690 |
+
try:
|
| 691 |
+
return self._weights
|
| 692 |
+
except AttributeError:
|
| 693 |
+
self._weights = ones(self.n)/self.n
|
| 694 |
+
return self._weights
|
| 695 |
+
|
| 696 |
+
@property
|
| 697 |
+
def neff(self):
|
| 698 |
+
try:
|
| 699 |
+
return self._neff
|
| 700 |
+
except AttributeError:
|
| 701 |
+
self._neff = 1/sum(self.weights**2)
|
| 702 |
+
return self._neff
|
| 703 |
+
|
| 704 |
+
|
| 705 |
+
def _get_output_dtype(covariance, points):
|
| 706 |
+
"""
|
| 707 |
+
Calculates the output dtype and the "spec" (=C type name).
|
| 708 |
+
|
| 709 |
+
This was necessary in order to deal with the fused types in the Cython
|
| 710 |
+
routine `gaussian_kernel_estimate`. See gh-10824 for details.
|
| 711 |
+
"""
|
| 712 |
+
output_dtype = np.common_type(covariance, points)
|
| 713 |
+
itemsize = np.dtype(output_dtype).itemsize
|
| 714 |
+
if itemsize == 4:
|
| 715 |
+
spec = 'float'
|
| 716 |
+
elif itemsize == 8:
|
| 717 |
+
spec = 'double'
|
| 718 |
+
elif itemsize in (12, 16):
|
| 719 |
+
spec = 'long double'
|
| 720 |
+
else:
|
| 721 |
+
raise ValueError(
|
| 722 |
+
f"{output_dtype} has unexpected item size: {itemsize}"
|
| 723 |
+
)
|
| 724 |
+
|
| 725 |
+
return output_dtype, spec
|
parrot/lib/python3.10/site-packages/scipy/stats/_ksstats.py
ADDED
|
@@ -0,0 +1,600 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Compute the two-sided one-sample Kolmogorov-Smirnov Prob(Dn <= d) where:
|
| 2 |
+
# D_n = sup_x{|F_n(x) - F(x)|},
|
| 3 |
+
# F_n(x) is the empirical CDF for a sample of size n {x_i: i=1,...,n},
|
| 4 |
+
# F(x) is the CDF of a probability distribution.
|
| 5 |
+
#
|
| 6 |
+
# Exact methods:
|
| 7 |
+
# Prob(D_n >= d) can be computed via a matrix algorithm of Durbin[1]
|
| 8 |
+
# or a recursion algorithm due to Pomeranz[2].
|
| 9 |
+
# Marsaglia, Tsang & Wang[3] gave a computation-efficient way to perform
|
| 10 |
+
# the Durbin algorithm.
|
| 11 |
+
# D_n >= d <==> D_n+ >= d or D_n- >= d (the one-sided K-S statistics), hence
|
| 12 |
+
# Prob(D_n >= d) = 2*Prob(D_n+ >= d) - Prob(D_n+ >= d and D_n- >= d).
|
| 13 |
+
# For d > 0.5, the latter intersection probability is 0.
|
| 14 |
+
#
|
| 15 |
+
# Approximate methods:
|
| 16 |
+
# For d close to 0.5, ignoring that intersection term may still give a
|
| 17 |
+
# reasonable approximation.
|
| 18 |
+
# Li-Chien[4] and Korolyuk[5] gave an asymptotic formula extending
|
| 19 |
+
# Kolmogorov's initial asymptotic, suitable for large d. (See
|
| 20 |
+
# scipy.special.kolmogorov for that asymptotic)
|
| 21 |
+
# Pelz-Good[6] used the functional equation for Jacobi theta functions to
|
| 22 |
+
# transform the Li-Chien/Korolyuk formula to produce a computational formula
|
| 23 |
+
# suitable for small d.
|
| 24 |
+
#
|
| 25 |
+
# Simard and L'Ecuyer[7] provided an algorithm to decide when to use each of
|
| 26 |
+
# the above approaches and it is that which is used here.
|
| 27 |
+
#
|
| 28 |
+
# Other approaches:
|
| 29 |
+
# Carvalho[8] optimizes Durbin's matrix algorithm for large values of d.
|
| 30 |
+
# Moscovich and Nadler[9] use FFTs to compute the convolutions.
|
| 31 |
+
|
| 32 |
+
# References:
|
| 33 |
+
# [1] Durbin J (1968).
|
| 34 |
+
# "The Probability that the Sample Distribution Function Lies Between Two
|
| 35 |
+
# Parallel Straight Lines."
|
| 36 |
+
# Annals of Mathematical Statistics, 39, 398-411.
|
| 37 |
+
# [2] Pomeranz J (1974).
|
| 38 |
+
# "Exact Cumulative Distribution of the Kolmogorov-Smirnov Statistic for
|
| 39 |
+
# Small Samples (Algorithm 487)."
|
| 40 |
+
# Communications of the ACM, 17(12), 703-704.
|
| 41 |
+
# [3] Marsaglia G, Tsang WW, Wang J (2003).
|
| 42 |
+
# "Evaluating Kolmogorov's Distribution."
|
| 43 |
+
# Journal of Statistical Software, 8(18), 1-4.
|
| 44 |
+
# [4] LI-CHIEN, C. (1956).
|
| 45 |
+
# "On the exact distribution of the statistics of A. N. Kolmogorov and
|
| 46 |
+
# their asymptotic expansion."
|
| 47 |
+
# Acta Matematica Sinica, 6, 55-81.
|
| 48 |
+
# [5] KOROLYUK, V. S. (1960).
|
| 49 |
+
# "Asymptotic analysis of the distribution of the maximum deviation in
|
| 50 |
+
# the Bernoulli scheme."
|
| 51 |
+
# Theor. Probability Appl., 4, 339-366.
|
| 52 |
+
# [6] Pelz W, Good IJ (1976).
|
| 53 |
+
# "Approximating the Lower Tail-areas of the Kolmogorov-Smirnov One-sample
|
| 54 |
+
# Statistic."
|
| 55 |
+
# Journal of the Royal Statistical Society, Series B, 38(2), 152-156.
|
| 56 |
+
# [7] Simard, R., L'Ecuyer, P. (2011)
|
| 57 |
+
# "Computing the Two-Sided Kolmogorov-Smirnov Distribution",
|
| 58 |
+
# Journal of Statistical Software, Vol 39, 11, 1-18.
|
| 59 |
+
# [8] Carvalho, Luis (2015)
|
| 60 |
+
# "An Improved Evaluation of Kolmogorov's Distribution"
|
| 61 |
+
# Journal of Statistical Software, Code Snippets; Vol 65(3), 1-8.
|
| 62 |
+
# [9] Amit Moscovich, Boaz Nadler (2017)
|
| 63 |
+
# "Fast calculation of boundary crossing probabilities for Poisson
|
| 64 |
+
# processes",
|
| 65 |
+
# Statistics & Probability Letters, Vol 123, 177-182.
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
import numpy as np
|
| 69 |
+
import scipy.special
|
| 70 |
+
import scipy.special._ufuncs as scu
|
| 71 |
+
from scipy._lib._finite_differences import _derivative
|
| 72 |
+
|
| 73 |
+
# Scaling constants: intermediate results in the matrix/convolution
# algorithms are rescaled by 2**128 whenever they threaten to leave the
# double-precision range; the accumulated binary exponent is undone at the
# end with np.ldexp.
_E128 = 128
_EP128 = np.ldexp(np.longdouble(1), _E128)   # 2**128
_EM128 = np.ldexp(np.longdouble(1), -_E128)  # 2**-128

_SQRT2PI = np.sqrt(2 * np.pi)
_LOG_2PI = np.log(2 * np.pi)
# np.exp underflows to 0 for arguments below roughly -708, so any log-prob
# smaller than this is treated as probability zero.
_MIN_LOG = -708
_SQRT3 = np.sqrt(3)
_PI_SQUARED = np.pi ** 2
_PI_FOUR = np.pi ** 4
_PI_SIX = np.pi ** 6

# [Lifted from _loggamma.pxd.] If B_m are the Bernoulli numbers,
# then Stirling coeffs are B_{2j}/(2j)/(2j-1) for j=8,...1.
# Highest-order coefficient first, ready for use with np.polyval.
_STIRLING_COEFFS = [-2.955065359477124183e-2, 6.4102564102564102564e-3,
                    -1.9175269175269175269e-3, 8.4175084175084175084e-4,
                    -5.952380952380952381e-4, 7.9365079365079365079e-4,
                    -2.7777777777777777778e-3, 8.3333333333333333333e-2]
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _log_nfactorial_div_n_pow_n(n):
    """Return log(n! / n**n) (equivalently log((n-1)! / n**(n-1))).

    Uses Stirling's approximation with the n*log(n) term removed up-front
    to avoid subtractive cancellation:
        log(n)/2 - n + log(sqrt(2pi)) + sum B_{2j}/(2j)/(2j-1)/n**(2j-1)
    """
    inv_n = 1.0 / n
    # polyval evaluates the Stirling series in powers of 1/n**2; the extra
    # factor of 1/n makes the terms 1/n**(2j-1).
    series = inv_n * np.polyval(_STIRLING_COEFFS, inv_n / n)
    return np.log(n) / 2 - n + _LOG_2PI / 2 + series
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _clip_prob(p):
|
| 104 |
+
"""clips a probability to range 0<=p<=1."""
|
| 105 |
+
return np.clip(p, 0.0, 1.0)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _select_and_clip_prob(cdfprob, sfprob, cdf=True):
    """Return the CDF value when `cdf` is truthy, else the SF value,
    clipped to the valid probability range 0<=p<=1."""
    selected = np.where(cdf, cdfprob, sfprob)
    return _clip_prob(selected)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def _kolmogn_DMTW(n, d, cdf=True):
    r"""Computes the Kolmogorov CDF:  Pr(D_n <= d) using the MTW approach to
    the Durbin matrix algorithm.

    Durbin (1968); Marsaglia, Tsang, Wang (2003). [1], [3].

    Parameters
    ----------
    n : int
        Sample size.
    d : float
        Value of the two-sided K-S statistic, 0 <= d <= 1.
    cdf : bool, optional
        If True, return the CDF Pr(D_n <= d); otherwise the SF.
    """
    # Write d = (k-h)/n, where k is positive integer and 0 <= h < 1
    # Generate initial matrix H of size m*m where m=(2k-1)
    # Compute k-th row of (n!/n^n) * H^n, scaling intermediate results.
    # Requires memory O(m^2) and computation O(m^2 log(n)).
    # Most suitable for small m.

    if d >= 1.0:
        return _select_and_clip_prob(1.0, 0.0, cdf)
    nd = n * d
    if nd <= 0.5:
        # Pr(D_n <= d) is identically 0 for n*d <= 1/2.
        return _select_and_clip_prob(0.0, 1.0, cdf)
    k = int(np.ceil(nd))
    h = k - nd
    m = 2 * k - 1

    H = np.zeros([m, m])

    # Initialize: v is first column (and last row) of H
    #  v[j] = (1-h^(j+1)/(j+1)! (except for v[-1])
    #  w[j] = 1/(j)!
    # q = k-th row of H (actually i!/n^i*H^i)
    intm = np.arange(1, m + 1)
    v = 1.0 - h ** intm
    w = np.empty(m)
    fac = 1.0
    for j in intm:
        w[j - 1] = fac
        fac /= j  # This might underflow.  Isn't a problem.
        v[j - 1] *= fac
    # Bottom-left corner entry gets the extra (2h-1)_+^m - 2h^m correction.
    tt = max(2 * h - 1.0, 0)**m - 2*h**m
    v[-1] = (1.0 + tt) * fac

    # Fill the superdiagonal bands with the factorial reciprocals.
    for i in range(1, m):
        H[i - 1:, i] = w[:m - i + 1]
    H[:, 0] = v
    H[-1, :] = np.flip(v, axis=0)

    # Compute H**n by binary (square-and-multiply) exponentiation, tracking
    # a power-of-two exponent so entries stay within double range.
    Hpwr = np.eye(np.shape(H)[0])  # Holds intermediate powers of H
    nn = n
    expnt = 0  # Scaling of Hpwr
    Hexpnt = 0  # Scaling of H
    while nn > 0:
        if nn % 2:
            Hpwr = np.matmul(Hpwr, H)
            expnt += Hexpnt
        H = np.matmul(H, H)
        Hexpnt *= 2
        # Scale as needed.
        if np.abs(H[k - 1, k - 1]) > _EP128:
            H /= _EP128
            Hexpnt += _E128
        nn = nn // 2

    p = Hpwr[k - 1, k - 1]

    # Multiply by n!/n^n, one factor i/n at a time, rescaling on underflow.
    for i in range(1, n + 1):
        p = i * p / n
        if np.abs(p) < _EM128:
            p *= _EP128
            expnt -= _E128

    # unscale
    if expnt != 0:
        p = np.ldexp(p, expnt)

    return _select_and_clip_prob(p, 1.0-p, cdf)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf):
|
| 190 |
+
"""Compute the endpoints of the interval for row i."""
|
| 191 |
+
if i == 0:
|
| 192 |
+
j1, j2 = -ll - ceilf - 1, ll + ceilf - 1
|
| 193 |
+
else:
|
| 194 |
+
# i + 1 = 2*ip1div2 + ip1mod2
|
| 195 |
+
ip1div2, ip1mod2 = divmod(i + 1, 2)
|
| 196 |
+
if ip1mod2 == 0: # i is odd
|
| 197 |
+
if ip1div2 == n + 1:
|
| 198 |
+
j1, j2 = n - ll - ceilf - 1, n + ll + ceilf - 1
|
| 199 |
+
else:
|
| 200 |
+
j1, j2 = ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1
|
| 201 |
+
else:
|
| 202 |
+
j1, j2 = ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1
|
| 203 |
+
|
| 204 |
+
return max(j1 + 2, 0), min(j2, n)
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def _kolmogn_Pomeranz(n, x, cdf=True):
    r"""Computes Pr(D_n <= d) using the Pomeranz recursion algorithm.

    Pomeranz (1974) [2]

    Parameters
    ----------
    n : int
        Sample size.
    x : float
        Value of the two-sided K-S statistic.
    cdf : bool, optional
        If True, return the CDF; otherwise the SF.
    """

    # V is n*(2n+2) matrix.
    # Each row is convolution of the previous row and probabilities from a
    #  Poisson distribution.
    # Desired CDF probability is n! V[n-1, 2n+1]  (final entry in final row).
    # Only two rows are needed at any given stage:
    #  - Call them V0 and V1.
    #  - Swap each iteration
    # Only a few (contiguous) entries in each row can be non-zero.
    #  - Keep track of start and end (j1 and j2 below)
    #  - V0s and V1s track the start in the two rows
    # Scale intermediate results as needed.
    # Only a few different Poisson distributions can occur
    t = n * x
    ll = int(np.floor(t))
    f = 1.0 * (t - ll)  # fractional part of t
    g = min(f, 1.0 - f)
    ceilf = (1 if f > 0 else 0)
    roundf = (1 if f > 0.5 else 0)
    npwrs = 2 * (ll + 1)    # Maximum number of powers needed in convolutions
    gpower = np.empty(npwrs)    # gpower = (g/n)^m/m!
    twogpower = np.empty(npwrs)  # twogpower = (2g/n)^m/m!
    onem2gpower = np.empty(npwrs)  # onem2gpower = ((1-2g)/n)^m/m!
    # gpower etc are *almost* Poisson probs, just missing normalizing factor.

    gpower[0] = 1.0
    twogpower[0] = 1.0
    onem2gpower[0] = 1.0
    expnt = 0  # Accumulated binary exponent from intermediate rescaling.
    g_over_n, two_g_over_n, one_minus_two_g_over_n = g/n, 2*g/n, (1 - 2*g)/n
    for m in range(1, npwrs):
        gpower[m] = gpower[m - 1] * g_over_n / m
        twogpower[m] = twogpower[m - 1] * two_g_over_n / m
        onem2gpower[m] = onem2gpower[m - 1] * one_minus_two_g_over_n / m

    V0 = np.zeros([npwrs])
    V1 = np.zeros([npwrs])
    V1[0] = 1  # first row
    V0s, V1s = 0, 0  # start indices of the two rows

    j1, j2 = _pomeranz_compute_j1j2(0, n, ll, ceilf, roundf)
    for i in range(1, 2 * n + 2):
        # Preserve j1, V1, V1s, V0s from last iteration
        k1 = j1
        V0, V1 = V1, V0
        V0s, V1s = V1s, V0s
        V1.fill(0.0)
        j1, j2 = _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf)
        # First and last rows use the g-based weights; otherwise alternate
        # between 2g and (1-2g) weights by parity of i.
        if i == 1 or i == 2 * n + 1:
            pwrs = gpower
        else:
            pwrs = (twogpower if i % 2 else onem2gpower)
        ln2 = j2 - k1 + 1
        if ln2 > 0:
            conv = np.convolve(V0[k1 - V0s:k1 - V0s + ln2], pwrs[:ln2])
            conv_start = j1 - k1  # First index to use from conv
            conv_len = j2 - j1 + 1  # Number of entries to use from conv
            V1[:conv_len] = conv[conv_start:conv_start + conv_len]
            # Scale to avoid underflow.
            if 0 < np.max(V1) < _EM128:
                V1 *= _EP128
                expnt -= _E128
            V1s = V0s + j1 - k1

    # multiply by n!  (one factor at a time, rescaling on overflow)
    ans = V1[n - V1s]
    for m in range(1, n + 1):
        if np.abs(ans) > _EP128:
            ans *= _EM128
            expnt += _E128
        ans *= m

    # Undo any intermediate scaling
    if expnt != 0:
        ans = np.ldexp(ans, expnt)
    ans = _select_and_clip_prob(ans, 1.0 - ans, cdf)
    return ans
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def _kolmogn_PelzGood(n, x, cdf=True):
    """Computes the Pelz-Good approximation to Prob(Dn <= x) with 0<=x<=1.

    Start with Li-Chien, Korolyuk approximation:
        Prob(Dn <= x) ~ K0(z) + K1(z)/sqrt(n) + K2(z)/n + K3(z)/n**1.5
    where z = x*sqrt(n).
    Transform each K_(z) using Jacobi theta functions into a form suitable
    for small z.
    Pelz-Good (1976). [6]
    """
    if x <= 0.0:
        return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
    if x >= 1.0:
        return _select_and_clip_prob(1.0, 0.0, cdf=cdf)

    z = np.sqrt(n) * x
    zsquared, zthree, zfour, zsix = z**2, z**3, z**4, z**6

    qlog = -_PI_SQUARED / 8 / zsquared
    if qlog < _MIN_LOG:  # z ~ 0.041743441416853426
        # exp(qlog) would underflow to 0; the CDF is effectively 0 here.
        return _select_and_clip_prob(0.0, 1.0, cdf=cdf)

    q = np.exp(qlog)

    # Coefficients of terms in the sums for K1, K2 and K3
    k1a = -zsquared
    k1b = _PI_SQUARED / 4

    k2a = 6 * zsix + 2 * zfour
    k2b = (2 * zfour - 5 * zsquared) * _PI_SQUARED / 4
    k2c = _PI_FOUR * (1 - 2 * zsquared) / 16

    k3d = _PI_SIX * (5 - 30 * zsquared) / 64
    k3c = _PI_FOUR * (-60 * zsquared + 212 * zfour) / 16
    k3b = _PI_SQUARED * (135 * zfour - 96 * zsix) / 4
    k3a = -30 * zsix - 90 * z**8

    K0to3 = np.zeros(4)
    # Use a Horner scheme to evaluate sum c_i q^(i^2)
    # Reduces to a sum over odd integers.
    maxk = int(np.ceil(16 * z / np.pi))
    for k in range(maxk, 0, -1):
        m = 2 * k - 1
        msquared, mfour, msix = m**2, m**4, m**6
        qpower = np.power(q, 8 * k)
        coeffs = np.array([1.0,
                           k1a + k1b*msquared,
                           k2a + k2b*msquared + k2c*mfour,
                           k3a + k3b*msquared + k3c*mfour + k3d*msix])
        K0to3 *= qpower
        K0to3 += coeffs
    K0to3 *= q
    K0to3 *= _SQRT2PI
    # z**10 > 0 as z > 0.04
    K0to3 /= np.array([z, 6 * zfour, 72 * z**7, 6480 * z**10])

    # Now do the other sum over the other terms, all integers k
    # K_2:  (pi^2 k^2) q^(k^2),
    # K_3:  (3pi^2 k^2 z^2 - pi^4 k^4)*q^(k^2)
    # Don't expect much subtractive cancellation so use direct calculation
    q = np.exp(-_PI_SQUARED / 2 / zsquared)
    ks = np.arange(maxk, 0, -1)
    ksquared = ks ** 2
    sqrt3z = _SQRT3 * z
    kspi = np.pi * ks
    qpwers = q ** ksquared
    k2extra = np.sum(ksquared * qpwers)
    k2extra *= _PI_SQUARED * _SQRT2PI/(-36 * zthree)
    K0to3[2] += k2extra
    k3extra = np.sum((sqrt3z + kspi) * (sqrt3z - kspi) * ksquared * qpwers)
    k3extra *= _PI_SQUARED * _SQRT2PI/(216 * zsix)
    K0to3[3] += k3extra
    # Divide K_j by n**(j/2) to form the Li-Chien/Korolyuk series terms.
    powers_of_n = np.power(n * 1.0, np.arange(len(K0to3)) / 2.0)
    K0to3 /= powers_of_n

    if not cdf:
        # SF = 1 - CDF: negate every term and add 1 to the leading term.
        K0to3 *= -1
        K0to3[0] += 1

    Ksum = sum(K0to3)
    return Ksum
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def _kolmogn(n, x, cdf=True):
    """Computes the CDF(or SF) for the two-sided Kolmogorov-Smirnov statistic.

    x must be of type float, n of type integer.

    Simard & L'Ecuyer (2011) [7].

    Dispatches among the Ruben-Gambino closed forms, the Durbin/MTW matrix
    algorithm, the Pomeranz recursion, 2*smirnov, and the Pelz-Good
    approximation, depending on n and n*x**2.
    """
    if np.isnan(n):
        return n  # Keep the same type of nan
    if int(n) != n or n <= 0:
        return np.nan
    if x >= 1.0:
        return _select_and_clip_prob(1.0, 0.0, cdf=cdf)
    if x <= 0.0:
        return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
    t = n * x
    if t <= 1.0:  # Ruben-Gambino: 1/2n <= x <= 1/n
        if t <= 0.5:
            return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
        if n <= 140:
            # Direct product n!/n^n * (2t-1)^n, safe from overflow here.
            prob = np.prod(np.arange(1, n+1) * (1.0/n) * (2*t - 1))
        else:
            # Same quantity in log space for large n.
            prob = np.exp(_log_nfactorial_div_n_pow_n(n) + n * np.log(2*t-1))
        return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
    if t >= n - 1:  # Ruben-Gambino
        prob = 2 * (1.0 - x)**n
        return _select_and_clip_prob(1 - prob, prob, cdf=cdf)
    if x >= 0.5:  # Exact: 2 * smirnov
        prob = 2 * scipy.special.smirnov(n, x)
        return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf)

    nxsquared = t * x
    if n <= 140:
        # Small-n regime: exact algorithms, selected by n*x**2 per [7].
        if nxsquared <= 0.754693:
            prob = _kolmogn_DMTW(n, x, cdf=True)
            return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
        if nxsquared <= 4:
            prob = _kolmogn_Pomeranz(n, x, cdf=True)
            return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
        # Now use Miller approximation of 2*smirnov
        prob = 2 * scipy.special.smirnov(n, x)
        return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf)

    # Split CDF and SF as they have different cutoffs on nxsquared.
    if not cdf:
        if nxsquared >= 370.0:
            # SF underflows to 0 here.
            return 0.0
        if nxsquared >= 2.2:
            prob = 2 * scipy.special.smirnov(n, x)
            return _clip_prob(prob)
        # Fall through and compute the SF as 1.0-CDF
    if nxsquared >= 18.0:
        cdfprob = 1.0
    elif n <= 100000 and n * x**1.5 <= 1.4:
        cdfprob = _kolmogn_DMTW(n, x, cdf=True)
    else:
        cdfprob = _kolmogn_PelzGood(n, x, cdf=True)
    return _select_and_clip_prob(cdfprob, 1.0 - cdfprob, cdf=cdf)
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
def _kolmogn_p(n, x):
    """Computes the PDF for the two-sided Kolmogorov-Smirnov statistic.

    x must be of type float, n of type integer.

    Uses closed-form derivatives of the Ruben-Gambino regions where
    available and numerical differentiation of the CDF elsewhere.
    """
    if np.isnan(n):
        return n  # Keep the same type of nan
    if int(n) != n or n <= 0:
        return np.nan
    if x >= 1.0 or x <= 0:
        return 0
    t = n * x
    if t <= 1.0:
        # Ruben-Gambino: n!/n^n * (2t-1)^n -> 2 n!/n^n * n^2 * (2t-1)^(n-1)
        if t <= 0.5:
            return 0.0
        if n <= 140:
            prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1))
        else:
            prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n-1) * np.log(2 * t - 1))
        return prd * 2 * n**2
    if t >= n - 1:
        # Ruben-Gambino : 1-2(1-x)**n -> 2n*(1-x)**(n-1)
        return 2 * (1.0 - x) ** (n-1) * n
    if x >= 0.5:
        # NOTE(review): relies on `scipy.stats` being reachable through the
        # `scipy` namespace at call time -- confirm a caller imports it.
        return 2 * scipy.stats.ksone.pdf(x, n)

    # Just take a small delta.
    # Ideally x +/- delta would stay within [i/n, (i+1)/n] for some integer a.
    # as the CDF is a piecewise degree n polynomial.
    # It has knots at 1/n, 2/n, ... (n-1)/n
    # and is not a C-infinity function at the knots
    delta = x / 2.0**16
    delta = min(delta, x - 1.0/n)
    delta = min(delta, 0.5 - x)

    def _kk(_x):
        # CDF as a function of x alone, for differentiation.
        return kolmogn(n, _x)

    # Five-point central-difference derivative of the CDF.
    return _derivative(_kk, x, dx=delta, order=5)
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
def _kolmogni(n, p, q):
    """Computes the PPF/ISF of kolmogn.

    n of type integer, n>= 1
    p is the CDF, q the SF, p+q=1
    """
    if np.isnan(n):
        return n  # Keep the same type of nan
    if int(n) != n or n <= 0:
        return np.nan
    if p <= 0:
        return 1.0/n
    if q <= 0:
        return 1.0
    # Lower tail: invert the Ruben-Gambino form p = n!/n^n * (2t-1)^n.
    delta = np.exp((np.log(p) - scipy.special.loggamma(n+1))/n)
    if delta <= 1.0/n:
        return (delta + 1.0 / n) / 2
    # Upper tail: invert the Ruben-Gambino form q = 2*(1-x)^n.
    x = -np.expm1(np.log(q/2.0)/n)
    if x >= 1 - 1.0/n:
        return x
    # Otherwise bracket the root using the asymptotic (Kolmogorov) inverse
    # scaled by sqrt(n) and refine with Brent's method.
    x1 = scu._kolmogci(p)/np.sqrt(n)
    x1 = min(x1, 1.0 - 1.0/n)

    def _f(x):
        # Root of this function is the desired quantile.
        return _kolmogn(n, x) - p

    # NOTE(review): assumes `scipy.optimize` is reachable through the
    # `scipy` namespace at call time -- confirm a caller imports it.
    return scipy.optimize.brentq(_f, 1.0/n, x1, xtol=1e-14)
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def kolmogn(n, x, cdf=True):
    """Computes the CDF for the two-sided Kolmogorov-Smirnov distribution.

    The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x),
    for a sample of size n drawn from a distribution with CDF F(t), where
    :math:`D_n &= sup_t |F_n(t) - F(t)|`, and
    :math:`F_n(t)` is the Empirical Cumulative Distribution Function of the sample.

    Parameters
    ----------
    n : integer, array_like
        the number of samples
    x : float, array_like
        The K-S statistic, float between 0 and 1
    cdf : bool, optional
        whether to compute the CDF(default=true) or the SF.

    Returns
    -------
    cdf : ndarray
        CDF (or SF if cdf is False) at the specified locations.

    The return value has shape the result of numpy broadcasting n and x.
    """
    # nditer broadcasts n, x and cdf together; the trailing None operand is
    # allocated by nditer as a float64 output and filled element-wise via
    # the z[...] write-back.
    it = np.nditer([n, x, cdf, None],
                   op_dtypes=[None, np.float64, np.bool_, np.float64])
    for _n, _x, _cdf, z in it:
        if np.isnan(_n):
            z[...] = _n  # propagate NaN sample sizes unchanged
            continue
        if int(_n) != _n:
            raise ValueError(f'n is not integral: {_n}')
        z[...] = _kolmogn(int(_n), _x, cdf=_cdf)
    result = it.operands[-1]
    return result
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
def kolmognp(n, x):
    """Computes the PDF for the two-sided Kolmogorov-Smirnov distribution.

    Parameters
    ----------
    n : integer, array_like
        the number of samples
    x : float, array_like
        The K-S statistic, float between 0 and 1

    Returns
    -------
    pdf : ndarray
        The PDF at the specified locations

    The return value has shape the result of numpy broadcasting n and x.
    """
    # nditer broadcasts n and x; the trailing None operand is allocated by
    # nditer as the output array and filled element-wise via z[...].
    it = np.nditer([n, x, None])
    for _n, _x, z in it:
        if np.isnan(_n):
            z[...] = _n  # propagate NaN sample sizes unchanged
            continue
        if int(_n) != _n:
            raise ValueError(f'n is not integral: {_n}')
        z[...] = _kolmogn_p(int(_n), _x)
    result = it.operands[-1]
    return result
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
def kolmogni(n, q, cdf=True):
    """Computes the PPF(or ISF) for the two-sided Kolmogorov-Smirnov distribution.

    Parameters
    ----------
    n : integer, array_like
        the number of samples
    q : float, array_like
        Probabilities, float between 0 and 1
    cdf : bool, optional
        whether to compute the PPF(default=true) or the ISF.

    Returns
    -------
    ppf : ndarray
        PPF (or ISF if cdf is False) at the specified locations

    The return value has shape the result of numpy broadcasting n and x.
    """
    # nditer broadcasts n, q and cdf; the trailing None operand is allocated
    # by nditer as the output array and filled element-wise via z[...].
    it = np.nditer([n, q, cdf, None])
    for _n, _q, _cdf, z in it:
        if np.isnan(_n):
            z[...] = _n  # propagate NaN sample sizes unchanged
            continue
        if int(_n) != _n:
            raise ValueError(f'n is not integral: {_n}')
        # _kolmogni takes both the CDF and SF probabilities; build the pair
        # from q according to which one was requested.
        _pcdf, _psf = (_q, 1-_q) if _cdf else (1-_q, _q)
        z[...] = _kolmogni(int(_n), _pcdf, _psf)
    result = it.operands[-1]
    return result
|
parrot/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from collections import namedtuple
|
| 3 |
+
from scipy import special
|
| 4 |
+
from scipy import stats
|
| 5 |
+
from scipy.stats._stats_py import _rankdata
|
| 6 |
+
from ._axis_nan_policy import _axis_nan_policy_factory
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _broadcast_concatenate(x, y, axis):
|
| 10 |
+
'''Broadcast then concatenate arrays, leaving concatenation axis last'''
|
| 11 |
+
x = np.moveaxis(x, axis, -1)
|
| 12 |
+
y = np.moveaxis(y, axis, -1)
|
| 13 |
+
z = np.broadcast(x[..., 0], y[..., 0])
|
| 14 |
+
x = np.broadcast_to(x, z.shape + (x.shape[-1],))
|
| 15 |
+
y = np.broadcast_to(y, z.shape + (y.shape[-1],))
|
| 16 |
+
z = np.concatenate((x, y), axis=-1)
|
| 17 |
+
return x, y, z
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class _MWU:
    '''Distribution of MWU statistic under the null hypothesis'''

    def __init__(self, n1, n2):
        self._reset(n1, n2)

    def set_shapes(self, n1, n2):
        """Set the sample sizes (ordered so n1 <= n2), invalidating caches
        when the sizes change."""
        # The distribution depends only on the unordered pair of sizes.
        n1, n2 = min(n1, n2), max(n1, n2)
        if (n1, n2) == (self.n1, self.n2):
            return

        self.n1 = n1
        self.n2 = n2
        # Cached intermediates, rebuilt lazily on demand.
        self.s_array = np.zeros(0, dtype=int)
        self.configurations = np.zeros(0, dtype=np.uint64)

    def reset(self):
        """Clear all cached state, keeping the current sample sizes."""
        self._reset(self.n1, self.n2)

    def _reset(self, n1, n2):
        # Null out the shapes so set_shapes' equality check fails and the
        # caches are (re)initialized.
        self.n1 = None
        self.n2 = None
        self.set_shapes(n1, n2)

    def pmf(self, k):
        """Probability mass function at statistic value(s) k."""

        # In practice, `pmf` is never called with k > m*n/2.
        # If it were, we'd exploit symmetry here:
        # k = np.array(k, copy=True)
        # k2 = m*n - k
        # i = k2 < k
        # k[i] = k2[i]

        pmfs = self.build_u_freqs_array(np.max(k))
        return pmfs[k]

    def cdf(self, k):
        '''Cumulative distribution function'''

        # In practice, `cdf` is never called with k > m*n/2.
        # If it were, we'd exploit symmetry here rather than in `sf`
        pmfs = self.build_u_freqs_array(np.max(k))
        cdfs = np.cumsum(pmfs)
        return cdfs[k]

    def sf(self, k):
        '''Survival function'''
        # Note that both CDF and SF include the PMF at k. The p-value is
        # calculated from the SF and should include the mass at k, so this
        # is desirable

        # Use the fact that the distribution is symmetric and sum from the left
        kc = np.asarray(self.n1*self.n2 - k)  # complement of k
        i = k < kc
        if np.any(i):
            kc[i] = k[i]
            cdfs = np.asarray(self.cdf(kc))
            cdfs[i] = 1. - cdfs[i] + self.pmf(kc[i])
        else:
            cdfs = np.asarray(self.cdf(kc))
        return cdfs[()]

    # build_sigma_array and build_u_freqs_array adapted from code
    # by @toobaz with permission. Thanks to @andreasloe for the suggestion.
    # See https://github.com/scipy/scipy/pull/4933#issuecomment-1898082691
    def build_sigma_array(self, a):
        """Return sigma[1..a] used by the frequency recurrence; cached."""
        n1, n2 = self.n1, self.n2
        if a + 1 <= self.s_array.size:
            # Cache is already large enough; serve the requested prefix.
            return self.s_array[1:a+1]

        s_array = np.zeros(a + 1, dtype=int)

        for d in np.arange(1, n1 + 1):
            # All multiples of d, except 0:
            indices = np.arange(d, a + 1, d)
            # \epsilon_d = 1:
            s_array[indices] += d

        for d in np.arange(n2 + 1, n2 + n1 + 1):
            # All multiples of d, except 0:
            indices = np.arange(d, a + 1, d)
            # \epsilon_d = -1:
            s_array[indices] -= d

        # We don't need 0:
        self.s_array = s_array
        return s_array[1:]

    def build_u_freqs_array(self, maxu):
        """
        Build all the array of frequencies for u from 0 to maxu.
        Assumptions:
          n1 <= n2
          maxu <= n1 * n2 / 2
        """
        n1, n2 = self.n1, self.n2
        total = special.binom(n1 + n2, n1)

        if maxu + 1 <= self.configurations.size:
            # Cached counts cover the request; just normalize.
            return self.configurations[:maxu + 1] / total

        s_array = self.build_sigma_array(maxu)

        # Start working with ints, for maximum precision and efficiency:
        configurations = np.zeros(maxu + 1, dtype=np.uint64)
        configurations_is_uint = True
        uint_max = np.iinfo(np.uint64).max
        # How many ways to have U=0?  1
        configurations[0] = 1

        for u in np.arange(1, maxu + 1):
            # Recurrence: u * freq[u] = sum_{v<u} freq[v] * sigma[u-v]
            coeffs = s_array[u - 1::-1]
            new_val = np.dot(configurations[:u], coeffs) / u
            if new_val > uint_max and configurations_is_uint:
                # OK, we got into numbers too big for uint64.
                # So now we start working with floats.
                # By doing this since the beginning, we would have lost precision.
                # (And working on python long ints would be unbearably slow)
                configurations = configurations.astype(float)
                configurations_is_uint = False
            configurations[u] = new_val

        self.configurations = configurations
        return configurations / total
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
_mwu_state = _MWU(0, 0)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def _get_mwu_z(U, n1, n2, t, axis=0, continuity=True):
|
| 150 |
+
'''Standardized MWU statistic'''
|
| 151 |
+
# Follows mannwhitneyu [2]
|
| 152 |
+
mu = n1 * n2 / 2
|
| 153 |
+
n = n1 + n2
|
| 154 |
+
|
| 155 |
+
# Tie correction according to [2], "Normal approximation and tie correction"
|
| 156 |
+
# "A more computationally-efficient form..."
|
| 157 |
+
tie_term = (t**3 - t).sum(axis=-1)
|
| 158 |
+
s = np.sqrt(n1*n2/12 * ((n + 1) - tie_term/(n*(n-1))))
|
| 159 |
+
|
| 160 |
+
numerator = U - mu
|
| 161 |
+
|
| 162 |
+
# Continuity correction.
|
| 163 |
+
# Because SF is always used to calculate the p-value, we can always
|
| 164 |
+
# _subtract_ 0.5 for the continuity correction. This always increases the
|
| 165 |
+
# p-value to account for the rest of the probability mass _at_ q = U.
|
| 166 |
+
if continuity:
|
| 167 |
+
numerator -= 0.5
|
| 168 |
+
|
| 169 |
+
# no problem evaluating the norm SF at an infinity
|
| 170 |
+
with np.errstate(divide='ignore', invalid='ignore'):
|
| 171 |
+
z = numerator / s
|
| 172 |
+
return z
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def _mwu_input_validation(x, y, use_continuity, alternative, axis, method):
|
| 176 |
+
''' Input validation and standardization for mannwhitneyu '''
|
| 177 |
+
# Would use np.asarray_chkfinite, but infs are OK
|
| 178 |
+
x, y = np.atleast_1d(x), np.atleast_1d(y)
|
| 179 |
+
if np.isnan(x).any() or np.isnan(y).any():
|
| 180 |
+
raise ValueError('`x` and `y` must not contain NaNs.')
|
| 181 |
+
if np.size(x) == 0 or np.size(y) == 0:
|
| 182 |
+
raise ValueError('`x` and `y` must be of nonzero size.')
|
| 183 |
+
|
| 184 |
+
bools = {True, False}
|
| 185 |
+
if use_continuity not in bools:
|
| 186 |
+
raise ValueError(f'`use_continuity` must be one of {bools}.')
|
| 187 |
+
|
| 188 |
+
alternatives = {"two-sided", "less", "greater"}
|
| 189 |
+
alternative = alternative.lower()
|
| 190 |
+
if alternative not in alternatives:
|
| 191 |
+
raise ValueError(f'`alternative` must be one of {alternatives}.')
|
| 192 |
+
|
| 193 |
+
axis_int = int(axis)
|
| 194 |
+
if axis != axis_int:
|
| 195 |
+
raise ValueError('`axis` must be an integer.')
|
| 196 |
+
|
| 197 |
+
if not isinstance(method, stats.PermutationMethod):
|
| 198 |
+
methods = {"asymptotic", "exact", "auto"}
|
| 199 |
+
method = method.lower()
|
| 200 |
+
if method not in methods:
|
| 201 |
+
raise ValueError(f'`method` must be one of {methods}.')
|
| 202 |
+
|
| 203 |
+
return x, y, use_continuity, alternative, axis_int, method
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def _mwu_choose_method(n1, n2, ties):
|
| 207 |
+
"""Choose method 'asymptotic' or 'exact' depending on input size, ties"""
|
| 208 |
+
|
| 209 |
+
# if both inputs are large, asymptotic is OK
|
| 210 |
+
if n1 > 8 and n2 > 8:
|
| 211 |
+
return "asymptotic"
|
| 212 |
+
|
| 213 |
+
# if there are any ties, asymptotic is preferred
|
| 214 |
+
if ties:
|
| 215 |
+
return "asymptotic"
|
| 216 |
+
|
| 217 |
+
return "exact"
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
@_axis_nan_policy_factory(MannwhitneyuResult, n_samples=2)
|
| 224 |
+
def mannwhitneyu(x, y, use_continuity=True, alternative="two-sided",
|
| 225 |
+
axis=0, method="auto"):
|
| 226 |
+
r'''Perform the Mann-Whitney U rank test on two independent samples.
|
| 227 |
+
|
| 228 |
+
The Mann-Whitney U test is a nonparametric test of the null hypothesis
|
| 229 |
+
that the distribution underlying sample `x` is the same as the
|
| 230 |
+
distribution underlying sample `y`. It is often used as a test of
|
| 231 |
+
difference in location between distributions.
|
| 232 |
+
|
| 233 |
+
Parameters
|
| 234 |
+
----------
|
| 235 |
+
x, y : array-like
|
| 236 |
+
N-d arrays of samples. The arrays must be broadcastable except along
|
| 237 |
+
the dimension given by `axis`.
|
| 238 |
+
use_continuity : bool, optional
|
| 239 |
+
Whether a continuity correction (1/2) should be applied.
|
| 240 |
+
Default is True when `method` is ``'asymptotic'``; has no effect
|
| 241 |
+
otherwise.
|
| 242 |
+
alternative : {'two-sided', 'less', 'greater'}, optional
|
| 243 |
+
Defines the alternative hypothesis. Default is 'two-sided'.
|
| 244 |
+
Let *F(u)* and *G(u)* be the cumulative distribution functions of the
|
| 245 |
+
distributions underlying `x` and `y`, respectively. Then the following
|
| 246 |
+
alternative hypotheses are available:
|
| 247 |
+
|
| 248 |
+
* 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)* for
|
| 249 |
+
at least one *u*.
|
| 250 |
+
* 'less': the distribution underlying `x` is stochastically less
|
| 251 |
+
than the distribution underlying `y`, i.e. *F(u) > G(u)* for all *u*.
|
| 252 |
+
* 'greater': the distribution underlying `x` is stochastically greater
|
| 253 |
+
than the distribution underlying `y`, i.e. *F(u) < G(u)* for all *u*.
|
| 254 |
+
|
| 255 |
+
Note that the mathematical expressions in the alternative hypotheses
|
| 256 |
+
above describe the CDFs of the underlying distributions. The directions
|
| 257 |
+
of the inequalities appear inconsistent with the natural language
|
| 258 |
+
description at first glance, but they are not. For example, suppose
|
| 259 |
+
*X* and *Y* are random variables that follow distributions with CDFs
|
| 260 |
+
*F* and *G*, respectively. If *F(u) > G(u)* for all *u*, samples drawn
|
| 261 |
+
from *X* tend to be less than those drawn from *Y*.
|
| 262 |
+
|
| 263 |
+
Under a more restrictive set of assumptions, the alternative hypotheses
|
| 264 |
+
can be expressed in terms of the locations of the distributions;
|
| 265 |
+
see [5] section 5.1.
|
| 266 |
+
axis : int, optional
|
| 267 |
+
Axis along which to perform the test. Default is 0.
|
| 268 |
+
method : {'auto', 'asymptotic', 'exact'} or `PermutationMethod` instance, optional
|
| 269 |
+
Selects the method used to calculate the *p*-value.
|
| 270 |
+
Default is 'auto'. The following options are available.
|
| 271 |
+
|
| 272 |
+
* ``'asymptotic'``: compares the standardized test statistic
|
| 273 |
+
against the normal distribution, correcting for ties.
|
| 274 |
+
* ``'exact'``: computes the exact *p*-value by comparing the observed
|
| 275 |
+
:math:`U` statistic against the exact distribution of the :math:`U`
|
| 276 |
+
statistic under the null hypothesis. No correction is made for ties.
|
| 277 |
+
* ``'auto'``: chooses ``'exact'`` when the size of one of the samples
|
| 278 |
+
is less than or equal to 8 and there are no ties;
|
| 279 |
+
chooses ``'asymptotic'`` otherwise.
|
| 280 |
+
* `PermutationMethod` instance. In this case, the p-value
|
| 281 |
+
is computed using `permutation_test` with the provided
|
| 282 |
+
configuration options and other appropriate settings.
|
| 283 |
+
|
| 284 |
+
Returns
|
| 285 |
+
-------
|
| 286 |
+
res : MannwhitneyuResult
|
| 287 |
+
An object containing attributes:
|
| 288 |
+
|
| 289 |
+
statistic : float
|
| 290 |
+
The Mann-Whitney U statistic corresponding with sample `x`. See
|
| 291 |
+
Notes for the test statistic corresponding with sample `y`.
|
| 292 |
+
pvalue : float
|
| 293 |
+
The associated *p*-value for the chosen `alternative`.
|
| 294 |
+
|
| 295 |
+
Notes
|
| 296 |
+
-----
|
| 297 |
+
If ``U1`` is the statistic corresponding with sample `x`, then the
|
| 298 |
+
statistic corresponding with sample `y` is
|
| 299 |
+
``U2 = x.shape[axis] * y.shape[axis] - U1``.
|
| 300 |
+
|
| 301 |
+
`mannwhitneyu` is for independent samples. For related / paired samples,
|
| 302 |
+
consider `scipy.stats.wilcoxon`.
|
| 303 |
+
|
| 304 |
+
`method` ``'exact'`` is recommended when there are no ties and when either
|
| 305 |
+
sample size is less than 8 [1]_. The implementation follows the algorithm
|
| 306 |
+
reported in [3]_.
|
| 307 |
+
Note that the exact method is *not* corrected for ties, but
|
| 308 |
+
`mannwhitneyu` will not raise errors or warnings if there are ties in the
|
| 309 |
+
data. If there are ties and either samples is small (fewer than ~10
|
| 310 |
+
observations), consider passing an instance of `PermutationMethod`
|
| 311 |
+
as the `method` to perform a permutation test.
|
| 312 |
+
|
| 313 |
+
The Mann-Whitney U test is a non-parametric version of the t-test for
|
| 314 |
+
independent samples. When the means of samples from the populations
|
| 315 |
+
are normally distributed, consider `scipy.stats.ttest_ind`.
|
| 316 |
+
|
| 317 |
+
See Also
|
| 318 |
+
--------
|
| 319 |
+
scipy.stats.wilcoxon, scipy.stats.ranksums, scipy.stats.ttest_ind
|
| 320 |
+
|
| 321 |
+
References
|
| 322 |
+
----------
|
| 323 |
+
.. [1] H.B. Mann and D.R. Whitney, "On a test of whether one of two random
|
| 324 |
+
variables is stochastically larger than the other", The Annals of
|
| 325 |
+
Mathematical Statistics, Vol. 18, pp. 50-60, 1947.
|
| 326 |
+
.. [2] Mann-Whitney U Test, Wikipedia,
|
| 327 |
+
http://en.wikipedia.org/wiki/Mann-Whitney_U_test
|
| 328 |
+
.. [3] Andreas Löffler,
|
| 329 |
+
"Über eine Partition der nat. Zahlen und ihr Anwendung beim U-Test",
|
| 330 |
+
Wiss. Z. Univ. Halle, XXXII'83 pp. 87-89.
|
| 331 |
+
.. [4] Rosie Shier, "Statistics: 2.3 The Mann-Whitney U Test", Mathematics
|
| 332 |
+
Learning Support Centre, 2004.
|
| 333 |
+
.. [5] Michael P. Fay and Michael A. Proschan. "Wilcoxon-Mann-Whitney
|
| 334 |
+
or t-test? On assumptions for hypothesis tests and multiple \
|
| 335 |
+
interpretations of decision rules." Statistics surveys, Vol. 4, pp.
|
| 336 |
+
1-39, 2010. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2857732/
|
| 337 |
+
|
| 338 |
+
Examples
|
| 339 |
+
--------
|
| 340 |
+
We follow the example from [4]_: nine randomly sampled young adults were
|
| 341 |
+
diagnosed with type II diabetes at the ages below.
|
| 342 |
+
|
| 343 |
+
>>> males = [19, 22, 16, 29, 24]
|
| 344 |
+
>>> females = [20, 11, 17, 12]
|
| 345 |
+
|
| 346 |
+
We use the Mann-Whitney U test to assess whether there is a statistically
|
| 347 |
+
significant difference in the diagnosis age of males and females.
|
| 348 |
+
The null hypothesis is that the distribution of male diagnosis ages is
|
| 349 |
+
the same as the distribution of female diagnosis ages. We decide
|
| 350 |
+
that a confidence level of 95% is required to reject the null hypothesis
|
| 351 |
+
in favor of the alternative that the distributions are different.
|
| 352 |
+
Since the number of samples is very small and there are no ties in the
|
| 353 |
+
data, we can compare the observed test statistic against the *exact*
|
| 354 |
+
distribution of the test statistic under the null hypothesis.
|
| 355 |
+
|
| 356 |
+
>>> from scipy.stats import mannwhitneyu
|
| 357 |
+
>>> U1, p = mannwhitneyu(males, females, method="exact")
|
| 358 |
+
>>> print(U1)
|
| 359 |
+
17.0
|
| 360 |
+
|
| 361 |
+
`mannwhitneyu` always reports the statistic associated with the first
|
| 362 |
+
sample, which, in this case, is males. This agrees with :math:`U_M = 17`
|
| 363 |
+
reported in [4]_. The statistic associated with the second statistic
|
| 364 |
+
can be calculated:
|
| 365 |
+
|
| 366 |
+
>>> nx, ny = len(males), len(females)
|
| 367 |
+
>>> U2 = nx*ny - U1
|
| 368 |
+
>>> print(U2)
|
| 369 |
+
3.0
|
| 370 |
+
|
| 371 |
+
This agrees with :math:`U_F = 3` reported in [4]_. The two-sided
|
| 372 |
+
*p*-value can be calculated from either statistic, and the value produced
|
| 373 |
+
by `mannwhitneyu` agrees with :math:`p = 0.11` reported in [4]_.
|
| 374 |
+
|
| 375 |
+
>>> print(p)
|
| 376 |
+
0.1111111111111111
|
| 377 |
+
|
| 378 |
+
The exact distribution of the test statistic is asymptotically normal, so
|
| 379 |
+
the example continues by comparing the exact *p*-value against the
|
| 380 |
+
*p*-value produced using the normal approximation.
|
| 381 |
+
|
| 382 |
+
>>> _, pnorm = mannwhitneyu(males, females, method="asymptotic")
|
| 383 |
+
>>> print(pnorm)
|
| 384 |
+
0.11134688653314041
|
| 385 |
+
|
| 386 |
+
Here `mannwhitneyu`'s reported *p*-value appears to conflict with the
|
| 387 |
+
value :math:`p = 0.09` given in [4]_. The reason is that [4]_
|
| 388 |
+
does not apply the continuity correction performed by `mannwhitneyu`;
|
| 389 |
+
`mannwhitneyu` reduces the distance between the test statistic and the
|
| 390 |
+
mean :math:`\mu = n_x n_y / 2` by 0.5 to correct for the fact that the
|
| 391 |
+
discrete statistic is being compared against a continuous distribution.
|
| 392 |
+
Here, the :math:`U` statistic used is less than the mean, so we reduce
|
| 393 |
+
the distance by adding 0.5 in the numerator.
|
| 394 |
+
|
| 395 |
+
>>> import numpy as np
|
| 396 |
+
>>> from scipy.stats import norm
|
| 397 |
+
>>> U = min(U1, U2)
|
| 398 |
+
>>> N = nx + ny
|
| 399 |
+
>>> z = (U - nx*ny/2 + 0.5) / np.sqrt(nx*ny * (N + 1)/ 12)
|
| 400 |
+
>>> p = 2 * norm.cdf(z) # use CDF to get p-value from smaller statistic
|
| 401 |
+
>>> print(p)
|
| 402 |
+
0.11134688653314041
|
| 403 |
+
|
| 404 |
+
If desired, we can disable the continuity correction to get a result
|
| 405 |
+
that agrees with that reported in [4]_.
|
| 406 |
+
|
| 407 |
+
>>> _, pnorm = mannwhitneyu(males, females, use_continuity=False,
|
| 408 |
+
... method="asymptotic")
|
| 409 |
+
>>> print(pnorm)
|
| 410 |
+
0.0864107329737
|
| 411 |
+
|
| 412 |
+
Regardless of whether we perform an exact or asymptotic test, the
|
| 413 |
+
probability of the test statistic being as extreme or more extreme by
|
| 414 |
+
chance exceeds 5%, so we do not consider the results statistically
|
| 415 |
+
significant.
|
| 416 |
+
|
| 417 |
+
Suppose that, before seeing the data, we had hypothesized that females
|
| 418 |
+
would tend to be diagnosed at a younger age than males.
|
| 419 |
+
In that case, it would be natural to provide the female ages as the
|
| 420 |
+
first input, and we would have performed a one-sided test using
|
| 421 |
+
``alternative = 'less'``: females are diagnosed at an age that is
|
| 422 |
+
stochastically less than that of males.
|
| 423 |
+
|
| 424 |
+
>>> res = mannwhitneyu(females, males, alternative="less", method="exact")
|
| 425 |
+
>>> print(res)
|
| 426 |
+
MannwhitneyuResult(statistic=3.0, pvalue=0.05555555555555555)
|
| 427 |
+
|
| 428 |
+
Again, the probability of getting a sufficiently low value of the
|
| 429 |
+
test statistic by chance under the null hypothesis is greater than 5%,
|
| 430 |
+
so we do not reject the null hypothesis in favor of our alternative.
|
| 431 |
+
|
| 432 |
+
If it is reasonable to assume that the means of samples from the
|
| 433 |
+
populations are normally distributed, we could have used a t-test to
|
| 434 |
+
perform the analysis.
|
| 435 |
+
|
| 436 |
+
>>> from scipy.stats import ttest_ind
|
| 437 |
+
>>> res = ttest_ind(females, males, alternative="less")
|
| 438 |
+
>>> print(res)
|
| 439 |
+
TtestResult(statistic=-2.239334696520584,
|
| 440 |
+
pvalue=0.030068441095757924,
|
| 441 |
+
df=7.0)
|
| 442 |
+
|
| 443 |
+
Under this assumption, the *p*-value would be low enough to reject the
|
| 444 |
+
null hypothesis in favor of the alternative.
|
| 445 |
+
|
| 446 |
+
'''
|
| 447 |
+
|
| 448 |
+
x, y, use_continuity, alternative, axis_int, method = (
|
| 449 |
+
_mwu_input_validation(x, y, use_continuity, alternative, axis, method))
|
| 450 |
+
|
| 451 |
+
x, y, xy = _broadcast_concatenate(x, y, axis)
|
| 452 |
+
|
| 453 |
+
n1, n2 = x.shape[-1], y.shape[-1]
|
| 454 |
+
|
| 455 |
+
# Follows [2]
|
| 456 |
+
ranks, t = _rankdata(xy, 'average', return_ties=True) # method 2, step 1
|
| 457 |
+
R1 = ranks[..., :n1].sum(axis=-1) # method 2, step 2
|
| 458 |
+
U1 = R1 - n1*(n1+1)/2 # method 2, step 3
|
| 459 |
+
U2 = n1 * n2 - U1 # as U1 + U2 = n1 * n2
|
| 460 |
+
|
| 461 |
+
if alternative == "greater":
|
| 462 |
+
U, f = U1, 1 # U is the statistic to use for p-value, f is a factor
|
| 463 |
+
elif alternative == "less":
|
| 464 |
+
U, f = U2, 1 # Due to symmetry, use SF of U2 rather than CDF of U1
|
| 465 |
+
else:
|
| 466 |
+
U, f = np.maximum(U1, U2), 2 # multiply SF by two for two-sided test
|
| 467 |
+
|
| 468 |
+
if method == "auto":
|
| 469 |
+
method = _mwu_choose_method(n1, n2, np.any(t > 1))
|
| 470 |
+
|
| 471 |
+
if method == "exact":
|
| 472 |
+
_mwu_state.set_shapes(n1, n2)
|
| 473 |
+
p = _mwu_state.sf(U.astype(int))
|
| 474 |
+
elif method == "asymptotic":
|
| 475 |
+
z = _get_mwu_z(U, n1, n2, t, continuity=use_continuity)
|
| 476 |
+
p = stats.norm.sf(z)
|
| 477 |
+
else: # `PermutationMethod` instance (already validated)
|
| 478 |
+
def statistic(x, y, axis):
|
| 479 |
+
return mannwhitneyu(x, y, use_continuity=use_continuity,
|
| 480 |
+
alternative=alternative, axis=axis,
|
| 481 |
+
method="asymptotic").statistic
|
| 482 |
+
|
| 483 |
+
res = stats.permutation_test((x, y), statistic, axis=axis,
|
| 484 |
+
**method._asdict(), alternative=alternative)
|
| 485 |
+
p = res.pvalue
|
| 486 |
+
f = 1
|
| 487 |
+
|
| 488 |
+
p *= f
|
| 489 |
+
|
| 490 |
+
# Ensure that test statistic is not greater than 1
|
| 491 |
+
# This could happen for exact test when U = m*n/2
|
| 492 |
+
p = np.clip(p, 0, 1)
|
| 493 |
+
|
| 494 |
+
return MannwhitneyuResult(U1, p)
|
parrot/lib/python3.10/site-packages/scipy/stats/_morestats.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/scipy/stats/_mstats_basic.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/scipy/stats/_multicomp.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import warnings
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from typing import TYPE_CHECKING
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from scipy import stats
|
| 10 |
+
from scipy.optimize import minimize_scalar
|
| 11 |
+
from scipy.stats._common import ConfidenceInterval
|
| 12 |
+
from scipy.stats._qmc import check_random_state
|
| 13 |
+
from scipy.stats._stats_py import _var
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
import numpy.typing as npt
|
| 17 |
+
from scipy._lib._util import DecimalNumber, SeedType
|
| 18 |
+
from typing import Literal, Sequence # noqa: UP035
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
__all__ = [
|
| 22 |
+
'dunnett'
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@dataclass
|
| 27 |
+
class DunnettResult:
|
| 28 |
+
"""Result object returned by `scipy.stats.dunnett`.
|
| 29 |
+
|
| 30 |
+
Attributes
|
| 31 |
+
----------
|
| 32 |
+
statistic : float ndarray
|
| 33 |
+
The computed statistic of the test for each comparison. The element
|
| 34 |
+
at index ``i`` is the statistic for the comparison between
|
| 35 |
+
groups ``i`` and the control.
|
| 36 |
+
pvalue : float ndarray
|
| 37 |
+
The computed p-value of the test for each comparison. The element
|
| 38 |
+
at index ``i`` is the p-value for the comparison between
|
| 39 |
+
group ``i`` and the control.
|
| 40 |
+
"""
|
| 41 |
+
statistic: np.ndarray
|
| 42 |
+
pvalue: np.ndarray
|
| 43 |
+
_alternative: Literal['two-sided', 'less', 'greater'] = field(repr=False)
|
| 44 |
+
_rho: np.ndarray = field(repr=False)
|
| 45 |
+
_df: int = field(repr=False)
|
| 46 |
+
_std: float = field(repr=False)
|
| 47 |
+
_mean_samples: np.ndarray = field(repr=False)
|
| 48 |
+
_mean_control: np.ndarray = field(repr=False)
|
| 49 |
+
_n_samples: np.ndarray = field(repr=False)
|
| 50 |
+
_n_control: int = field(repr=False)
|
| 51 |
+
_rng: SeedType = field(repr=False)
|
| 52 |
+
_ci: ConfidenceInterval | None = field(default=None, repr=False)
|
| 53 |
+
_ci_cl: DecimalNumber | None = field(default=None, repr=False)
|
| 54 |
+
|
| 55 |
+
def __str__(self):
|
| 56 |
+
# Note: `__str__` prints the confidence intervals from the most
|
| 57 |
+
# recent call to `confidence_interval`. If it has not been called,
|
| 58 |
+
# it will be called with the default CL of .95.
|
| 59 |
+
if self._ci is None:
|
| 60 |
+
self.confidence_interval(confidence_level=.95)
|
| 61 |
+
s = (
|
| 62 |
+
"Dunnett's test"
|
| 63 |
+
f" ({self._ci_cl*100:.1f}% Confidence Interval)\n"
|
| 64 |
+
"Comparison Statistic p-value Lower CI Upper CI\n"
|
| 65 |
+
)
|
| 66 |
+
for i in range(self.pvalue.size):
|
| 67 |
+
s += (f" (Sample {i} - Control) {self.statistic[i]:>10.3f}"
|
| 68 |
+
f"{self.pvalue[i]:>10.3f}"
|
| 69 |
+
f"{self._ci.low[i]:>10.3f}"
|
| 70 |
+
f"{self._ci.high[i]:>10.3f}\n")
|
| 71 |
+
|
| 72 |
+
return s
|
| 73 |
+
|
| 74 |
+
def _allowance(
|
| 75 |
+
self, confidence_level: DecimalNumber = 0.95, tol: DecimalNumber = 1e-3
|
| 76 |
+
) -> float:
|
| 77 |
+
"""Allowance.
|
| 78 |
+
|
| 79 |
+
It is the quantity to add/subtract from the observed difference
|
| 80 |
+
between the means of observed groups and the mean of the control
|
| 81 |
+
group. The result gives confidence limits.
|
| 82 |
+
|
| 83 |
+
Parameters
|
| 84 |
+
----------
|
| 85 |
+
confidence_level : float, optional
|
| 86 |
+
Confidence level for the computed confidence interval.
|
| 87 |
+
Default is .95.
|
| 88 |
+
tol : float, optional
|
| 89 |
+
A tolerance for numerical optimization: the allowance will produce
|
| 90 |
+
a confidence within ``10*tol*(1 - confidence_level)`` of the
|
| 91 |
+
specified level, or a warning will be emitted. Tight tolerances
|
| 92 |
+
may be impractical due to noisy evaluation of the objective.
|
| 93 |
+
Default is 1e-3.
|
| 94 |
+
|
| 95 |
+
Returns
|
| 96 |
+
-------
|
| 97 |
+
allowance : float
|
| 98 |
+
Allowance around the mean.
|
| 99 |
+
"""
|
| 100 |
+
alpha = 1 - confidence_level
|
| 101 |
+
|
| 102 |
+
def pvalue_from_stat(statistic):
|
| 103 |
+
statistic = np.array(statistic)
|
| 104 |
+
sf = _pvalue_dunnett(
|
| 105 |
+
rho=self._rho, df=self._df,
|
| 106 |
+
statistic=statistic, alternative=self._alternative,
|
| 107 |
+
rng=self._rng
|
| 108 |
+
)
|
| 109 |
+
return abs(sf - alpha)/alpha
|
| 110 |
+
|
| 111 |
+
# Evaluation of `pvalue_from_stat` is noisy due to the use of RQMC to
|
| 112 |
+
# evaluate `multivariate_t.cdf`. `minimize_scalar` is not designed
|
| 113 |
+
# to tolerate a noisy objective function and may fail to find the
|
| 114 |
+
# minimum accurately. We mitigate this possibility with the validation
|
| 115 |
+
# step below, but implementation of a noise-tolerant root finder or
|
| 116 |
+
# minimizer would be a welcome enhancement. See gh-18150.
|
| 117 |
+
res = minimize_scalar(pvalue_from_stat, method='brent', tol=tol)
|
| 118 |
+
critical_value = res.x
|
| 119 |
+
|
| 120 |
+
# validation
|
| 121 |
+
# tol*10 because tol=1e-3 means we tolerate a 1% change at most
|
| 122 |
+
if res.success is False or res.fun >= tol*10:
|
| 123 |
+
warnings.warn(
|
| 124 |
+
"Computation of the confidence interval did not converge to "
|
| 125 |
+
"the desired level. The confidence level corresponding with "
|
| 126 |
+
f"the returned interval is approximately {alpha*(1+res.fun)}.",
|
| 127 |
+
stacklevel=3
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
# From [1] p. 1101 between (1) and (3)
|
| 131 |
+
allowance = critical_value*self._std*np.sqrt(
|
| 132 |
+
1/self._n_samples + 1/self._n_control
|
| 133 |
+
)
|
| 134 |
+
return abs(allowance)
|
| 135 |
+
|
| 136 |
+
def confidence_interval(
|
| 137 |
+
self, confidence_level: DecimalNumber = 0.95
|
| 138 |
+
) -> ConfidenceInterval:
|
| 139 |
+
"""Compute the confidence interval for the specified confidence level.
|
| 140 |
+
|
| 141 |
+
Parameters
|
| 142 |
+
----------
|
| 143 |
+
confidence_level : float, optional
|
| 144 |
+
Confidence level for the computed confidence interval.
|
| 145 |
+
Default is .95.
|
| 146 |
+
|
| 147 |
+
Returns
|
| 148 |
+
-------
|
| 149 |
+
ci : ``ConfidenceInterval`` object
|
| 150 |
+
The object has attributes ``low`` and ``high`` that hold the
|
| 151 |
+
lower and upper bounds of the confidence intervals for each
|
| 152 |
+
comparison. The high and low values are accessible for each
|
| 153 |
+
comparison at index ``i`` for each group ``i``.
|
| 154 |
+
|
| 155 |
+
"""
|
| 156 |
+
# check to see if the supplied confidence level matches that of the
|
| 157 |
+
# previously computed CI.
|
| 158 |
+
if (self._ci is not None) and (confidence_level == self._ci_cl):
|
| 159 |
+
return self._ci
|
| 160 |
+
|
| 161 |
+
if not (0 < confidence_level < 1):
|
| 162 |
+
raise ValueError("Confidence level must be between 0 and 1.")
|
| 163 |
+
|
| 164 |
+
allowance = self._allowance(confidence_level=confidence_level)
|
| 165 |
+
diff_means = self._mean_samples - self._mean_control
|
| 166 |
+
|
| 167 |
+
low = diff_means-allowance
|
| 168 |
+
high = diff_means+allowance
|
| 169 |
+
|
| 170 |
+
if self._alternative == 'greater':
|
| 171 |
+
high = [np.inf] * len(diff_means)
|
| 172 |
+
elif self._alternative == 'less':
|
| 173 |
+
low = [-np.inf] * len(diff_means)
|
| 174 |
+
|
| 175 |
+
self._ci_cl = confidence_level
|
| 176 |
+
self._ci = ConfidenceInterval(
|
| 177 |
+
low=low,
|
| 178 |
+
high=high
|
| 179 |
+
)
|
| 180 |
+
return self._ci
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def dunnett(
|
| 184 |
+
*samples: npt.ArrayLike, # noqa: D417
|
| 185 |
+
control: npt.ArrayLike,
|
| 186 |
+
alternative: Literal['two-sided', 'less', 'greater'] = "two-sided",
|
| 187 |
+
random_state: SeedType = None
|
| 188 |
+
) -> DunnettResult:
|
| 189 |
+
"""Dunnett's test: multiple comparisons of means against a control group.
|
| 190 |
+
|
| 191 |
+
This is an implementation of Dunnett's original, single-step test as
|
| 192 |
+
described in [1]_.
|
| 193 |
+
|
| 194 |
+
Parameters
|
| 195 |
+
----------
|
| 196 |
+
sample1, sample2, ... : 1D array_like
|
| 197 |
+
The sample measurements for each experimental group.
|
| 198 |
+
control : 1D array_like
|
| 199 |
+
The sample measurements for the control group.
|
| 200 |
+
alternative : {'two-sided', 'less', 'greater'}, optional
|
| 201 |
+
Defines the alternative hypothesis.
|
| 202 |
+
|
| 203 |
+
The null hypothesis is that the means of the distributions underlying
|
| 204 |
+
the samples and control are equal. The following alternative
|
| 205 |
+
hypotheses are available (default is 'two-sided'):
|
| 206 |
+
|
| 207 |
+
* 'two-sided': the means of the distributions underlying the samples
|
| 208 |
+
and control are unequal.
|
| 209 |
+
* 'less': the means of the distributions underlying the samples
|
| 210 |
+
are less than the mean of the distribution underlying the control.
|
| 211 |
+
* 'greater': the means of the distributions underlying the
|
| 212 |
+
samples are greater than the mean of the distribution underlying
|
| 213 |
+
the control.
|
| 214 |
+
random_state : {None, int, `numpy.random.Generator`}, optional
|
| 215 |
+
If `random_state` is an int or None, a new `numpy.random.Generator` is
|
| 216 |
+
created using ``np.random.default_rng(random_state)``.
|
| 217 |
+
If `random_state` is already a ``Generator`` instance, then the
|
| 218 |
+
provided instance is used.
|
| 219 |
+
|
| 220 |
+
The random number generator is used to control the randomized
|
| 221 |
+
Quasi-Monte Carlo integration of the multivariate-t distribution.
|
| 222 |
+
|
| 223 |
+
Returns
|
| 224 |
+
-------
|
| 225 |
+
res : `~scipy.stats._result_classes.DunnettResult`
|
| 226 |
+
An object containing attributes:
|
| 227 |
+
|
| 228 |
+
statistic : float ndarray
|
| 229 |
+
The computed statistic of the test for each comparison. The element
|
| 230 |
+
at index ``i`` is the statistic for the comparison between
|
| 231 |
+
groups ``i`` and the control.
|
| 232 |
+
pvalue : float ndarray
|
| 233 |
+
The computed p-value of the test for each comparison. The element
|
| 234 |
+
at index ``i`` is the p-value for the comparison between
|
| 235 |
+
group ``i`` and the control.
|
| 236 |
+
|
| 237 |
+
And the following method:
|
| 238 |
+
|
| 239 |
+
confidence_interval(confidence_level=0.95) :
|
| 240 |
+
Compute the difference in means of the groups
|
| 241 |
+
with the control +- the allowance.
|
| 242 |
+
|
| 243 |
+
See Also
|
| 244 |
+
--------
|
| 245 |
+
tukey_hsd : performs pairwise comparison of means.
|
| 246 |
+
|
| 247 |
+
Notes
|
| 248 |
+
-----
|
| 249 |
+
Like the independent-sample t-test, Dunnett's test [1]_ is used to make
|
| 250 |
+
inferences about the means of distributions from which samples were drawn.
|
| 251 |
+
However, when multiple t-tests are performed at a fixed significance level,
|
| 252 |
+
the "family-wise error rate" - the probability of incorrectly rejecting the
|
| 253 |
+
null hypothesis in at least one test - will exceed the significance level.
|
| 254 |
+
Dunnett's test is designed to perform multiple comparisons while
|
| 255 |
+
controlling the family-wise error rate.
|
| 256 |
+
|
| 257 |
+
Dunnett's test compares the means of multiple experimental groups
|
| 258 |
+
against a single control group. Tukey's Honestly Significant Difference Test
|
| 259 |
+
is another multiple-comparison test that controls the family-wise error
|
| 260 |
+
rate, but `tukey_hsd` performs *all* pairwise comparisons between groups.
|
| 261 |
+
When pairwise comparisons between experimental groups are not needed,
|
| 262 |
+
Dunnett's test is preferable due to its higher power.
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
The use of this test relies on several assumptions.
|
| 266 |
+
|
| 267 |
+
1. The observations are independent within and among groups.
|
| 268 |
+
2. The observations within each group are normally distributed.
|
| 269 |
+
3. The distributions from which the samples are drawn have the same finite
|
| 270 |
+
variance.
|
| 271 |
+
|
| 272 |
+
References
|
| 273 |
+
----------
|
| 274 |
+
.. [1] Charles W. Dunnett. "A Multiple Comparison Procedure for Comparing
|
| 275 |
+
Several Treatments with a Control."
|
| 276 |
+
Journal of the American Statistical Association, 50:272, 1096-1121,
|
| 277 |
+
:doi:`10.1080/01621459.1955.10501294`, 1955.
|
| 278 |
+
|
| 279 |
+
Examples
|
| 280 |
+
--------
|
| 281 |
+
In [1]_, the influence of drugs on blood count measurements on three groups
|
| 282 |
+
of animal is investigated.
|
| 283 |
+
|
| 284 |
+
The following table summarizes the results of the experiment in which
|
| 285 |
+
two groups received different drugs, and one group acted as a control.
|
| 286 |
+
Blood counts (in millions of cells per cubic millimeter) were recorded::
|
| 287 |
+
|
| 288 |
+
>>> import numpy as np
|
| 289 |
+
>>> control = np.array([7.40, 8.50, 7.20, 8.24, 9.84, 8.32])
|
| 290 |
+
>>> drug_a = np.array([9.76, 8.80, 7.68, 9.36])
|
| 291 |
+
>>> drug_b = np.array([12.80, 9.68, 12.16, 9.20, 10.55])
|
| 292 |
+
|
| 293 |
+
We would like to see if the means between any of the groups are
|
| 294 |
+
significantly different. First, visually examine a box and whisker plot.
|
| 295 |
+
|
| 296 |
+
>>> import matplotlib.pyplot as plt
|
| 297 |
+
>>> fig, ax = plt.subplots(1, 1)
|
| 298 |
+
>>> ax.boxplot([control, drug_a, drug_b])
|
| 299 |
+
>>> ax.set_xticklabels(["Control", "Drug A", "Drug B"]) # doctest: +SKIP
|
| 300 |
+
>>> ax.set_ylabel("mean") # doctest: +SKIP
|
| 301 |
+
>>> plt.show()
|
| 302 |
+
|
| 303 |
+
Note the overlapping interquartile ranges of the drug A group and control
|
| 304 |
+
group and the apparent separation between the drug B group and control
|
| 305 |
+
group.
|
| 306 |
+
|
| 307 |
+
Next, we will use Dunnett's test to assess whether the difference
|
| 308 |
+
between group means is significant while controlling the family-wise error
|
| 309 |
+
rate: the probability of making any false discoveries.
|
| 310 |
+
Let the null hypothesis be that the experimental groups have the same
|
| 311 |
+
mean as the control and the alternative be that an experimental group does
|
| 312 |
+
not have the same mean as the control. We will consider a 5% family-wise
|
| 313 |
+
error rate to be acceptable, and therefore we choose 0.05 as the threshold
|
| 314 |
+
for significance.
|
| 315 |
+
|
| 316 |
+
>>> from scipy.stats import dunnett
|
| 317 |
+
>>> res = dunnett(drug_a, drug_b, control=control)
|
| 318 |
+
>>> res.pvalue
|
| 319 |
+
array([0.62004941, 0.0059035 ]) # may vary
|
| 320 |
+
|
| 321 |
+
The p-value corresponding with the comparison between group A and control
|
| 322 |
+
exceeds 0.05, so we do not reject the null hypothesis for that comparison.
|
| 323 |
+
However, the p-value corresponding with the comparison between group B
|
| 324 |
+
and control is less than 0.05, so we consider the experimental results
|
| 325 |
+
to be evidence against the null hypothesis in favor of the alternative:
|
| 326 |
+
group B has a different mean than the control group.
|
| 327 |
+
|
| 328 |
+
"""
|
| 329 |
+
samples_, control_, rng = _iv_dunnett(
|
| 330 |
+
samples=samples, control=control,
|
| 331 |
+
alternative=alternative, random_state=random_state
|
| 332 |
+
)
|
| 333 |
+
|
| 334 |
+
rho, df, n_group, n_samples, n_control = _params_dunnett(
|
| 335 |
+
samples=samples_, control=control_
|
| 336 |
+
)
|
| 337 |
+
|
| 338 |
+
statistic, std, mean_control, mean_samples = _statistic_dunnett(
|
| 339 |
+
samples_, control_, df, n_samples, n_control
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
pvalue = _pvalue_dunnett(
|
| 343 |
+
rho=rho, df=df, statistic=statistic, alternative=alternative, rng=rng
|
| 344 |
+
)
|
| 345 |
+
|
| 346 |
+
return DunnettResult(
|
| 347 |
+
statistic=statistic, pvalue=pvalue,
|
| 348 |
+
_alternative=alternative,
|
| 349 |
+
_rho=rho, _df=df, _std=std,
|
| 350 |
+
_mean_samples=mean_samples,
|
| 351 |
+
_mean_control=mean_control,
|
| 352 |
+
_n_samples=n_samples,
|
| 353 |
+
_n_control=n_control,
|
| 354 |
+
_rng=rng
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def _iv_dunnett(
|
| 359 |
+
samples: Sequence[npt.ArrayLike],
|
| 360 |
+
control: npt.ArrayLike,
|
| 361 |
+
alternative: Literal['two-sided', 'less', 'greater'],
|
| 362 |
+
random_state: SeedType
|
| 363 |
+
) -> tuple[list[np.ndarray], np.ndarray, SeedType]:
|
| 364 |
+
"""Input validation for Dunnett's test."""
|
| 365 |
+
rng = check_random_state(random_state)
|
| 366 |
+
|
| 367 |
+
if alternative not in {'two-sided', 'less', 'greater'}:
|
| 368 |
+
raise ValueError(
|
| 369 |
+
"alternative must be 'less', 'greater' or 'two-sided'"
|
| 370 |
+
)
|
| 371 |
+
|
| 372 |
+
ndim_msg = "Control and samples groups must be 1D arrays"
|
| 373 |
+
n_obs_msg = "Control and samples groups must have at least 1 observation"
|
| 374 |
+
|
| 375 |
+
control = np.asarray(control)
|
| 376 |
+
samples_ = [np.asarray(sample) for sample in samples]
|
| 377 |
+
|
| 378 |
+
# samples checks
|
| 379 |
+
samples_control: list[np.ndarray] = samples_ + [control]
|
| 380 |
+
for sample in samples_control:
|
| 381 |
+
if sample.ndim > 1:
|
| 382 |
+
raise ValueError(ndim_msg)
|
| 383 |
+
|
| 384 |
+
if sample.size < 1:
|
| 385 |
+
raise ValueError(n_obs_msg)
|
| 386 |
+
|
| 387 |
+
return samples_, control, rng
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def _params_dunnett(
|
| 391 |
+
samples: list[np.ndarray], control: np.ndarray
|
| 392 |
+
) -> tuple[np.ndarray, int, int, np.ndarray, int]:
|
| 393 |
+
"""Specific parameters for Dunnett's test.
|
| 394 |
+
|
| 395 |
+
Degree of freedom is the number of observations minus the number of groups
|
| 396 |
+
including the control.
|
| 397 |
+
"""
|
| 398 |
+
n_samples = np.array([sample.size for sample in samples])
|
| 399 |
+
|
| 400 |
+
# From [1] p. 1100 d.f. = (sum N)-(p+1)
|
| 401 |
+
n_sample = n_samples.sum()
|
| 402 |
+
n_control = control.size
|
| 403 |
+
n = n_sample + n_control
|
| 404 |
+
n_groups = len(samples)
|
| 405 |
+
df = n - n_groups - 1
|
| 406 |
+
|
| 407 |
+
# From [1] p. 1103 rho_ij = 1/sqrt((N0/Ni+1)(N0/Nj+1))
|
| 408 |
+
rho = n_control/n_samples + 1
|
| 409 |
+
rho = 1/np.sqrt(rho[:, None] * rho[None, :])
|
| 410 |
+
np.fill_diagonal(rho, 1)
|
| 411 |
+
|
| 412 |
+
return rho, df, n_groups, n_samples, n_control
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def _statistic_dunnett(
|
| 416 |
+
samples: list[np.ndarray], control: np.ndarray, df: int,
|
| 417 |
+
n_samples: np.ndarray, n_control: int
|
| 418 |
+
) -> tuple[np.ndarray, float, np.ndarray, np.ndarray]:
|
| 419 |
+
"""Statistic of Dunnett's test.
|
| 420 |
+
|
| 421 |
+
Computation based on the original single-step test from [1].
|
| 422 |
+
"""
|
| 423 |
+
mean_control = np.mean(control)
|
| 424 |
+
mean_samples = np.array([np.mean(sample) for sample in samples])
|
| 425 |
+
all_samples = [control] + samples
|
| 426 |
+
all_means = np.concatenate([[mean_control], mean_samples])
|
| 427 |
+
|
| 428 |
+
# Variance estimate s^2 from [1] Eq. 1
|
| 429 |
+
s2 = np.sum([_var(sample, mean=mean)*sample.size
|
| 430 |
+
for sample, mean in zip(all_samples, all_means)]) / df
|
| 431 |
+
std = np.sqrt(s2)
|
| 432 |
+
|
| 433 |
+
# z score inferred from [1] unlabeled equation after Eq. 1
|
| 434 |
+
z = (mean_samples - mean_control) / np.sqrt(1/n_samples + 1/n_control)
|
| 435 |
+
|
| 436 |
+
return z / std, std, mean_control, mean_samples
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def _pvalue_dunnett(
|
| 440 |
+
rho: np.ndarray, df: int, statistic: np.ndarray,
|
| 441 |
+
alternative: Literal['two-sided', 'less', 'greater'],
|
| 442 |
+
rng: SeedType = None
|
| 443 |
+
) -> np.ndarray:
|
| 444 |
+
"""pvalue from the multivariate t-distribution.
|
| 445 |
+
|
| 446 |
+
Critical values come from the multivariate student-t distribution.
|
| 447 |
+
"""
|
| 448 |
+
statistic = statistic.reshape(-1, 1)
|
| 449 |
+
|
| 450 |
+
mvt = stats.multivariate_t(shape=rho, df=df, seed=rng)
|
| 451 |
+
if alternative == "two-sided":
|
| 452 |
+
statistic = abs(statistic)
|
| 453 |
+
pvalue = 1 - mvt.cdf(statistic, lower_limit=-statistic)
|
| 454 |
+
elif alternative == "greater":
|
| 455 |
+
pvalue = 1 - mvt.cdf(statistic, lower_limit=-np.inf)
|
| 456 |
+
else:
|
| 457 |
+
pvalue = 1 - mvt.cdf(np.inf, lower_limit=statistic)
|
| 458 |
+
|
| 459 |
+
return np.atleast_1d(pvalue)
|
parrot/lib/python3.10/site-packages/scipy/stats/_multivariate.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (85 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py
ADDED
|
@@ -0,0 +1,482 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from scipy.special import ndtri
|
| 4 |
+
from scipy.optimize import brentq
|
| 5 |
+
from ._discrete_distns import nchypergeom_fisher
|
| 6 |
+
from ._common import ConfidenceInterval
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _sample_odds_ratio(table):
|
| 10 |
+
"""
|
| 11 |
+
Given a table [[a, b], [c, d]], compute a*d/(b*c).
|
| 12 |
+
|
| 13 |
+
Return nan if the numerator and denominator are 0.
|
| 14 |
+
Return inf if just the denominator is 0.
|
| 15 |
+
"""
|
| 16 |
+
# table must be a 2x2 numpy array.
|
| 17 |
+
if table[1, 0] > 0 and table[0, 1] > 0:
|
| 18 |
+
oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1])
|
| 19 |
+
elif table[0, 0] == 0 or table[1, 1] == 0:
|
| 20 |
+
oddsratio = np.nan
|
| 21 |
+
else:
|
| 22 |
+
oddsratio = np.inf
|
| 23 |
+
return oddsratio
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _solve(func):
|
| 27 |
+
"""
|
| 28 |
+
Solve func(nc) = 0. func must be an increasing function.
|
| 29 |
+
"""
|
| 30 |
+
# We could just as well call the variable `x` instead of `nc`, but we
|
| 31 |
+
# always call this function with functions for which nc (the noncentrality
|
| 32 |
+
# parameter) is the variable for which we are solving.
|
| 33 |
+
nc = 1.0
|
| 34 |
+
value = func(nc)
|
| 35 |
+
if value == 0:
|
| 36 |
+
return nc
|
| 37 |
+
|
| 38 |
+
# Multiplicative factor by which to increase or decrease nc when
|
| 39 |
+
# searching for a bracketing interval.
|
| 40 |
+
factor = 2.0
|
| 41 |
+
# Find a bracketing interval.
|
| 42 |
+
if value > 0:
|
| 43 |
+
nc /= factor
|
| 44 |
+
while func(nc) > 0:
|
| 45 |
+
nc /= factor
|
| 46 |
+
lo = nc
|
| 47 |
+
hi = factor*nc
|
| 48 |
+
else:
|
| 49 |
+
nc *= factor
|
| 50 |
+
while func(nc) < 0:
|
| 51 |
+
nc *= factor
|
| 52 |
+
lo = nc/factor
|
| 53 |
+
hi = nc
|
| 54 |
+
|
| 55 |
+
# lo and hi bracket the solution for nc.
|
| 56 |
+
nc = brentq(func, lo, hi, xtol=1e-13)
|
| 57 |
+
return nc
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _nc_hypergeom_mean_inverse(x, M, n, N):
|
| 61 |
+
"""
|
| 62 |
+
For the given noncentral hypergeometric parameters x, M, n,and N
|
| 63 |
+
(table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2
|
| 64 |
+
contingency table), find the noncentrality parameter of Fisher's
|
| 65 |
+
noncentral hypergeometric distribution whose mean is x.
|
| 66 |
+
"""
|
| 67 |
+
nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)
|
| 68 |
+
return nc
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _hypergeom_params_from_table(table):
|
| 72 |
+
# The notation M, n and N is consistent with stats.hypergeom and
|
| 73 |
+
# stats.nchypergeom_fisher.
|
| 74 |
+
x = table[0, 0]
|
| 75 |
+
M = table.sum()
|
| 76 |
+
n = table[0].sum()
|
| 77 |
+
N = table[:, 0].sum()
|
| 78 |
+
return x, M, n, N
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _ci_upper(table, alpha):
|
| 82 |
+
"""
|
| 83 |
+
Compute the upper end of the confidence interval.
|
| 84 |
+
"""
|
| 85 |
+
if _sample_odds_ratio(table) == np.inf:
|
| 86 |
+
return np.inf
|
| 87 |
+
|
| 88 |
+
x, M, n, N = _hypergeom_params_from_table(table)
|
| 89 |
+
|
| 90 |
+
# nchypergeom_fisher.cdf is a decreasing function of nc, so we negate
|
| 91 |
+
# it in the lambda expression.
|
| 92 |
+
nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha)
|
| 93 |
+
return nc
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _ci_lower(table, alpha):
|
| 97 |
+
"""
|
| 98 |
+
Compute the lower end of the confidence interval.
|
| 99 |
+
"""
|
| 100 |
+
if _sample_odds_ratio(table) == 0:
|
| 101 |
+
return 0
|
| 102 |
+
|
| 103 |
+
x, M, n, N = _hypergeom_params_from_table(table)
|
| 104 |
+
|
| 105 |
+
nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha)
|
| 106 |
+
return nc
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _conditional_oddsratio(table):
|
| 110 |
+
"""
|
| 111 |
+
Conditional MLE of the odds ratio for the 2x2 contingency table.
|
| 112 |
+
"""
|
| 113 |
+
x, M, n, N = _hypergeom_params_from_table(table)
|
| 114 |
+
# Get the bounds of the support. The support of the noncentral
|
| 115 |
+
# hypergeometric distribution with parameters M, n, and N is the same
|
| 116 |
+
# for all values of the noncentrality parameter, so we can use 1 here.
|
| 117 |
+
lo, hi = nchypergeom_fisher.support(M, n, N, 1)
|
| 118 |
+
|
| 119 |
+
# Check if x is at one of the extremes of the support. If so, we know
|
| 120 |
+
# the odds ratio is either 0 or inf.
|
| 121 |
+
if x == lo:
|
| 122 |
+
# x is at the low end of the support.
|
| 123 |
+
return 0
|
| 124 |
+
if x == hi:
|
| 125 |
+
# x is at the high end of the support.
|
| 126 |
+
return np.inf
|
| 127 |
+
|
| 128 |
+
nc = _nc_hypergeom_mean_inverse(x, M, n, N)
|
| 129 |
+
return nc
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _conditional_oddsratio_ci(table, confidence_level=0.95,
|
| 133 |
+
alternative='two-sided'):
|
| 134 |
+
"""
|
| 135 |
+
Conditional exact confidence interval for the odds ratio.
|
| 136 |
+
"""
|
| 137 |
+
if alternative == 'two-sided':
|
| 138 |
+
alpha = 0.5*(1 - confidence_level)
|
| 139 |
+
lower = _ci_lower(table, alpha)
|
| 140 |
+
upper = _ci_upper(table, alpha)
|
| 141 |
+
elif alternative == 'less':
|
| 142 |
+
lower = 0.0
|
| 143 |
+
upper = _ci_upper(table, 1 - confidence_level)
|
| 144 |
+
else:
|
| 145 |
+
# alternative == 'greater'
|
| 146 |
+
lower = _ci_lower(table, 1 - confidence_level)
|
| 147 |
+
upper = np.inf
|
| 148 |
+
|
| 149 |
+
return lower, upper
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def _sample_odds_ratio_ci(table, confidence_level=0.95,
|
| 153 |
+
alternative='two-sided'):
|
| 154 |
+
oddsratio = _sample_odds_ratio(table)
|
| 155 |
+
log_or = np.log(oddsratio)
|
| 156 |
+
se = np.sqrt((1/table).sum())
|
| 157 |
+
if alternative == 'less':
|
| 158 |
+
z = ndtri(confidence_level)
|
| 159 |
+
loglow = -np.inf
|
| 160 |
+
loghigh = log_or + z*se
|
| 161 |
+
elif alternative == 'greater':
|
| 162 |
+
z = ndtri(confidence_level)
|
| 163 |
+
loglow = log_or - z*se
|
| 164 |
+
loghigh = np.inf
|
| 165 |
+
else:
|
| 166 |
+
# alternative is 'two-sided'
|
| 167 |
+
z = ndtri(0.5*confidence_level + 0.5)
|
| 168 |
+
loglow = log_or - z*se
|
| 169 |
+
loghigh = log_or + z*se
|
| 170 |
+
|
| 171 |
+
return np.exp(loglow), np.exp(loghigh)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
class OddsRatioResult:
    """
    Result object returned by `scipy.stats.contingency.odds_ratio`.  See
    the docstring of `odds_ratio` for more details.

    Attributes
    ----------
    statistic : float
        The computed odds ratio.

        * If `kind` is ``'sample'``, this is sample (or unconditional)
          estimate, given by
          ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
        * If `kind` is ``'conditional'``, this is the conditional
          maximum likelihood estimate for the odds ratio. It is
          the noncentrality parameter of Fisher's noncentral
          hypergeometric distribution with the same hypergeometric
          parameters as `table` and whose mean is ``table[0, 0]``.

    Methods
    -------
    confidence_interval :
        Confidence interval for the odds ratio.
    """

    def __init__(self, _table, _kind, statistic):
        # The table and kind stay private for now: very few `scipy.stats`
        # results expose this kind of input information publicly.
        self._table = _table
        self._kind = _kind
        self.statistic = statistic

    def __repr__(self):
        return f"OddsRatioResult(statistic={self.statistic})"

    def confidence_interval(self, confidence_level=0.95,
                            alternative='two-sided'):
        """
        Confidence interval for the odds ratio.

        Parameters
        ----------
        confidence_level: float
            Desired confidence level for the confidence interval.
            The value must be given as a fraction between 0 and 1.
            Default is 0.95 (meaning 95%).

        alternative : {'two-sided', 'less', 'greater'}, optional
            The alternative hypothesis of the hypothesis test to which the
            confidence interval corresponds.  With the null hypothesis
            that the true odds ratio equals ``OR`` and the interval
            ``(low, high)`` (default is 'two-sided'):

            * 'two-sided': the true odds ratio is not equal to ``OR``.
              There is evidence against the null hypothesis at the chosen
              `confidence_level` if ``high < OR`` or ``low > OR``.
            * 'less': the true odds ratio is less than ``OR``.  The
              ``low`` end of the confidence interval is 0, and there is
              evidence against the null hypothesis if ``high < OR``.
            * 'greater': the true odds ratio is greater than ``OR``.  The
              ``high`` end of the confidence interval is ``np.inf``, and
              there is evidence against the null hypothesis if
              ``low > OR``.

        Returns
        -------
        ci : ``ConfidenceInterval`` instance
            The confidence interval, represented as an object with
            attributes ``low`` and ``high``.

        Notes
        -----
        When `kind` is ``'conditional'``, the limits of the confidence
        interval are the conditional "exact confidence limits" as
        described by Fisher [1]_; see also Section 4.1.2 of Sahai and
        Khurshid [2]_.

        When `kind` is ``'sample'``, the confidence interval is computed
        under the assumption that the logarithm of the odds ratio is
        normally distributed with standard error
        ``sqrt(1/a + 1/b + 1/c + 1/d)``, where ``a``, ``b``, ``c`` and
        ``d`` are the elements of the contingency table.  (See, for
        example, [2]_, section 3.1.3.2, or [3]_, section 2.3.3.)

        References
        ----------
        .. [1] R. A. Fisher (1935), The logic of inductive inference,
               Journal of the Royal Statistical Society, Vol. 98, No. 1,
               pp. 39-82.
        .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
               Methods, Techniques, and Applications, CRC Press LLC, Boca
               Raton, Florida.
        .. [3] Alan Agresti, An Introduction to Categorical Data Analysis
               (second edition), Wiley, Hoboken, NJ, USA (2007).
        """
        if alternative not in ['two-sided', 'less', 'greater']:
            raise ValueError("`alternative` must be 'two-sided', 'less' or "
                             "'greater'.")

        if confidence_level < 0 or confidence_level > 1:
            raise ValueError('confidence_level must be between 0 and 1')

        if self._kind == 'conditional':
            return self._conditional_odds_ratio_ci(confidence_level,
                                                   alternative)
        return self._sample_odds_ratio_ci(confidence_level, alternative)

    def _conditional_odds_ratio_ci(self, confidence_level=0.95,
                                   alternative='two-sided'):
        """
        Confidence interval for the conditional odds ratio.
        """
        table = self._table
        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
            # A zero row or column sum means the p-value is 1, the odds
            # ratio is NaN and the confidence interval is (0, inf).
            low, high = 0, np.inf
        else:
            low, high = _conditional_oddsratio_ci(
                table, confidence_level=confidence_level,
                alternative=alternative)
        return ConfidenceInterval(low=low, high=high)

    def _sample_odds_ratio_ci(self, confidence_level=0.95,
                              alternative='two-sided'):
        """
        Confidence interval for the sample odds ratio.
        """
        if confidence_level < 0 or confidence_level > 1:
            raise ValueError('confidence_level must be between 0 and 1')

        table = self._table
        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
            # A zero row or column sum means the p-value is 1, the odds
            # ratio is NaN and the confidence interval is (0, inf).
            low, high = 0, np.inf
        else:
            # The bare name resolves to the module-level helper of the
            # same name, exactly as in a direct call.
            low, high = _sample_odds_ratio_ci(
                table, confidence_level=confidence_level,
                alternative=alternative)
        return ConfidenceInterval(low=low, high=high)
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def odds_ratio(table, *, kind='conditional'):
    r"""
    Compute the odds ratio for a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table.  Elements must be non-negative integers.
    kind : str, optional
        Which kind of odds ratio to compute, either the sample
        odds ratio (``kind='sample'``) or the conditional odds ratio
        (``kind='conditional'``).  Default is ``'conditional'``.

    Returns
    -------
    result : `~scipy.stats._result_classes.OddsRatioResult` instance
        The returned object has a computed attribute:

        statistic : float
            * If `kind` is ``'sample'``, this is sample (or unconditional)
              estimate, given by
              ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
            * If `kind` is ``'conditional'``, this is the conditional
              maximum likelihood estimate for the odds ratio. It is
              the noncentrality parameter of Fisher's noncentral
              hypergeometric distribution with the same hypergeometric
              parameters as `table` and whose mean is ``table[0, 0]``.

        The object also has the method `confidence_interval` that computes
        the confidence interval of the odds ratio.

    See Also
    --------
    scipy.stats.fisher_exact
    relative_risk

    Notes
    -----
    The conditional odds ratio was discussed by Fisher (see "Example 1"
    of [1]_).  Texts that cover the odds ratio include [2]_ and [3]_.

    .. versionadded:: 1.10.0

    References
    ----------
    .. [1] R. A. Fisher (1935), The logic of inductive inference,
           Journal of the Royal Statistical Society, Vol. 98, No. 1,
           pp. 39-82.
    .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research.
           Volume I - The analysis of case-control studies. IARC Sci Publ.
           (32):5-338. PMID: 7216345. (See section 4.2.)
    .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
           Methods, Techniques, and Applications, CRC Press LLC, Boca
           Raton, Florida.
    .. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of
           Cardiovascular Events in Women and Men: A Sex-Specific
           Meta-analysis of Randomized Controlled Trials."
           JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006.

    Examples
    --------
    In epidemiology, individuals are classified as "exposed" or
    "unexposed" to some factor or treatment, and as "cases" (with some
    illness) or "noncases".  The counts of these classes form a 2x2
    contingency table, and the sample odds ratio ``(a/c) / (b/d)``
    compares the odds of being a case in the exposed group against those
    in the unexposed group.  Note that interchanging rows or columns
    inverts the odds ratio, so the labeling of the table matters when
    interpreting the result.

    In [4]_, the use of aspirin to prevent cardiovascular events was
    investigated.  For ischemic stroke in women, the study recorded::

                          Aspirin   Control/Placebo
        Ischemic stroke     176         230
        No stroke         21035       21018

    Is there evidence that the aspirin reduces the risk of ischemic
    stroke?  Compute the odds ratio:

    >>> from scipy.stats.contingency import odds_ratio
    >>> res = odds_ratio([[176, 230], [21035, 21018]])
    >>> res.statistic
    0.7646037659999126

    For this sample, the odds of getting an ischemic stroke for those who
    have been taking aspirin are 0.76 times that of those who have
    received the placebo.  For inference about the population, compute
    the 95% confidence interval:

    >>> res.confidence_interval(confidence_level=0.95)
    ConfidenceInterval(low=0.6241234078749812, high=0.9354102892100372)

    The entire 95% confidence interval falls below 1, which supports the
    authors' conclusion that the aspirin was associated with a
    statistically significant reduction in ischemic stroke.
    """
    if kind not in ['conditional', 'sample']:
        raise ValueError("`kind` must be 'conditional' or 'sample'.")

    counts = np.asarray(table)

    if counts.shape != (2, 2):
        raise ValueError(f"Invalid shape {counts.shape}. The input `table` "
                         "must be of shape (2, 2).")

    if not np.issubdtype(counts.dtype, np.integer):
        raise ValueError("`table` must be an array of integers, but got "
                         f"type {counts.dtype}")
    counts = counts.astype(np.int64)

    if np.any(counts < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in counts.sum(axis=0) or 0 in counts.sum(axis=1):
        # A zero row or column sum makes the p-value NaN and the odds
        # ratio NaN, so short-circuit before estimating anything.
        return OddsRatioResult(_table=counts, _kind=kind, statistic=np.nan)

    if kind == 'sample':
        estimate = _sample_odds_ratio(counts)
    else:
        # kind is 'conditional'
        estimate = _conditional_oddsratio(counts)

    return OddsRatioResult(_table=counts, _kind=kind, statistic=estimate)
|