Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py +795 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_censored_data.py +459 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_constants.py +39 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_covariance.py +633 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_crosstab.py +204 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py +0 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_mgc.py +550 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py +482 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_relative_risk.py +263 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_resampling.py +0 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_sampling.py +1314 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py +712 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/contingency.py +468 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/distributions.py +24 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/mstats.py +140 -0
- llava_next/lib/python3.10/site-packages/scipy/stats/mstats_extras.py +25 -0
- parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/RECORD +99 -0
- parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/REQUESTED +0 -0
- parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/top_level.txt +1 -0
- parrot/lib/python3.10/site-packages/httpcore/__init__.py +139 -0
- parrot/lib/python3.10/site-packages/httpcore/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/__pycache__/_api.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/__pycache__/_models.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/__pycache__/_ssl.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/__pycache__/_synchronization.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/__pycache__/_trace.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/__pycache__/_utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_api.py +92 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__init__.py +39 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http11.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http2.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/interfaces.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/connection.py +215 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/connection_pool.py +356 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/http11.py +331 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/http2.py +589 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/http_proxy.py +350 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/interfaces.py +135 -0
- parrot/lib/python3.10/site-packages/httpcore/_async/socks_proxy.py +340 -0
- parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/auto.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/mock.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/sync.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/trio.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/httpcore/_backends/sync.py +133 -0
.gitattributes
CHANGED
|
@@ -339,3 +339,4 @@ parrot/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.
|
|
| 339 |
parrot/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 340 |
parrot/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 341 |
parrot/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 339 |
parrot/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 340 |
parrot/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 341 |
parrot/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 342 |
+
parrot/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py
ADDED
|
@@ -0,0 +1,795 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
from warnings import catch_warnings, simplefilter
|
| 3 |
+
import numpy as np
|
| 4 |
+
from operator import index
|
| 5 |
+
from collections import namedtuple
|
| 6 |
+
|
| 7 |
+
__all__ = ['binned_statistic',
|
| 8 |
+
'binned_statistic_2d',
|
| 9 |
+
'binned_statistic_dd']
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
|
| 13 |
+
('statistic', 'bin_edges', 'binnumber'))
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def binned_statistic(x, values, statistic='mean',
|
| 17 |
+
bins=10, range=None):
|
| 18 |
+
"""
|
| 19 |
+
Compute a binned statistic for one or more sets of data.
|
| 20 |
+
|
| 21 |
+
This is a generalization of a histogram function. A histogram divides
|
| 22 |
+
the space into bins, and returns the count of the number of points in
|
| 23 |
+
each bin. This function allows the computation of the sum, mean, median,
|
| 24 |
+
or other statistic of the values (or set of values) within each bin.
|
| 25 |
+
|
| 26 |
+
Parameters
|
| 27 |
+
----------
|
| 28 |
+
x : (N,) array_like
|
| 29 |
+
A sequence of values to be binned.
|
| 30 |
+
values : (N,) array_like or list of (N,) array_like
|
| 31 |
+
The data on which the statistic will be computed. This must be
|
| 32 |
+
the same shape as `x`, or a set of sequences - each the same shape as
|
| 33 |
+
`x`. If `values` is a set of sequences, the statistic will be computed
|
| 34 |
+
on each independently.
|
| 35 |
+
statistic : string or callable, optional
|
| 36 |
+
The statistic to compute (default is 'mean').
|
| 37 |
+
The following statistics are available:
|
| 38 |
+
|
| 39 |
+
* 'mean' : compute the mean of values for points within each bin.
|
| 40 |
+
Empty bins will be represented by NaN.
|
| 41 |
+
* 'std' : compute the standard deviation within each bin. This
|
| 42 |
+
is implicitly calculated with ddof=0.
|
| 43 |
+
* 'median' : compute the median of values for points within each
|
| 44 |
+
bin. Empty bins will be represented by NaN.
|
| 45 |
+
* 'count' : compute the count of points within each bin. This is
|
| 46 |
+
identical to an unweighted histogram. `values` array is not
|
| 47 |
+
referenced.
|
| 48 |
+
* 'sum' : compute the sum of values for points within each bin.
|
| 49 |
+
This is identical to a weighted histogram.
|
| 50 |
+
* 'min' : compute the minimum of values for points within each bin.
|
| 51 |
+
Empty bins will be represented by NaN.
|
| 52 |
+
* 'max' : compute the maximum of values for point within each bin.
|
| 53 |
+
Empty bins will be represented by NaN.
|
| 54 |
+
* function : a user-defined function which takes a 1D array of
|
| 55 |
+
values, and outputs a single numerical statistic. This function
|
| 56 |
+
will be called on the values in each bin. Empty bins will be
|
| 57 |
+
represented by function([]), or NaN if this returns an error.
|
| 58 |
+
|
| 59 |
+
bins : int or sequence of scalars, optional
|
| 60 |
+
If `bins` is an int, it defines the number of equal-width bins in the
|
| 61 |
+
given range (10 by default). If `bins` is a sequence, it defines the
|
| 62 |
+
bin edges, including the rightmost edge, allowing for non-uniform bin
|
| 63 |
+
widths. Values in `x` that are smaller than lowest bin edge are
|
| 64 |
+
assigned to bin number 0, values beyond the highest bin are assigned to
|
| 65 |
+
``bins[-1]``. If the bin edges are specified, the number of bins will
|
| 66 |
+
be, (nx = len(bins)-1).
|
| 67 |
+
range : (float, float) or [(float, float)], optional
|
| 68 |
+
The lower and upper range of the bins. If not provided, range
|
| 69 |
+
is simply ``(x.min(), x.max())``. Values outside the range are
|
| 70 |
+
ignored.
|
| 71 |
+
|
| 72 |
+
Returns
|
| 73 |
+
-------
|
| 74 |
+
statistic : array
|
| 75 |
+
The values of the selected statistic in each bin.
|
| 76 |
+
bin_edges : array of dtype float
|
| 77 |
+
Return the bin edges ``(length(statistic)+1)``.
|
| 78 |
+
binnumber: 1-D ndarray of ints
|
| 79 |
+
Indices of the bins (corresponding to `bin_edges`) in which each value
|
| 80 |
+
of `x` belongs. Same length as `values`. A binnumber of `i` means the
|
| 81 |
+
corresponding value is between (bin_edges[i-1], bin_edges[i]).
|
| 82 |
+
|
| 83 |
+
See Also
|
| 84 |
+
--------
|
| 85 |
+
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
|
| 86 |
+
|
| 87 |
+
Notes
|
| 88 |
+
-----
|
| 89 |
+
All but the last (righthand-most) bin is half-open. In other words, if
|
| 90 |
+
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
|
| 91 |
+
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
|
| 92 |
+
``[3, 4]``, which *includes* 4.
|
| 93 |
+
|
| 94 |
+
.. versionadded:: 0.11.0
|
| 95 |
+
|
| 96 |
+
Examples
|
| 97 |
+
--------
|
| 98 |
+
>>> import numpy as np
|
| 99 |
+
>>> from scipy import stats
|
| 100 |
+
>>> import matplotlib.pyplot as plt
|
| 101 |
+
|
| 102 |
+
First some basic examples:
|
| 103 |
+
|
| 104 |
+
Create two evenly spaced bins in the range of the given sample, and sum the
|
| 105 |
+
corresponding values in each of those bins:
|
| 106 |
+
|
| 107 |
+
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
|
| 108 |
+
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
|
| 109 |
+
BinnedStatisticResult(statistic=array([4. , 4.5]),
|
| 110 |
+
bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))
|
| 111 |
+
|
| 112 |
+
Multiple arrays of values can also be passed. The statistic is calculated
|
| 113 |
+
on each set independently:
|
| 114 |
+
|
| 115 |
+
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
|
| 116 |
+
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
|
| 117 |
+
BinnedStatisticResult(statistic=array([[4. , 4.5],
|
| 118 |
+
[8. , 9. ]]), bin_edges=array([1., 4., 7.]),
|
| 119 |
+
binnumber=array([1, 1, 1, 2, 2]))
|
| 120 |
+
|
| 121 |
+
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
|
| 122 |
+
... bins=3)
|
| 123 |
+
BinnedStatisticResult(statistic=array([1., 2., 4.]),
|
| 124 |
+
bin_edges=array([1., 2., 3., 4.]),
|
| 125 |
+
binnumber=array([1, 2, 1, 2, 3]))
|
| 126 |
+
|
| 127 |
+
As a second example, we now generate some random data of sailing boat speed
|
| 128 |
+
as a function of wind speed, and then determine how fast our boat is for
|
| 129 |
+
certain wind speeds:
|
| 130 |
+
|
| 131 |
+
>>> rng = np.random.default_rng()
|
| 132 |
+
>>> windspeed = 8 * rng.random(500)
|
| 133 |
+
>>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500)
|
| 134 |
+
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
|
| 135 |
+
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
|
| 136 |
+
>>> plt.figure()
|
| 137 |
+
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
|
| 138 |
+
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
|
| 139 |
+
... label='binned statistic of data')
|
| 140 |
+
>>> plt.legend()
|
| 141 |
+
|
| 142 |
+
Now we can use ``binnumber`` to select all datapoints with a windspeed
|
| 143 |
+
below 1:
|
| 144 |
+
|
| 145 |
+
>>> low_boatspeed = boatspeed[binnumber == 0]
|
| 146 |
+
|
| 147 |
+
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
|
| 148 |
+
plot of a distribution that shows the mean and distribution around that
|
| 149 |
+
mean per bin, on top of a regular histogram and the probability
|
| 150 |
+
distribution function:
|
| 151 |
+
|
| 152 |
+
>>> x = np.linspace(0, 5, num=500)
|
| 153 |
+
>>> x_pdf = stats.maxwell.pdf(x)
|
| 154 |
+
>>> samples = stats.maxwell.rvs(size=10000)
|
| 155 |
+
|
| 156 |
+
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
|
| 157 |
+
... statistic='mean', bins=25)
|
| 158 |
+
>>> bin_width = (bin_edges[1] - bin_edges[0])
|
| 159 |
+
>>> bin_centers = bin_edges[1:] - bin_width/2
|
| 160 |
+
|
| 161 |
+
>>> plt.figure()
|
| 162 |
+
>>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
|
| 163 |
+
... alpha=0.2, label='histogram of data')
|
| 164 |
+
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
|
| 165 |
+
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
|
| 166 |
+
... label='binned statistic of data')
|
| 167 |
+
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
|
| 168 |
+
>>> plt.legend(fontsize=10)
|
| 169 |
+
>>> plt.show()
|
| 170 |
+
|
| 171 |
+
"""
|
| 172 |
+
try:
|
| 173 |
+
N = len(bins)
|
| 174 |
+
except TypeError:
|
| 175 |
+
N = 1
|
| 176 |
+
|
| 177 |
+
if N != 1:
|
| 178 |
+
bins = [np.asarray(bins, float)]
|
| 179 |
+
|
| 180 |
+
if range is not None:
|
| 181 |
+
if len(range) == 2:
|
| 182 |
+
range = [range]
|
| 183 |
+
|
| 184 |
+
medians, edges, binnumbers = binned_statistic_dd(
|
| 185 |
+
[x], values, statistic, bins, range)
|
| 186 |
+
|
| 187 |
+
return BinnedStatisticResult(medians, edges[0], binnumbers)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
|
| 191 |
+
('statistic', 'x_edge', 'y_edge',
|
| 192 |
+
'binnumber'))
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def binned_statistic_2d(x, y, values, statistic='mean',
|
| 196 |
+
bins=10, range=None, expand_binnumbers=False):
|
| 197 |
+
"""
|
| 198 |
+
Compute a bidimensional binned statistic for one or more sets of data.
|
| 199 |
+
|
| 200 |
+
This is a generalization of a histogram2d function. A histogram divides
|
| 201 |
+
the space into bins, and returns the count of the number of points in
|
| 202 |
+
each bin. This function allows the computation of the sum, mean, median,
|
| 203 |
+
or other statistic of the values (or set of values) within each bin.
|
| 204 |
+
|
| 205 |
+
Parameters
|
| 206 |
+
----------
|
| 207 |
+
x : (N,) array_like
|
| 208 |
+
A sequence of values to be binned along the first dimension.
|
| 209 |
+
y : (N,) array_like
|
| 210 |
+
A sequence of values to be binned along the second dimension.
|
| 211 |
+
values : (N,) array_like or list of (N,) array_like
|
| 212 |
+
The data on which the statistic will be computed. This must be
|
| 213 |
+
the same shape as `x`, or a list of sequences - each with the same
|
| 214 |
+
shape as `x`. If `values` is such a list, the statistic will be
|
| 215 |
+
computed on each independently.
|
| 216 |
+
statistic : string or callable, optional
|
| 217 |
+
The statistic to compute (default is 'mean').
|
| 218 |
+
The following statistics are available:
|
| 219 |
+
|
| 220 |
+
* 'mean' : compute the mean of values for points within each bin.
|
| 221 |
+
Empty bins will be represented by NaN.
|
| 222 |
+
* 'std' : compute the standard deviation within each bin. This
|
| 223 |
+
is implicitly calculated with ddof=0.
|
| 224 |
+
* 'median' : compute the median of values for points within each
|
| 225 |
+
bin. Empty bins will be represented by NaN.
|
| 226 |
+
* 'count' : compute the count of points within each bin. This is
|
| 227 |
+
identical to an unweighted histogram. `values` array is not
|
| 228 |
+
referenced.
|
| 229 |
+
* 'sum' : compute the sum of values for points within each bin.
|
| 230 |
+
This is identical to a weighted histogram.
|
| 231 |
+
* 'min' : compute the minimum of values for points within each bin.
|
| 232 |
+
Empty bins will be represented by NaN.
|
| 233 |
+
* 'max' : compute the maximum of values for point within each bin.
|
| 234 |
+
Empty bins will be represented by NaN.
|
| 235 |
+
* function : a user-defined function which takes a 1D array of
|
| 236 |
+
values, and outputs a single numerical statistic. This function
|
| 237 |
+
will be called on the values in each bin. Empty bins will be
|
| 238 |
+
represented by function([]), or NaN if this returns an error.
|
| 239 |
+
|
| 240 |
+
bins : int or [int, int] or array_like or [array, array], optional
|
| 241 |
+
The bin specification:
|
| 242 |
+
|
| 243 |
+
* the number of bins for the two dimensions (nx = ny = bins),
|
| 244 |
+
* the number of bins in each dimension (nx, ny = bins),
|
| 245 |
+
* the bin edges for the two dimensions (x_edge = y_edge = bins),
|
| 246 |
+
* the bin edges in each dimension (x_edge, y_edge = bins).
|
| 247 |
+
|
| 248 |
+
If the bin edges are specified, the number of bins will be,
|
| 249 |
+
(nx = len(x_edge)-1, ny = len(y_edge)-1).
|
| 250 |
+
|
| 251 |
+
range : (2,2) array_like, optional
|
| 252 |
+
The leftmost and rightmost edges of the bins along each dimension
|
| 253 |
+
(if not specified explicitly in the `bins` parameters):
|
| 254 |
+
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
|
| 255 |
+
considered outliers and not tallied in the histogram.
|
| 256 |
+
expand_binnumbers : bool, optional
|
| 257 |
+
'False' (default): the returned `binnumber` is a shape (N,) array of
|
| 258 |
+
linearized bin indices.
|
| 259 |
+
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
|
| 260 |
+
ndarray, where each row gives the bin numbers in the corresponding
|
| 261 |
+
dimension.
|
| 262 |
+
See the `binnumber` returned value, and the `Examples` section.
|
| 263 |
+
|
| 264 |
+
.. versionadded:: 0.17.0
|
| 265 |
+
|
| 266 |
+
Returns
|
| 267 |
+
-------
|
| 268 |
+
statistic : (nx, ny) ndarray
|
| 269 |
+
The values of the selected statistic in each two-dimensional bin.
|
| 270 |
+
x_edge : (nx + 1) ndarray
|
| 271 |
+
The bin edges along the first dimension.
|
| 272 |
+
y_edge : (ny + 1) ndarray
|
| 273 |
+
The bin edges along the second dimension.
|
| 274 |
+
binnumber : (N,) array of ints or (2,N) ndarray of ints
|
| 275 |
+
This assigns to each element of `sample` an integer that represents the
|
| 276 |
+
bin in which this observation falls. The representation depends on the
|
| 277 |
+
`expand_binnumbers` argument. See `Notes` for details.
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
See Also
|
| 281 |
+
--------
|
| 282 |
+
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
|
| 283 |
+
|
| 284 |
+
Notes
|
| 285 |
+
-----
|
| 286 |
+
Binedges:
|
| 287 |
+
All but the last (righthand-most) bin is half-open. In other words, if
|
| 288 |
+
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
|
| 289 |
+
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
|
| 290 |
+
``[3, 4]``, which *includes* 4.
|
| 291 |
+
|
| 292 |
+
`binnumber`:
|
| 293 |
+
This returned argument assigns to each element of `sample` an integer that
|
| 294 |
+
represents the bin in which it belongs. The representation depends on the
|
| 295 |
+
`expand_binnumbers` argument. If 'False' (default): The returned
|
| 296 |
+
`binnumber` is a shape (N,) array of linearized indices mapping each
|
| 297 |
+
element of `sample` to its corresponding bin (using row-major ordering).
|
| 298 |
+
Note that the returned linearized bin indices are used for an array with
|
| 299 |
+
extra bins on the outer binedges to capture values outside of the defined
|
| 300 |
+
bin bounds.
|
| 301 |
+
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
|
| 302 |
+
each row indicates bin placements for each dimension respectively. In each
|
| 303 |
+
dimension, a binnumber of `i` means the corresponding value is between
|
| 304 |
+
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
|
| 305 |
+
|
| 306 |
+
.. versionadded:: 0.11.0
|
| 307 |
+
|
| 308 |
+
Examples
|
| 309 |
+
--------
|
| 310 |
+
>>> from scipy import stats
|
| 311 |
+
|
| 312 |
+
Calculate the counts with explicit bin-edges:
|
| 313 |
+
|
| 314 |
+
>>> x = [0.1, 0.1, 0.1, 0.6]
|
| 315 |
+
>>> y = [2.1, 2.6, 2.1, 2.1]
|
| 316 |
+
>>> binx = [0.0, 0.5, 1.0]
|
| 317 |
+
>>> biny = [2.0, 2.5, 3.0]
|
| 318 |
+
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny])
|
| 319 |
+
>>> ret.statistic
|
| 320 |
+
array([[2., 1.],
|
| 321 |
+
[1., 0.]])
|
| 322 |
+
|
| 323 |
+
The bin in which each sample is placed is given by the `binnumber`
|
| 324 |
+
returned parameter. By default, these are the linearized bin indices:
|
| 325 |
+
|
| 326 |
+
>>> ret.binnumber
|
| 327 |
+
array([5, 6, 5, 9])
|
| 328 |
+
|
| 329 |
+
The bin indices can also be expanded into separate entries for each
|
| 330 |
+
dimension using the `expand_binnumbers` parameter:
|
| 331 |
+
|
| 332 |
+
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
|
| 333 |
+
... expand_binnumbers=True)
|
| 334 |
+
>>> ret.binnumber
|
| 335 |
+
array([[1, 1, 1, 2],
|
| 336 |
+
[1, 2, 1, 1]])
|
| 337 |
+
|
| 338 |
+
Which shows that the first three elements belong in the xbin 1, and the
|
| 339 |
+
fourth into xbin 2; and so on for y.
|
| 340 |
+
|
| 341 |
+
"""
|
| 342 |
+
|
| 343 |
+
# This code is based on np.histogram2d
|
| 344 |
+
try:
|
| 345 |
+
N = len(bins)
|
| 346 |
+
except TypeError:
|
| 347 |
+
N = 1
|
| 348 |
+
|
| 349 |
+
if N != 1 and N != 2:
|
| 350 |
+
xedges = yedges = np.asarray(bins, float)
|
| 351 |
+
bins = [xedges, yedges]
|
| 352 |
+
|
| 353 |
+
medians, edges, binnumbers = binned_statistic_dd(
|
| 354 |
+
[x, y], values, statistic, bins, range,
|
| 355 |
+
expand_binnumbers=expand_binnumbers)
|
| 356 |
+
|
| 357 |
+
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
|
| 361 |
+
('statistic', 'bin_edges',
|
| 362 |
+
'binnumber'))
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def _bincount(x, weights):
|
| 366 |
+
if np.iscomplexobj(weights):
|
| 367 |
+
a = np.bincount(x, np.real(weights))
|
| 368 |
+
b = np.bincount(x, np.imag(weights))
|
| 369 |
+
z = a + b*1j
|
| 370 |
+
|
| 371 |
+
else:
|
| 372 |
+
z = np.bincount(x, weights)
|
| 373 |
+
return z
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def binned_statistic_dd(sample, values, statistic='mean',
                        bins=10, range=None, expand_binnumbers=False,
                        binned_statistic_result=None):
    """
    Compute a multidimensional binned statistic for a set of data.

    This is a generalization of a histogramdd function.  A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin.  This function allows the computation of the sum, mean, median,
    or other statistic of the values within each bin.

    Parameters
    ----------
    sample : array_like
        Data to histogram passed as a sequence of N arrays of length D, or
        as an (N,D) array.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed.  This must be
        the same shape as `sample`, or a list of sequences - each with the
        same shape as `sample`.  If `values` is such a list, the statistic
        will be computed on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

          * 'mean' : compute the mean of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'median' : compute the median of values for points within each
            bin. Empty bins will be represented by NaN.
          * 'count' : compute the count of points within each bin.  This is
            identical to an unweighted histogram.  `values` array is not
            referenced.
          * 'sum' : compute the sum of values for points within each bin.
            This is identical to a weighted histogram.
          * 'std' : compute the standard deviation within each bin. This
            is implicitly calculated with ddof=0. If the number of values
            within a given bin is 0 or 1, the computed standard deviation
            value will be 0 for the bin.
          * 'min' : compute the minimum of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for point within each bin.
            Empty bins will be represented by NaN.
          * function : a user-defined function which takes a 1D array of
            values, and outputs a single numerical statistic. This function
            will be called on the values in each bin.  Empty bins will be
            represented by function([]), or NaN if this returns an error.

    bins : sequence or positive int, optional
        The bin specification must be in one of the following forms:

          * A sequence of arrays describing the bin edges along each
            dimension.
          * The number of bins for each dimension (nx, ny, ... = bins).
          * The number of bins for all dimensions (nx = ny = ... = bins).
    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    expand_binnumbers : bool, optional
        'False' (default): the returned `binnumber` is a shape (N,) array of
        linearized bin indices.
        'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
        ndarray, where each row gives the bin numbers in the corresponding
        dimension.
        See the `binnumber` returned value, and the `Examples` section of
        `binned_statistic_2d`.
    binned_statistic_result : BinnedStatisticddResult
        Result of a previous call to the function in order to reuse bin edges
        and bin numbers with new values and/or a different statistic.
        To reuse bin numbers, `expand_binnumbers` must have been set to False
        (the default)

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
    bin_edges : list of ndarrays
        A list of D arrays describing the (nxi + 1) bin edges for each
        dimension.
    binnumber : (N,) array of ints or (D,N) ndarray of ints
        This assigns to each element of `sample` an integer that represents
        the bin in which this observation falls.  The representation depends
        on the `expand_binnumbers` argument.  See `Notes` for details.


    See Also
    --------
    numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d

    Notes
    -----
    Binedges:
    All but the last (righthand-most) bin is half-open in each dimension.  In
    other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
    ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``.  The
    last bin, however, is ``[3, 4]``, which *includes* 4.

    `binnumber`:
    This returned argument assigns to each element of `sample` an integer
    that represents the bin in which it belongs.  The representation depends
    on the `expand_binnumbers` argument. If 'False' (default): The returned
    `binnumber` is a shape (N,) array of linearized indices mapping each
    element of `sample` to its corresponding bin (using row-major ordering).
    If 'True': The returned `binnumber` is a shape (D,N) ndarray where
    each row indicates bin placements for each dimension respectively.  In
    each dimension, a binnumber of `i` means the corresponding value is
    between (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> from mpl_toolkits.mplot3d import Axes3D

    Take an array of 600 (x, y) coordinates as an example.
    `binned_statistic_dd` can handle arrays of higher dimension `D`. But a
    plot of dimension `D+1` is required.

    >>> mu = np.array([0., 1.])
    >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
    >>> multinormal = stats.multivariate_normal(mu, sigma)
    >>> data = multinormal.rvs(size=600, random_state=235412)
    >>> data.shape
    (600, 2)

    Create bins and count how many arrays fall in each bin:

    >>> N = 60
    >>> x = np.linspace(-3, 3, N)
    >>> y = np.linspace(-3, 4, N)
    >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
    ...                                 statistic='count')
    >>> bincounts = ret.statistic

    Set the volume and the location of bars:

    >>> dx = x[1] - x[0]
    >>> dy = y[1] - y[0]
    >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
    >>> z = 0

    >>> bincounts = bincounts.ravel()
    >>> x = x.ravel()
    >>> y = y.ravel()

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> with np.errstate(divide='ignore'):   # silence random axes3d warning
    ...     ax.bar3d(x, y, z, dx, dy, bincounts)

    Reuse bin numbers and bin edges with new values:

    >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
    ...                                  binned_statistic_result=ret,
    ...                                  statistic='mean')
    """
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
    if not callable(statistic) and statistic not in known_stats:
        raise ValueError(f'invalid statistic {statistic!r}')

    try:
        bins = index(bins)
    except TypeError:
        # bins is not an integer
        pass
    # If bins was an integer-like object, now it is an actual Python int.

    # NOTE: for _bin_edges(), see e.g. gh-11365
    if isinstance(bins, int) and not np.isfinite(sample).all():
        raise ValueError(f'{sample!r} contains non-finite values.')

    # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
    # `Dlen` is the length of elements along each dimension.
    # This code is based on np.histogramdd
    try:
        # `sample` is an ND-array.
        Dlen, Ndim = sample.shape
    except (AttributeError, ValueError):
        # `sample` is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        Dlen, Ndim = sample.shape

    # Store initial shape of `values` to preserve it in the output
    values = np.asarray(values)
    input_shape = list(values.shape)
    # Make sure that `values` is 2D to iterate over rows
    values = np.atleast_2d(values)
    Vdim, Vlen = values.shape

    # Make sure `values` match `sample`
    if statistic != 'count' and Vlen != Dlen:
        raise AttributeError('The number of `values` elements must match the '
                             'length of each `sample` dimension.')

    try:
        M = len(bins)
        if M != Ndim:
            raise AttributeError('The dimension of bins must be equal '
                                 'to the dimension of the sample x.')
    except TypeError:
        # A single integer bin count: use it for every dimension.
        bins = Ndim * [bins]

    if binned_statistic_result is None:
        nbin, edges, dedges = _bin_edges(sample, bins, range)
        binnumbers = _bin_numbers(sample, nbin, edges, dedges)
    else:
        # Reuse edges/binnumbers from a previous call; `len(edges[i])` is
        # nbins+1, so +1 more accounts for the two outlier bins.
        edges = binned_statistic_result.bin_edges
        nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
        # +1 for outlier bins
        dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
        binnumbers = binned_statistic_result.binnumber

    # Avoid overflow with double precision. Complex `values` -> `complex128`.
    result_type = np.result_type(values, np.float64)
    result = np.empty([Vdim, nbin.prod()], dtype=result_type)

    if statistic in {'mean', np.mean}:
        result.fill(np.nan)
        flatcount = _bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in builtins.range(Vdim):
            flatsum = _bincount(binnumbers, values[vv])
            result[vv, a] = flatsum[a] / flatcount[a]
    elif statistic in {'std', np.std}:
        result.fill(np.nan)
        flatcount = _bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in builtins.range(Vdim):
            flatsum = _bincount(binnumbers, values[vv])
            delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers]
            std = np.sqrt(
                _bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a]
            )
            result[vv, a] = std
        result = np.real(result)
    elif statistic == 'count':
        result = np.empty([Vdim, nbin.prod()], dtype=np.float64)
        result.fill(0)
        flatcount = _bincount(binnumbers, None)
        a = np.arange(len(flatcount))
        result[:, a] = flatcount[np.newaxis, :]
    elif statistic in {'sum', np.sum}:
        result.fill(0)
        for vv in builtins.range(Vdim):
            flatsum = _bincount(binnumbers, values[vv])
            a = np.arange(len(flatsum))
            result[vv, a] = flatsum
    elif statistic in {'median', np.median}:
        result.fill(np.nan)
        for vv in builtins.range(Vdim):
            # Sort by bin number, then by value within each bin, so the
            # median of each bin sits at the midpoint of its sorted run.
            i = np.lexsort((values[vv], binnumbers))
            _, j, counts = np.unique(binnumbers[i],
                                     return_index=True, return_counts=True)
            mid = j + (counts - 1) / 2
            # Average the two middle elements (they coincide for odd counts).
            mid_a = values[vv, i][np.floor(mid).astype(int)]
            mid_b = values[vv, i][np.ceil(mid).astype(int)]
            medians = (mid_a + mid_b) / 2
            result[vv, binnumbers[i][j]] = medians
    elif statistic in {'min', np.min}:
        result.fill(np.nan)
        for vv in builtins.range(Vdim):
            i = np.argsort(values[vv])[::-1]  # Reversed so the min is last
            result[vv, binnumbers[i]] = values[vv, i]
    elif statistic in {'max', np.max}:
        result.fill(np.nan)
        for vv in builtins.range(Vdim):
            # Ascending sort: the max of each bin is written last and wins.
            i = np.argsort(values[vv])
            result[vv, binnumbers[i]] = values[vv, i]
    elif callable(statistic):
        with np.errstate(invalid='ignore'), catch_warnings():
            simplefilter("ignore", RuntimeWarning)
            try:
                null = statistic([])
            except Exception:
                null = np.nan
        if np.iscomplexobj(null):
            result = result.astype(np.complex128)
        result.fill(null)
        try:
            _calc_binned_statistic(
                Vdim, binnumbers, result, values, statistic
            )
        except ValueError:
            # The callable produced complex output for a real result array;
            # promote and retry.
            result = result.astype(np.complex128)
            _calc_binned_statistic(
                Vdim, binnumbers, result, values, statistic
            )

    # Shape into a proper matrix
    result = result.reshape(np.append(Vdim, nbin))

    # Remove outliers (indices 0 and -1 for each bin-dimension).
    core = tuple([slice(None)] + Ndim * [slice(1, -1)])
    result = result[core]

    # Unravel binnumbers into an ndarray, each row the bins for each dimension
    if expand_binnumbers and Ndim > 1:
        binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))

    if np.any(result.shape[1:] != nbin - 2):
        raise RuntimeError('Internal Shape Error')

    # Reshape to have output (`result`) match input (`values`) shape
    result = result.reshape(input_shape[:-1] + list(nbin-2))

    return BinnedStatisticddResult(result, edges, binnumbers)
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func):
    """Apply a user-supplied statistic callable to the values in each bin.

    Parameters
    ----------
    Vdim : int
        Number of rows in `values`; the statistic is computed per row.
    bin_numbers : ndarray
        Linearized bin index of each sample.
    result : ndarray, shape (Vdim, nbins)
        Output array; written in place at the occupied bin indices.
    values : ndarray, shape (Vdim, N)
        The data on which the statistic is computed.
    stat_func : callable
        Called with a 1D array of the values in one bin.

    Raises
    ------
    ValueError
        If `stat_func` returns a complex value while `result` is real.
        The caller catches this, promotes `result` to complex128 and
        retries.
    """
    unique_bin_numbers = np.unique(bin_numbers)
    for vv in builtins.range(Vdim):
        bin_map = _create_binned_data(bin_numbers, unique_bin_numbers,
                                      values, vv)
        for i in unique_bin_numbers:
            stat = stat_func(np.array(bin_map[i]))
            if np.iscomplexobj(stat) and not np.iscomplexobj(result):
                # Fix: the original message was truncated ("... returns
                # complex "). Callers only rely on the exception type.
                raise ValueError("The statistic function returns complex "
                                 "values, but the result array is real; "
                                 "a complex result array is required.")
            result[vv, i] = stat
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):
|
| 701 |
+
""" Create hashmap of bin ids to values in bins
|
| 702 |
+
key: bin number
|
| 703 |
+
value: list of binned data
|
| 704 |
+
"""
|
| 705 |
+
bin_map = dict()
|
| 706 |
+
for i in unique_bin_numbers:
|
| 707 |
+
bin_map[i] = []
|
| 708 |
+
for i in builtins.range(len(bin_numbers)):
|
| 709 |
+
bin_map[bin_numbers[i]].append(values[vv, i])
|
| 710 |
+
return bin_map
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
def _bin_edges(sample, bins=None, range=None):
|
| 714 |
+
""" Create edge arrays
|
| 715 |
+
"""
|
| 716 |
+
Dlen, Ndim = sample.shape
|
| 717 |
+
|
| 718 |
+
nbin = np.empty(Ndim, int) # Number of bins in each dimension
|
| 719 |
+
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
|
| 720 |
+
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
|
| 721 |
+
|
| 722 |
+
# Select range for each dimension
|
| 723 |
+
# Used only if number of bins is given.
|
| 724 |
+
if range is None:
|
| 725 |
+
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
|
| 726 |
+
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
|
| 727 |
+
else:
|
| 728 |
+
if len(range) != Ndim:
|
| 729 |
+
raise ValueError(
|
| 730 |
+
f"range given for {len(range)} dimensions; {Ndim} required")
|
| 731 |
+
smin = np.empty(Ndim)
|
| 732 |
+
smax = np.empty(Ndim)
|
| 733 |
+
for i in builtins.range(Ndim):
|
| 734 |
+
if range[i][1] < range[i][0]:
|
| 735 |
+
raise ValueError(
|
| 736 |
+
"In {}range, start must be <= stop".format(
|
| 737 |
+
f"dimension {i + 1} of " if Ndim > 1 else ""))
|
| 738 |
+
smin[i], smax[i] = range[i]
|
| 739 |
+
|
| 740 |
+
# Make sure the bins have a finite width.
|
| 741 |
+
for i in builtins.range(len(smin)):
|
| 742 |
+
if smin[i] == smax[i]:
|
| 743 |
+
smin[i] = smin[i] - .5
|
| 744 |
+
smax[i] = smax[i] + .5
|
| 745 |
+
|
| 746 |
+
# Preserve sample floating point precision in bin edges
|
| 747 |
+
edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating)
|
| 748 |
+
else float)
|
| 749 |
+
|
| 750 |
+
# Create edge arrays
|
| 751 |
+
for i in builtins.range(Ndim):
|
| 752 |
+
if np.isscalar(bins[i]):
|
| 753 |
+
nbin[i] = bins[i] + 2 # +2 for outlier bins
|
| 754 |
+
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1,
|
| 755 |
+
dtype=edges_dtype)
|
| 756 |
+
else:
|
| 757 |
+
edges[i] = np.asarray(bins[i], edges_dtype)
|
| 758 |
+
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
|
| 759 |
+
dedges[i] = np.diff(edges[i])
|
| 760 |
+
|
| 761 |
+
nbin = np.asarray(nbin)
|
| 762 |
+
|
| 763 |
+
return nbin, edges, dedges
|
| 764 |
+
|
| 765 |
+
|
| 766 |
+
def _bin_numbers(sample, nbin, edges, dedges):
|
| 767 |
+
"""Compute the bin number each sample falls into, in each dimension
|
| 768 |
+
"""
|
| 769 |
+
Dlen, Ndim = sample.shape
|
| 770 |
+
|
| 771 |
+
sampBin = [
|
| 772 |
+
np.digitize(sample[:, i], edges[i])
|
| 773 |
+
for i in range(Ndim)
|
| 774 |
+
]
|
| 775 |
+
|
| 776 |
+
# Using `digitize`, values that fall on an edge are put in the right bin.
|
| 777 |
+
# For the rightmost bin, we want values equal to the right
|
| 778 |
+
# edge to be counted in the last bin, and not as an outlier.
|
| 779 |
+
for i in range(Ndim):
|
| 780 |
+
# Find the rounding precision
|
| 781 |
+
dedges_min = dedges[i].min()
|
| 782 |
+
if dedges_min == 0:
|
| 783 |
+
raise ValueError('The smallest edge difference is numerically 0.')
|
| 784 |
+
decimal = int(-np.log10(dedges_min)) + 6
|
| 785 |
+
# Find which points are on the rightmost edge.
|
| 786 |
+
on_edge = np.where((sample[:, i] >= edges[i][-1]) &
|
| 787 |
+
(np.around(sample[:, i], decimal) ==
|
| 788 |
+
np.around(edges[i][-1], decimal)))[0]
|
| 789 |
+
# Shift these points one bin to the left.
|
| 790 |
+
sampBin[i][on_edge] -= 1
|
| 791 |
+
|
| 792 |
+
# Compute the sample indices in the flattened statistic matrix.
|
| 793 |
+
binnumbers = np.ravel_multi_index(sampBin, nbin)
|
| 794 |
+
|
| 795 |
+
return binnumbers
|
llava_next/lib/python3.10/site-packages/scipy/stats/_censored_data.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def _validate_1d(a, name, allow_inf=False):
|
| 5 |
+
if np.ndim(a) != 1:
|
| 6 |
+
raise ValueError(f'`{name}` must be a one-dimensional sequence.')
|
| 7 |
+
if np.isnan(a).any():
|
| 8 |
+
raise ValueError(f'`{name}` must not contain nan.')
|
| 9 |
+
if not allow_inf and np.isinf(a).any():
|
| 10 |
+
raise ValueError(f'`{name}` must contain only finite values.')
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _validate_interval(interval):
|
| 14 |
+
interval = np.asarray(interval)
|
| 15 |
+
if interval.shape == (0,):
|
| 16 |
+
# The input was a sequence with length 0.
|
| 17 |
+
interval = interval.reshape((0, 2))
|
| 18 |
+
if interval.ndim != 2 or interval.shape[-1] != 2:
|
| 19 |
+
raise ValueError('`interval` must be a two-dimensional array with '
|
| 20 |
+
'shape (m, 2), where m is the number of '
|
| 21 |
+
'interval-censored values, but got shape '
|
| 22 |
+
f'{interval.shape}')
|
| 23 |
+
|
| 24 |
+
if np.isnan(interval).any():
|
| 25 |
+
raise ValueError('`interval` must not contain nan.')
|
| 26 |
+
if np.isinf(interval).all(axis=1).any():
|
| 27 |
+
raise ValueError('In each row in `interval`, both values must not'
|
| 28 |
+
' be infinite.')
|
| 29 |
+
if (interval[:, 0] > interval[:, 1]).any():
|
| 30 |
+
raise ValueError('In each row of `interval`, the left value must not'
|
| 31 |
+
' exceed the right value.')
|
| 32 |
+
|
| 33 |
+
uncensored_mask = interval[:, 0] == interval[:, 1]
|
| 34 |
+
left_mask = np.isinf(interval[:, 0])
|
| 35 |
+
right_mask = np.isinf(interval[:, 1])
|
| 36 |
+
interval_mask = np.isfinite(interval).all(axis=1) & ~uncensored_mask
|
| 37 |
+
|
| 38 |
+
uncensored2 = interval[uncensored_mask, 0]
|
| 39 |
+
left2 = interval[left_mask, 1]
|
| 40 |
+
right2 = interval[right_mask, 0]
|
| 41 |
+
interval2 = interval[interval_mask]
|
| 42 |
+
|
| 43 |
+
return uncensored2, left2, right2, interval2
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _validate_x_censored(x, censored):
|
| 47 |
+
x = np.asarray(x)
|
| 48 |
+
if x.ndim != 1:
|
| 49 |
+
raise ValueError('`x` must be one-dimensional.')
|
| 50 |
+
censored = np.asarray(censored)
|
| 51 |
+
if censored.ndim != 1:
|
| 52 |
+
raise ValueError('`censored` must be one-dimensional.')
|
| 53 |
+
if (~np.isfinite(x)).any():
|
| 54 |
+
raise ValueError('`x` must not contain nan or inf.')
|
| 55 |
+
if censored.size != x.size:
|
| 56 |
+
raise ValueError('`x` and `censored` must have the same length.')
|
| 57 |
+
return x, censored.astype(bool)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class CensoredData:
|
| 61 |
+
"""
|
| 62 |
+
Instances of this class represent censored data.
|
| 63 |
+
|
| 64 |
+
Instances may be passed to the ``fit`` method of continuous
|
| 65 |
+
univariate SciPy distributions for maximum likelihood estimation.
|
| 66 |
+
The *only* method of the univariate continuous distributions that
|
| 67 |
+
understands `CensoredData` is the ``fit`` method. An instance of
|
| 68 |
+
`CensoredData` can not be passed to methods such as ``pdf`` and
|
| 69 |
+
``cdf``.
|
| 70 |
+
|
| 71 |
+
An observation is said to be *censored* when the precise value is unknown,
|
| 72 |
+
but it has a known upper and/or lower bound. The conventional terminology
|
| 73 |
+
is:
|
| 74 |
+
|
| 75 |
+
* left-censored: an observation is below a certain value but it is
|
| 76 |
+
unknown by how much.
|
| 77 |
+
* right-censored: an observation is above a certain value but it is
|
| 78 |
+
unknown by how much.
|
| 79 |
+
* interval-censored: an observation lies somewhere on an interval between
|
| 80 |
+
two values.
|
| 81 |
+
|
| 82 |
+
Left-, right-, and interval-censored data can be represented by
|
| 83 |
+
`CensoredData`.
|
| 84 |
+
|
| 85 |
+
For convenience, the class methods ``left_censored`` and
|
| 86 |
+
``right_censored`` are provided to create a `CensoredData`
|
| 87 |
+
instance from a single one-dimensional array of measurements
|
| 88 |
+
and a corresponding boolean array to indicate which measurements
|
| 89 |
+
are censored. The class method ``interval_censored`` accepts two
|
| 90 |
+
one-dimensional arrays that hold the lower and upper bounds of the
|
| 91 |
+
intervals.
|
| 92 |
+
|
| 93 |
+
Parameters
|
| 94 |
+
----------
|
| 95 |
+
uncensored : array_like, 1D
|
| 96 |
+
Uncensored observations.
|
| 97 |
+
left : array_like, 1D
|
| 98 |
+
Left-censored observations.
|
| 99 |
+
right : array_like, 1D
|
| 100 |
+
Right-censored observations.
|
| 101 |
+
interval : array_like, 2D, with shape (m, 2)
|
| 102 |
+
Interval-censored observations. Each row ``interval[k, :]``
|
| 103 |
+
represents the interval for the kth interval-censored observation.
|
| 104 |
+
|
| 105 |
+
Notes
|
| 106 |
+
-----
|
| 107 |
+
In the input array `interval`, the lower bound of the interval may
|
| 108 |
+
be ``-inf``, and the upper bound may be ``inf``, but at least one must be
|
| 109 |
+
finite. When the lower bound is ``-inf``, the row represents a left-
|
| 110 |
+
censored observation, and when the upper bound is ``inf``, the row
|
| 111 |
+
represents a right-censored observation. If the length of an interval
|
| 112 |
+
is 0 (i.e. ``interval[k, 0] == interval[k, 1]``, the observation is
|
| 113 |
+
treated as uncensored. So one can represent all the types of censored
|
| 114 |
+
and uncensored data in ``interval``, but it is generally more convenient
|
| 115 |
+
to use `uncensored`, `left` and `right` for uncensored, left-censored and
|
| 116 |
+
right-censored observations, respectively.
|
| 117 |
+
|
| 118 |
+
Examples
|
| 119 |
+
--------
|
| 120 |
+
In the most general case, a censored data set may contain values that
|
| 121 |
+
are left-censored, right-censored, interval-censored, and uncensored.
|
| 122 |
+
For example, here we create a data set with five observations. Two
|
| 123 |
+
are uncensored (values 1 and 1.5), one is a left-censored observation
|
| 124 |
+
of 0, one is a right-censored observation of 10 and one is
|
| 125 |
+
interval-censored in the interval [2, 3].
|
| 126 |
+
|
| 127 |
+
>>> import numpy as np
|
| 128 |
+
>>> from scipy.stats import CensoredData
|
| 129 |
+
>>> data = CensoredData(uncensored=[1, 1.5], left=[0], right=[10],
|
| 130 |
+
... interval=[[2, 3]])
|
| 131 |
+
>>> print(data)
|
| 132 |
+
CensoredData(5 values: 2 not censored, 1 left-censored,
|
| 133 |
+
1 right-censored, 1 interval-censored)
|
| 134 |
+
|
| 135 |
+
Equivalently,
|
| 136 |
+
|
| 137 |
+
>>> data = CensoredData(interval=[[1, 1],
|
| 138 |
+
... [1.5, 1.5],
|
| 139 |
+
... [-np.inf, 0],
|
| 140 |
+
... [10, np.inf],
|
| 141 |
+
... [2, 3]])
|
| 142 |
+
>>> print(data)
|
| 143 |
+
CensoredData(5 values: 2 not censored, 1 left-censored,
|
| 144 |
+
1 right-censored, 1 interval-censored)
|
| 145 |
+
|
| 146 |
+
A common case is to have a mix of uncensored observations and censored
|
| 147 |
+
observations that are all right-censored (or all left-censored). For
|
| 148 |
+
example, consider an experiment in which six devices are started at
|
| 149 |
+
various times and left running until they fail. Assume that time is
|
| 150 |
+
measured in hours, and the experiment is stopped after 30 hours, even
|
| 151 |
+
if all the devices have not failed by that time. We might end up with
|
| 152 |
+
data such as this::
|
| 153 |
+
|
| 154 |
+
Device Start-time Fail-time Time-to-failure
|
| 155 |
+
1 0 13 13
|
| 156 |
+
2 2 24 22
|
| 157 |
+
3 5 22 17
|
| 158 |
+
4 8 23 15
|
| 159 |
+
5 10 *** >20
|
| 160 |
+
6 12 *** >18
|
| 161 |
+
|
| 162 |
+
Two of the devices had not failed when the experiment was stopped;
|
| 163 |
+
the observations of the time-to-failure for these two devices are
|
| 164 |
+
right-censored. We can represent this data with
|
| 165 |
+
|
| 166 |
+
>>> data = CensoredData(uncensored=[13, 22, 17, 15], right=[20, 18])
|
| 167 |
+
>>> print(data)
|
| 168 |
+
CensoredData(6 values: 4 not censored, 2 right-censored)
|
| 169 |
+
|
| 170 |
+
Alternatively, we can use the method `CensoredData.right_censored` to
|
| 171 |
+
create a representation of this data. The time-to-failure observations
|
| 172 |
+
are put the list ``ttf``. The ``censored`` list indicates which values
|
| 173 |
+
in ``ttf`` are censored.
|
| 174 |
+
|
| 175 |
+
>>> ttf = [13, 22, 17, 15, 20, 18]
|
| 176 |
+
>>> censored = [False, False, False, False, True, True]
|
| 177 |
+
|
| 178 |
+
Pass these lists to `CensoredData.right_censored` to create an
|
| 179 |
+
instance of `CensoredData`.
|
| 180 |
+
|
| 181 |
+
>>> data = CensoredData.right_censored(ttf, censored)
|
| 182 |
+
>>> print(data)
|
| 183 |
+
CensoredData(6 values: 4 not censored, 2 right-censored)
|
| 184 |
+
|
| 185 |
+
If the input data is interval censored and already stored in two
|
| 186 |
+
arrays, one holding the low end of the intervals and another
|
| 187 |
+
holding the high ends, the class method ``interval_censored`` can
|
| 188 |
+
be used to create the `CensoredData` instance.
|
| 189 |
+
|
| 190 |
+
This example creates an instance with four interval-censored values.
|
| 191 |
+
The intervals are [10, 11], [0.5, 1], [2, 3], and [12.5, 13.5].
|
| 192 |
+
|
| 193 |
+
>>> a = [10, 0.5, 2, 12.5] # Low ends of the intervals
|
| 194 |
+
>>> b = [11, 1.0, 3, 13.5] # High ends of the intervals
|
| 195 |
+
>>> data = CensoredData.interval_censored(low=a, high=b)
|
| 196 |
+
>>> print(data)
|
| 197 |
+
CensoredData(4 values: 0 not censored, 4 interval-censored)
|
| 198 |
+
|
| 199 |
+
Finally, we create and censor some data from the `weibull_min`
|
| 200 |
+
distribution, and then fit `weibull_min` to that data. We'll assume
|
| 201 |
+
that the location parameter is known to be 0.
|
| 202 |
+
|
| 203 |
+
>>> from scipy.stats import weibull_min
|
| 204 |
+
>>> rng = np.random.default_rng()
|
| 205 |
+
|
| 206 |
+
Create the random data set.
|
| 207 |
+
|
| 208 |
+
>>> x = weibull_min.rvs(2.5, loc=0, scale=30, size=250, random_state=rng)
|
| 209 |
+
>>> x[x > 40] = 40 # Right-censor values greater or equal to 40.
|
| 210 |
+
|
| 211 |
+
Create the `CensoredData` instance with the `right_censored` method.
|
| 212 |
+
The censored values are those where the value is 40.
|
| 213 |
+
|
| 214 |
+
>>> data = CensoredData.right_censored(x, x == 40)
|
| 215 |
+
>>> print(data)
|
| 216 |
+
CensoredData(250 values: 215 not censored, 35 right-censored)
|
| 217 |
+
|
| 218 |
+
35 values have been right-censored.
|
| 219 |
+
|
| 220 |
+
Fit `weibull_min` to the censored data. We expect to shape and scale
|
| 221 |
+
to be approximately 2.5 and 30, respectively.
|
| 222 |
+
|
| 223 |
+
>>> weibull_min.fit(data, floc=0)
|
| 224 |
+
(2.3575922823897315, 0, 30.40650074451254)
|
| 225 |
+
|
| 226 |
+
"""
|
| 227 |
+
|
| 228 |
+
def __init__(self, uncensored=None, *, left=None, right=None,
             interval=None):
    """
    Collect uncensored, left-, right- and interval-censored values.

    Every argument may be omitted; an omitted argument contributes no
    values.  The rows of `interval` are validated and any degenerate
    rows returned by `_validate_interval` are merged into the other
    three categories.
    """
    # Replace each omitted argument with an empty collection so the
    # validation and concatenation below treat all inputs uniformly.
    uncensored = [] if uncensored is None else uncensored
    left = [] if left is None else left
    right = [] if right is None else right
    interval = np.empty((0, 2)) if interval is None else interval

    for values, name in ((uncensored, 'uncensored'),
                         (left, 'left'),
                         (right, 'right')):
        _validate_1d(values, name)
    extra_unc, extra_left, extra_right, interval2 = \
        _validate_interval(interval)

    self._uncensored = np.concatenate((uncensored, extra_unc))
    self._left = np.concatenate((left, extra_left))
    self._right = np.concatenate((right, extra_right))
    # Note that by construction, the private attribute _interval
    # will be a 2D array that contains only finite values representing
    # intervals with nonzero but finite length.
    self._interval = interval2
|
| 251 |
+
|
| 252 |
+
def __repr__(self):
    """Return an eval-style representation listing all four arrays."""
    def compact(arr):
        # Collapse numpy's possibly multi-line repr onto a single line.
        return " ".join(np.array_repr(arr).split())

    return (f"CensoredData(uncensored={compact(self._uncensored)}, "
            f"left={compact(self._left)}, "
            f"right={compact(self._right)}, "
            f"interval={compact(self._interval)})")
|
| 259 |
+
|
| 260 |
+
def __str__(self):
    """Return a short human-readable summary of the value counts."""
    counts = {
        'not censored': len(self._uncensored),
        'left-censored': len(self._left),
        'right-censored': len(self._right),
        'interval-censored': len(self._interval),
    }
    total = sum(counts.values())
    # The uncensored count is always reported; each censored category
    # is mentioned only when it is nonempty.
    pieces = [f"{counts['not censored']} not censored"]
    pieces.extend(f'{count} {label}' for label, count in counts.items()
                  if label != 'not censored' and count > 0)
    return f'CensoredData({total} values: ' + ', '.join(pieces) + ')'
|
| 274 |
+
|
| 275 |
+
# This is not a complete implementation of the arithmetic operators.
|
| 276 |
+
# All we need is subtracting a scalar and dividing by a scalar.
|
| 277 |
+
|
| 278 |
+
def __sub__(self, other):
    """Return a new instance with the scalar `other` subtracted from
    every stored value (uncensored, censored bounds and intervals)."""
    shifted = (self._uncensored - other, self._left - other,
               self._right - other, self._interval - other)
    return CensoredData(uncensored=shifted[0], left=shifted[1],
                        right=shifted[2], interval=shifted[3])
|
| 283 |
+
|
| 284 |
+
def __truediv__(self, other):
    """Return a new instance with every stored value (uncensored,
    censored bounds and intervals) divided by the scalar `other`."""
    scaled = (self._uncensored / other, self._left / other,
              self._right / other, self._interval / other)
    return CensoredData(uncensored=scaled[0], left=scaled[1],
                        right=scaled[2], interval=scaled[3])
|
| 289 |
+
|
| 290 |
+
def __len__(self):
    """
    The number of values (censored and not censored).
    """
    groups = (self._uncensored, self._left, self._right, self._interval)
    return sum(len(g) for g in groups)
|
| 296 |
+
|
| 297 |
+
def num_censored(self):
    """
    Number of censored values.
    """
    return sum(map(len, (self._left, self._right, self._interval)))
|
| 302 |
+
|
| 303 |
+
@classmethod
def right_censored(cls, x, censored):
    """
    Create a `CensoredData` instance of right-censored data.

    Parameters
    ----------
    x : array_like
        `x` is the array of observed data or measurements.
        `x` must be a one-dimensional sequence of finite numbers.
    censored : array_like of bool
        `censored` must be a one-dimensional sequence of boolean
        values.  If ``censored[k]`` is True, the corresponding value
        in `x` is right-censored; that is, ``x[k]`` is the lower
        bound of the true (but unknown) value.

    Returns
    -------
    data : `CensoredData`
        An instance of `CensoredData` that represents the
        collection of uncensored and right-censored values.

    Examples
    --------
    >>> from scipy.stats import CensoredData

    Two uncensored values (4 and 10) and two right-censored values
    (24 and 25).

    >>> data = CensoredData.right_censored([4, 10, 24, 25],
    ...                                    [False, False, True, True])
    >>> data
    CensoredData(uncensored=array([ 4., 10.]),
    left=array([], dtype=float64), right=array([24., 25.]),
    interval=array([], shape=(0, 2), dtype=float64))
    >>> print(data)
    CensoredData(4 values: 2 not censored, 2 right-censored)
    """
    values, mask = _validate_x_censored(x, censored)
    # Values flagged by `mask` are lower bounds; the rest are exact.
    return cls(uncensored=values[~mask], right=values[mask])
|
| 343 |
+
|
| 344 |
+
@classmethod
def left_censored(cls, x, censored):
    """
    Create a `CensoredData` instance of left-censored data.

    Parameters
    ----------
    x : array_like
        `x` is the array of observed data or measurements.
        `x` must be a one-dimensional sequence of finite numbers.
    censored : array_like of bool
        `censored` must be a one-dimensional sequence of boolean
        values.  If ``censored[k]`` is True, the corresponding value
        in `x` is left-censored; that is, ``x[k]`` is the upper
        bound of the true (but unknown) value.

    Returns
    -------
    data : `CensoredData`
        An instance of `CensoredData` that represents the
        collection of uncensored and left-censored values.

    Examples
    --------
    >>> from scipy.stats import CensoredData

    Two uncensored values (0.12 and 0.033) and two left-censored values
    (both 1e-3).

    >>> data = CensoredData.left_censored([0.12, 0.033, 1e-3, 1e-3],
    ...                                   [False, False, True, True])
    >>> data
    CensoredData(uncensored=array([0.12 , 0.033]),
    left=array([0.001, 0.001]), right=array([], dtype=float64),
    interval=array([], shape=(0, 2), dtype=float64))
    >>> print(data)
    CensoredData(4 values: 2 not censored, 2 left-censored)
    """
    values, mask = _validate_x_censored(x, censored)
    # Values flagged by `mask` are upper bounds; the rest are exact.
    return cls(uncensored=values[~mask], left=values[mask])
|
| 384 |
+
|
| 385 |
+
@classmethod
def interval_censored(cls, low, high):
    """
    Create a `CensoredData` instance of interval-censored data.

    This method is useful when all the data is interval-censored, and
    the low and high ends of the intervals are already stored in
    separate one-dimensional arrays.

    Parameters
    ----------
    low : array_like
        The one-dimensional array containing the low ends of the
        intervals.
    high : array_like
        The one-dimensional array containing the high ends of the
        intervals.

    Returns
    -------
    data : `CensoredData`
        An instance of `CensoredData` that represents the
        collection of censored values.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import CensoredData

    ``a`` and ``b`` are the low and high ends of a collection of
    interval-censored values.

    >>> a = [0.5, 2.0, 3.0, 5.5]
    >>> b = [1.0, 2.5, 3.5, 7.0]
    >>> data = CensoredData.interval_censored(low=a, high=b)
    >>> print(data)
    CensoredData(4 values: 0 not censored, 4 interval-censored)
    """
    # Infinite endpoints are permitted here; one-sided intervals are
    # sorted into left-/right-censored values by _validate_interval.
    _validate_1d(low, 'low', allow_inf=True)
    _validate_1d(high, 'high', allow_inf=True)
    if len(low) != len(high):
        raise ValueError('`low` and `high` must have the same length.')
    endpoints = np.column_stack((low, high))
    unc, lo, hi, ivl = _validate_interval(endpoints)
    return cls(uncensored=unc, left=lo, right=hi, interval=ivl)
|
| 431 |
+
|
| 432 |
+
def _uncensor(self):
    """
    Return a plain 1-d array approximating the data as if uncensored.

    This is used when a non-censored version of the data is needed to
    create a rough estimate of the parameters of a distribution via
    the method of moments or some similar method.  Each left- or
    right-censored value is represented by its given endpoint, and
    each interval-censored value by the midpoint of its interval.
    """
    midpoints = self._interval.mean(axis=1)
    return np.concatenate((self._uncensored, self._left, self._right,
                           midpoints))
|
| 444 |
+
|
| 445 |
+
def _supported(self, a, b):
    """
    Return a subset of self containing the values that are in
    (or overlap with) the interval (a, b).
    """
    unc = self._uncensored
    kept_unc = unc[(a < unc) & (unc < b)]
    # A left-censored value (an upper bound) can overlap (a, b) only if
    # it exceeds a; a right-censored value (a lower bound) only if it
    # lies below b.
    kept_left = self._left[self._left > a]
    kept_right = self._right[self._right < b]
    ivl = self._interval
    overlaps = (a < ivl[:, 1]) & (ivl[:, 0] < b)
    return CensoredData(kept_unc, left=kept_left, right=kept_right,
                        interval=ivl[overlaps])
|
llava_next/lib/python3.10/site-packages/scipy/stats/_constants.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Statistics-related constants.
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# The smallest representable positive number such that 1.0 + _EPS != 1.0.
|
| 9 |
+
_EPS = np.finfo(float).eps
|
| 10 |
+
|
| 11 |
+
# The largest [in magnitude] usable floating value.
|
| 12 |
+
_XMAX = np.finfo(float).max
|
| 13 |
+
|
| 14 |
+
# The log of the largest usable floating value; useful for knowing
|
| 15 |
+
# when exp(something) will overflow
|
| 16 |
+
_LOGXMAX = np.log(_XMAX)
|
| 17 |
+
|
| 18 |
+
# The smallest [in magnitude] usable (i.e. not subnormal) double precision
|
| 19 |
+
# floating value.
|
| 20 |
+
_XMIN = np.finfo(float).tiny
|
| 21 |
+
|
| 22 |
+
# The log of the smallest [in magnitude] usable (i.e not subnormal)
|
| 23 |
+
# double precision floating value.
|
| 24 |
+
_LOGXMIN = np.log(_XMIN)
|
| 25 |
+
|
| 26 |
+
# -special.psi(1)
|
| 27 |
+
_EULER = 0.577215664901532860606512090082402431042
|
| 28 |
+
|
| 29 |
+
# special.zeta(3, 1) Apery's constant
|
| 30 |
+
_ZETA3 = 1.202056903159594285399738161511449990765
|
| 31 |
+
|
| 32 |
+
# sqrt(pi)
|
| 33 |
+
_SQRT_PI = 1.772453850905516027298167483341145182798
|
| 34 |
+
|
| 35 |
+
# sqrt(2/pi)
|
| 36 |
+
_SQRT_2_OVER_PI = 0.7978845608028654
|
| 37 |
+
|
| 38 |
+
# log(sqrt(2/pi))
|
| 39 |
+
_LOG_SQRT_2_OVER_PI = -0.22579135264472744
|
llava_next/lib/python3.10/site-packages/scipy/stats/_covariance.py
ADDED
|
@@ -0,0 +1,633 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import cached_property
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy import linalg
|
| 5 |
+
from scipy.stats import _multivariate
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = ["Covariance"]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Covariance:
|
| 12 |
+
"""
|
| 13 |
+
Representation of a covariance matrix
|
| 14 |
+
|
| 15 |
+
Calculations involving covariance matrices (e.g. data whitening,
|
| 16 |
+
multivariate normal function evaluation) are often performed more
|
| 17 |
+
efficiently using a decomposition of the covariance matrix instead of the
|
| 18 |
+
covariance matrix itself. This class allows the user to construct an
|
| 19 |
+
object representing a covariance matrix using any of several
|
| 20 |
+
decompositions and perform calculations using a common interface.
|
| 21 |
+
|
| 22 |
+
.. note::
|
| 23 |
+
|
| 24 |
+
The `Covariance` class cannot be instantiated directly. Instead, use
|
| 25 |
+
one of the factory methods (e.g. `Covariance.from_diagonal`).
|
| 26 |
+
|
| 27 |
+
Examples
|
| 28 |
+
--------
|
| 29 |
+
The `Covariance` class is used by calling one of its
|
| 30 |
+
factory methods to create a `Covariance` object, then pass that
|
| 31 |
+
representation of the `Covariance` matrix as a shape parameter of a
|
| 32 |
+
multivariate distribution.
|
| 33 |
+
|
| 34 |
+
For instance, the multivariate normal distribution can accept an array
|
| 35 |
+
representing a covariance matrix:
|
| 36 |
+
|
| 37 |
+
>>> from scipy import stats
|
| 38 |
+
>>> import numpy as np
|
| 39 |
+
>>> d = [1, 2, 3]
|
| 40 |
+
>>> A = np.diag(d) # a diagonal covariance matrix
|
| 41 |
+
>>> x = [4, -2, 5] # a point of interest
|
| 42 |
+
>>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=A)
|
| 43 |
+
>>> dist.pdf(x)
|
| 44 |
+
4.9595685102808205e-08
|
| 45 |
+
|
| 46 |
+
but the calculations are performed in a very generic way that does not
|
| 47 |
+
take advantage of any special properties of the covariance matrix. Because
|
| 48 |
+
our covariance matrix is diagonal, we can use ``Covariance.from_diagonal``
|
| 49 |
+
to create an object representing the covariance matrix, and
|
| 50 |
+
`multivariate_normal` can use this to compute the probability density
|
| 51 |
+
function more efficiently.
|
| 52 |
+
|
| 53 |
+
>>> cov = stats.Covariance.from_diagonal(d)
|
| 54 |
+
>>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=cov)
|
| 55 |
+
>>> dist.pdf(x)
|
| 56 |
+
4.9595685102808205e-08
|
| 57 |
+
|
| 58 |
+
"""
|
| 59 |
+
def __init__(self):
|
| 60 |
+
message = ("The `Covariance` class cannot be instantiated directly. "
|
| 61 |
+
"Please use one of the factory methods "
|
| 62 |
+
"(e.g. `Covariance.from_diagonal`).")
|
| 63 |
+
raise NotImplementedError(message)
|
| 64 |
+
|
| 65 |
+
@staticmethod
|
| 66 |
+
def from_diagonal(diagonal):
|
| 67 |
+
r"""
|
| 68 |
+
Return a representation of a covariance matrix from its diagonal.
|
| 69 |
+
|
| 70 |
+
Parameters
|
| 71 |
+
----------
|
| 72 |
+
diagonal : array_like
|
| 73 |
+
The diagonal elements of a diagonal matrix.
|
| 74 |
+
|
| 75 |
+
Notes
|
| 76 |
+
-----
|
| 77 |
+
Let the diagonal elements of a diagonal covariance matrix :math:`D` be
|
| 78 |
+
stored in the vector :math:`d`.
|
| 79 |
+
|
| 80 |
+
When all elements of :math:`d` are strictly positive, whitening of a
|
| 81 |
+
data point :math:`x` is performed by computing
|
| 82 |
+
:math:`x \cdot d^{-1/2}`, where the inverse square root can be taken
|
| 83 |
+
element-wise.
|
| 84 |
+
:math:`\log\det{D}` is calculated as :math:`-2 \sum(\log{d})`,
|
| 85 |
+
where the :math:`\log` operation is performed element-wise.
|
| 86 |
+
|
| 87 |
+
This `Covariance` class supports singular covariance matrices. When
|
| 88 |
+
computing ``_log_pdet``, non-positive elements of :math:`d` are
|
| 89 |
+
ignored. Whitening is not well defined when the point to be whitened
|
| 90 |
+
does not lie in the span of the columns of the covariance matrix. The
|
| 91 |
+
convention taken here is to treat the inverse square root of
|
| 92 |
+
non-positive elements of :math:`d` as zeros.
|
| 93 |
+
|
| 94 |
+
Examples
|
| 95 |
+
--------
|
| 96 |
+
Prepare a symmetric positive definite covariance matrix ``A`` and a
|
| 97 |
+
data point ``x``.
|
| 98 |
+
|
| 99 |
+
>>> import numpy as np
|
| 100 |
+
>>> from scipy import stats
|
| 101 |
+
>>> rng = np.random.default_rng()
|
| 102 |
+
>>> n = 5
|
| 103 |
+
>>> A = np.diag(rng.random(n))
|
| 104 |
+
>>> x = rng.random(size=n)
|
| 105 |
+
|
| 106 |
+
Extract the diagonal from ``A`` and create the `Covariance` object.
|
| 107 |
+
|
| 108 |
+
>>> d = np.diag(A)
|
| 109 |
+
>>> cov = stats.Covariance.from_diagonal(d)
|
| 110 |
+
|
| 111 |
+
Compare the functionality of the `Covariance` object against a
|
| 112 |
+
reference implementations.
|
| 113 |
+
|
| 114 |
+
>>> res = cov.whiten(x)
|
| 115 |
+
>>> ref = np.diag(d**-0.5) @ x
|
| 116 |
+
>>> np.allclose(res, ref)
|
| 117 |
+
True
|
| 118 |
+
>>> res = cov.log_pdet
|
| 119 |
+
>>> ref = np.linalg.slogdet(A)[-1]
|
| 120 |
+
>>> np.allclose(res, ref)
|
| 121 |
+
True
|
| 122 |
+
|
| 123 |
+
"""
|
| 124 |
+
return CovViaDiagonal(diagonal)
|
| 125 |
+
|
| 126 |
+
@staticmethod
|
| 127 |
+
def from_precision(precision, covariance=None):
|
| 128 |
+
r"""
|
| 129 |
+
Return a representation of a covariance from its precision matrix.
|
| 130 |
+
|
| 131 |
+
Parameters
|
| 132 |
+
----------
|
| 133 |
+
precision : array_like
|
| 134 |
+
The precision matrix; that is, the inverse of a square, symmetric,
|
| 135 |
+
positive definite covariance matrix.
|
| 136 |
+
covariance : array_like, optional
|
| 137 |
+
The square, symmetric, positive definite covariance matrix. If not
|
| 138 |
+
provided, this may need to be calculated (e.g. to evaluate the
|
| 139 |
+
cumulative distribution function of
|
| 140 |
+
`scipy.stats.multivariate_normal`) by inverting `precision`.
|
| 141 |
+
|
| 142 |
+
Notes
|
| 143 |
+
-----
|
| 144 |
+
Let the covariance matrix be :math:`A`, its precision matrix be
|
| 145 |
+
:math:`P = A^{-1}`, and :math:`L` be the lower Cholesky factor such
|
| 146 |
+
that :math:`L L^T = P`.
|
| 147 |
+
Whitening of a data point :math:`x` is performed by computing
|
| 148 |
+
:math:`x^T L`. :math:`\log\det{A}` is calculated as
|
| 149 |
+
:math:`-2tr(\log{L})`, where the :math:`\log` operation is performed
|
| 150 |
+
element-wise.
|
| 151 |
+
|
| 152 |
+
This `Covariance` class does not support singular covariance matrices
|
| 153 |
+
because the precision matrix does not exist for a singular covariance
|
| 154 |
+
matrix.
|
| 155 |
+
|
| 156 |
+
Examples
|
| 157 |
+
--------
|
| 158 |
+
Prepare a symmetric positive definite precision matrix ``P`` and a
|
| 159 |
+
data point ``x``. (If the precision matrix is not already available,
|
| 160 |
+
consider the other factory methods of the `Covariance` class.)
|
| 161 |
+
|
| 162 |
+
>>> import numpy as np
|
| 163 |
+
>>> from scipy import stats
|
| 164 |
+
>>> rng = np.random.default_rng()
|
| 165 |
+
>>> n = 5
|
| 166 |
+
>>> P = rng.random(size=(n, n))
|
| 167 |
+
>>> P = P @ P.T # a precision matrix must be positive definite
|
| 168 |
+
>>> x = rng.random(size=n)
|
| 169 |
+
|
| 170 |
+
Create the `Covariance` object.
|
| 171 |
+
|
| 172 |
+
>>> cov = stats.Covariance.from_precision(P)
|
| 173 |
+
|
| 174 |
+
Compare the functionality of the `Covariance` object against
|
| 175 |
+
reference implementations.
|
| 176 |
+
|
| 177 |
+
>>> res = cov.whiten(x)
|
| 178 |
+
>>> ref = x @ np.linalg.cholesky(P)
|
| 179 |
+
>>> np.allclose(res, ref)
|
| 180 |
+
True
|
| 181 |
+
>>> res = cov.log_pdet
|
| 182 |
+
>>> ref = -np.linalg.slogdet(P)[-1]
|
| 183 |
+
>>> np.allclose(res, ref)
|
| 184 |
+
True
|
| 185 |
+
|
| 186 |
+
"""
|
| 187 |
+
return CovViaPrecision(precision, covariance)
|
| 188 |
+
|
| 189 |
+
@staticmethod
|
| 190 |
+
def from_cholesky(cholesky):
|
| 191 |
+
r"""
|
| 192 |
+
Representation of a covariance provided via the (lower) Cholesky factor
|
| 193 |
+
|
| 194 |
+
Parameters
|
| 195 |
+
----------
|
| 196 |
+
cholesky : array_like
|
| 197 |
+
The lower triangular Cholesky factor of the covariance matrix.
|
| 198 |
+
|
| 199 |
+
Notes
|
| 200 |
+
-----
|
| 201 |
+
Let the covariance matrix be :math:`A` and :math:`L` be the lower
|
| 202 |
+
Cholesky factor such that :math:`L L^T = A`.
|
| 203 |
+
Whitening of a data point :math:`x` is performed by computing
|
| 204 |
+
:math:`L^{-1} x`. :math:`\log\det{A}` is calculated as
|
| 205 |
+
:math:`2tr(\log{L})`, where the :math:`\log` operation is performed
|
| 206 |
+
element-wise.
|
| 207 |
+
|
| 208 |
+
This `Covariance` class does not support singular covariance matrices
|
| 209 |
+
because the Cholesky decomposition does not exist for a singular
|
| 210 |
+
covariance matrix.
|
| 211 |
+
|
| 212 |
+
Examples
|
| 213 |
+
--------
|
| 214 |
+
Prepare a symmetric positive definite covariance matrix ``A`` and a
|
| 215 |
+
data point ``x``.
|
| 216 |
+
|
| 217 |
+
>>> import numpy as np
|
| 218 |
+
>>> from scipy import stats
|
| 219 |
+
>>> rng = np.random.default_rng()
|
| 220 |
+
>>> n = 5
|
| 221 |
+
>>> A = rng.random(size=(n, n))
|
| 222 |
+
>>> A = A @ A.T # make the covariance symmetric positive definite
|
| 223 |
+
>>> x = rng.random(size=n)
|
| 224 |
+
|
| 225 |
+
Perform the Cholesky decomposition of ``A`` and create the
|
| 226 |
+
`Covariance` object.
|
| 227 |
+
|
| 228 |
+
>>> L = np.linalg.cholesky(A)
|
| 229 |
+
>>> cov = stats.Covariance.from_cholesky(L)
|
| 230 |
+
|
| 231 |
+
Compare the functionality of the `Covariance` object against
|
| 232 |
+
reference implementation.
|
| 233 |
+
|
| 234 |
+
>>> from scipy.linalg import solve_triangular
|
| 235 |
+
>>> res = cov.whiten(x)
|
| 236 |
+
>>> ref = solve_triangular(L, x, lower=True)
|
| 237 |
+
>>> np.allclose(res, ref)
|
| 238 |
+
True
|
| 239 |
+
>>> res = cov.log_pdet
|
| 240 |
+
>>> ref = np.linalg.slogdet(A)[-1]
|
| 241 |
+
>>> np.allclose(res, ref)
|
| 242 |
+
True
|
| 243 |
+
|
| 244 |
+
"""
|
| 245 |
+
return CovViaCholesky(cholesky)
|
| 246 |
+
|
| 247 |
+
@staticmethod
|
| 248 |
+
def from_eigendecomposition(eigendecomposition):
|
| 249 |
+
r"""
|
| 250 |
+
Representation of a covariance provided via eigendecomposition
|
| 251 |
+
|
| 252 |
+
Parameters
|
| 253 |
+
----------
|
| 254 |
+
eigendecomposition : sequence
|
| 255 |
+
A sequence (nominally a tuple) containing the eigenvalue and
|
| 256 |
+
eigenvector arrays as computed by `scipy.linalg.eigh` or
|
| 257 |
+
`numpy.linalg.eigh`.
|
| 258 |
+
|
| 259 |
+
Notes
|
| 260 |
+
-----
|
| 261 |
+
Let the covariance matrix be :math:`A`, let :math:`V` be matrix of
|
| 262 |
+
eigenvectors, and let :math:`W` be the diagonal matrix of eigenvalues
|
| 263 |
+
such that `V W V^T = A`.
|
| 264 |
+
|
| 265 |
+
When all of the eigenvalues are strictly positive, whitening of a
|
| 266 |
+
data point :math:`x` is performed by computing
|
| 267 |
+
:math:`x^T (V W^{-1/2})`, where the inverse square root can be taken
|
| 268 |
+
element-wise.
|
| 269 |
+
:math:`\log\det{A}` is calculated as :math:`tr(\log{W})`,
|
| 270 |
+
where the :math:`\log` operation is performed element-wise.
|
| 271 |
+
|
| 272 |
+
This `Covariance` class supports singular covariance matrices. When
|
| 273 |
+
computing ``_log_pdet``, non-positive eigenvalues are ignored.
|
| 274 |
+
Whitening is not well defined when the point to be whitened
|
| 275 |
+
does not lie in the span of the columns of the covariance matrix. The
|
| 276 |
+
convention taken here is to treat the inverse square root of
|
| 277 |
+
non-positive eigenvalues as zeros.
|
| 278 |
+
|
| 279 |
+
Examples
|
| 280 |
+
--------
|
| 281 |
+
Prepare a symmetric positive definite covariance matrix ``A`` and a
|
| 282 |
+
data point ``x``.
|
| 283 |
+
|
| 284 |
+
>>> import numpy as np
|
| 285 |
+
>>> from scipy import stats
|
| 286 |
+
>>> rng = np.random.default_rng()
|
| 287 |
+
>>> n = 5
|
| 288 |
+
>>> A = rng.random(size=(n, n))
|
| 289 |
+
>>> A = A @ A.T # make the covariance symmetric positive definite
|
| 290 |
+
>>> x = rng.random(size=n)
|
| 291 |
+
|
| 292 |
+
Perform the eigendecomposition of ``A`` and create the `Covariance`
|
| 293 |
+
object.
|
| 294 |
+
|
| 295 |
+
>>> w, v = np.linalg.eigh(A)
|
| 296 |
+
>>> cov = stats.Covariance.from_eigendecomposition((w, v))
|
| 297 |
+
|
| 298 |
+
Compare the functionality of the `Covariance` object against
|
| 299 |
+
reference implementations.
|
| 300 |
+
|
| 301 |
+
>>> res = cov.whiten(x)
|
| 302 |
+
>>> ref = x @ (v @ np.diag(w**-0.5))
|
| 303 |
+
>>> np.allclose(res, ref)
|
| 304 |
+
True
|
| 305 |
+
>>> res = cov.log_pdet
|
| 306 |
+
>>> ref = np.linalg.slogdet(A)[-1]
|
| 307 |
+
>>> np.allclose(res, ref)
|
| 308 |
+
True
|
| 309 |
+
|
| 310 |
+
"""
|
| 311 |
+
return CovViaEigendecomposition(eigendecomposition)
|
| 312 |
+
|
| 313 |
+
def whiten(self, x):
|
| 314 |
+
"""
|
| 315 |
+
Perform a whitening transformation on data.
|
| 316 |
+
|
| 317 |
+
"Whitening" ("white" as in "white noise", in which each frequency has
|
| 318 |
+
equal magnitude) transforms a set of random variables into a new set of
|
| 319 |
+
random variables with unit-diagonal covariance. When a whitening
|
| 320 |
+
transform is applied to a sample of points distributed according to
|
| 321 |
+
a multivariate normal distribution with zero mean, the covariance of
|
| 322 |
+
the transformed sample is approximately the identity matrix.
|
| 323 |
+
|
| 324 |
+
Parameters
|
| 325 |
+
----------
|
| 326 |
+
x : array_like
|
| 327 |
+
An array of points. The last dimension must correspond with the
|
| 328 |
+
dimensionality of the space, i.e., the number of columns in the
|
| 329 |
+
covariance matrix.
|
| 330 |
+
|
| 331 |
+
Returns
|
| 332 |
+
-------
|
| 333 |
+
x_ : array_like
|
| 334 |
+
The transformed array of points.
|
| 335 |
+
|
| 336 |
+
References
|
| 337 |
+
----------
|
| 338 |
+
.. [1] "Whitening Transformation". Wikipedia.
|
| 339 |
+
https://en.wikipedia.org/wiki/Whitening_transformation
|
| 340 |
+
.. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
|
| 341 |
+
coloring linear transformation". Transactions of VSB 18.2
|
| 342 |
+
(2018): 31-35. :doi:`10.31490/tces-2018-0013`
|
| 343 |
+
|
| 344 |
+
Examples
|
| 345 |
+
--------
|
| 346 |
+
>>> import numpy as np
|
| 347 |
+
>>> from scipy import stats
|
| 348 |
+
>>> rng = np.random.default_rng()
|
| 349 |
+
>>> n = 3
|
| 350 |
+
>>> A = rng.random(size=(n, n))
|
| 351 |
+
>>> cov_array = A @ A.T # make matrix symmetric positive definite
|
| 352 |
+
>>> precision = np.linalg.inv(cov_array)
|
| 353 |
+
>>> cov_object = stats.Covariance.from_precision(precision)
|
| 354 |
+
>>> x = rng.multivariate_normal(np.zeros(n), cov_array, size=(10000))
|
| 355 |
+
>>> x_ = cov_object.whiten(x)
|
| 356 |
+
>>> np.cov(x_, rowvar=False) # near-identity covariance
|
| 357 |
+
array([[0.97862122, 0.00893147, 0.02430451],
|
| 358 |
+
[0.00893147, 0.96719062, 0.02201312],
|
| 359 |
+
[0.02430451, 0.02201312, 0.99206881]])
|
| 360 |
+
|
| 361 |
+
"""
|
| 362 |
+
return self._whiten(np.asarray(x))
|
| 363 |
+
|
| 364 |
+
def colorize(self, x):
|
| 365 |
+
"""
|
| 366 |
+
Perform a colorizing transformation on data.
|
| 367 |
+
|
| 368 |
+
"Colorizing" ("color" as in "colored noise", in which different
|
| 369 |
+
frequencies may have different magnitudes) transforms a set of
|
| 370 |
+
uncorrelated random variables into a new set of random variables with
|
| 371 |
+
the desired covariance. When a coloring transform is applied to a
|
| 372 |
+
sample of points distributed according to a multivariate normal
|
| 373 |
+
distribution with identity covariance and zero mean, the covariance of
|
| 374 |
+
the transformed sample is approximately the covariance matrix used
|
| 375 |
+
in the coloring transform.
|
| 376 |
+
|
| 377 |
+
Parameters
|
| 378 |
+
----------
|
| 379 |
+
x : array_like
|
| 380 |
+
An array of points. The last dimension must correspond with the
|
| 381 |
+
dimensionality of the space, i.e., the number of columns in the
|
| 382 |
+
covariance matrix.
|
| 383 |
+
|
| 384 |
+
Returns
|
| 385 |
+
-------
|
| 386 |
+
x_ : array_like
|
| 387 |
+
The transformed array of points.
|
| 388 |
+
|
| 389 |
+
References
|
| 390 |
+
----------
|
| 391 |
+
.. [1] "Whitening Transformation". Wikipedia.
|
| 392 |
+
https://en.wikipedia.org/wiki/Whitening_transformation
|
| 393 |
+
.. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
|
| 394 |
+
coloring linear transformation". Transactions of VSB 18.2
|
| 395 |
+
(2018): 31-35. :doi:`10.31490/tces-2018-0013`
|
| 396 |
+
|
| 397 |
+
Examples
|
| 398 |
+
--------
|
| 399 |
+
>>> import numpy as np
|
| 400 |
+
>>> from scipy import stats
|
| 401 |
+
>>> rng = np.random.default_rng(1638083107694713882823079058616272161)
|
| 402 |
+
>>> n = 3
|
| 403 |
+
>>> A = rng.random(size=(n, n))
|
| 404 |
+
>>> cov_array = A @ A.T # make matrix symmetric positive definite
|
| 405 |
+
>>> cholesky = np.linalg.cholesky(cov_array)
|
| 406 |
+
>>> cov_object = stats.Covariance.from_cholesky(cholesky)
|
| 407 |
+
>>> x = rng.multivariate_normal(np.zeros(n), np.eye(n), size=(10000))
|
| 408 |
+
>>> x_ = cov_object.colorize(x)
|
| 409 |
+
>>> cov_data = np.cov(x_, rowvar=False)
|
| 410 |
+
>>> np.allclose(cov_data, cov_array, rtol=3e-2)
|
| 411 |
+
True
|
| 412 |
+
"""
|
| 413 |
+
return self._colorize(np.asarray(x))
|
| 414 |
+
|
| 415 |
+
@property
def log_pdet(self):
    """
    Log of the pseudo-determinant of the covariance matrix
    """
    # Round-trip through an array so the stored value is returned as a
    # plain NumPy float scalar regardless of its original type.
    value = np.array(self._log_pdet, dtype=float)
    return value[()]
|
| 421 |
+
|
| 422 |
+
@property
def rank(self):
    """
    Rank of the covariance matrix
    """
    # Round-trip through an array so the stored value is returned as a
    # plain NumPy integer scalar regardless of its original type.
    value = np.array(self._rank, dtype=int)
    return value[()]
|
| 428 |
+
|
| 429 |
+
@property
def covariance(self):
    """
    Explicit representation of the covariance matrix
    """
    # Subclasses either store this directly or compute it lazily via a
    # `_covariance` cached property.
    return self._covariance
|
| 435 |
+
|
| 436 |
+
@property
def shape(self):
    """
    Shape of the covariance array
    """
    return self._shape
|
| 442 |
+
|
| 443 |
+
def _validate_matrix(self, A, name):
|
| 444 |
+
A = np.atleast_2d(A)
|
| 445 |
+
m, n = A.shape[-2:]
|
| 446 |
+
if m != n or A.ndim != 2 or not (np.issubdtype(A.dtype, np.integer) or
|
| 447 |
+
np.issubdtype(A.dtype, np.floating)):
|
| 448 |
+
message = (f"The input `{name}` must be a square, "
|
| 449 |
+
"two-dimensional array of real numbers.")
|
| 450 |
+
raise ValueError(message)
|
| 451 |
+
return A
|
| 452 |
+
|
| 453 |
+
def _validate_vector(self, A, name):
|
| 454 |
+
A = np.atleast_1d(A)
|
| 455 |
+
if A.ndim != 1 or not (np.issubdtype(A.dtype, np.integer) or
|
| 456 |
+
np.issubdtype(A.dtype, np.floating)):
|
| 457 |
+
message = (f"The input `{name}` must be a one-dimensional array "
|
| 458 |
+
"of real numbers.")
|
| 459 |
+
raise ValueError(message)
|
| 460 |
+
return A
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
class CovViaPrecision(Covariance):
    """Covariance represented via its precision (inverse covariance) matrix."""

    def __init__(self, precision, covariance=None):
        precision = self._validate_matrix(precision, 'precision')
        if covariance is not None:
            covariance = self._validate_matrix(covariance, 'covariance')
            if precision.shape != covariance.shape:
                raise ValueError(
                    "`precision.shape` must equal `covariance.shape`.")

        # Lower Cholesky factor of the precision: P = L_P @ L_P.T.
        self._chol_P = np.linalg.cholesky(precision)
        # log|Sigma| = -log|P| = -2 * sum(log(diag(L_P)))
        self._log_pdet = -2*np.log(np.diag(self._chol_P)).sum(axis=-1)
        self._rank = precision.shape[-1]  # must be full rank if invertible
        self._precision = precision
        self._cov_matrix = covariance
        self._shape = precision.shape
        self._allow_singular = False

    def _whiten(self, x):
        # Whitening multiplies by the Cholesky factor of the precision.
        return x @ self._chol_P

    @cached_property
    def _covariance(self):
        # Use the explicitly-provided covariance when available; otherwise
        # invert the precision lazily via its Cholesky factor.
        if self._cov_matrix is not None:
            return self._cov_matrix
        dim = self._shape[-1]
        return linalg.cho_solve((self._chol_P, True), np.eye(dim))

    def _colorize(self, x):
        # Coloring multiplies by L_P^{-T}, i.e. solves L_P.T @ z = x.T.
        return linalg.solve_triangular(self._chol_P.T, x.T, lower=False).T
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def _dot_diag(x, d):
|
| 495 |
+
# If d were a full diagonal matrix, x @ d would always do what we want.
|
| 496 |
+
# Special treatment is needed for n-dimensional `d` in which each row
|
| 497 |
+
# includes only the diagonal elements of a covariance matrix.
|
| 498 |
+
return x * d if x.ndim < 2 else x * np.expand_dims(d, -2)
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
class CovViaDiagonal(Covariance):
    """Covariance represented via the diagonal of a diagonal matrix."""

    def __init__(self, diagonal):
        diagonal = self._validate_vector(diagonal, 'diagonal')

        # Non-positive entries mark degenerate (zero-variance) directions.
        zero_mask = diagonal <= 0
        safe_diagonal = np.array(diagonal, dtype=np.float64)
        safe_diagonal[zero_mask] = 1  # ones don't affect determinant
        self._log_pdet = np.sum(np.log(safe_diagonal), axis=-1)

        # Pseudo-reciprocal square roots: zero along degenerate directions.
        pseudo_reciprocals = 1 / np.sqrt(safe_diagonal)
        pseudo_reciprocals[zero_mask] = 0

        self._sqrt_diagonal = np.sqrt(diagonal)
        self._LP = pseudo_reciprocals
        self._rank = safe_diagonal.shape[-1] - zero_mask.sum(axis=-1)
        self._covariance = np.apply_along_axis(np.diag, -1, diagonal)
        self._i_zero = zero_mask
        self._shape = self._covariance.shape
        self._allow_singular = True

    def _whiten(self, x):
        return _dot_diag(x, self._LP)

    def _colorize(self, x):
        return _dot_diag(x, self._sqrt_diagonal)

    def _support_mask(self, x):
        """
        Check whether x lies in the support of the distribution.
        """
        # A point is outside the support iff it has a nonzero coordinate
        # along any zero-variance axis.
        return ~np.any(_dot_diag(x, self._i_zero), axis=-1)
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
class CovViaCholesky(Covariance):
    """Covariance represented via its lower Cholesky factor L, A = L @ L.T."""

    def __init__(self, cholesky):
        factor = self._validate_matrix(cholesky, 'cholesky')

        self._factor = factor
        # log|A| = 2 * sum(log(diag(L)))
        self._log_pdet = 2*np.log(np.diag(factor)).sum(axis=-1)
        self._rank = factor.shape[-1]  # must be full rank for cholesky
        self._shape = factor.shape
        self._allow_singular = False

    @cached_property
    def _covariance(self):
        # Reconstruct the full matrix lazily from the factor.
        return self._factor @ self._factor.T

    def _whiten(self, x):
        # Multiply by L^{-1}, i.e. solve L @ z.T = x.T.
        return linalg.solve_triangular(self._factor, x.T, lower=True).T

    def _colorize(self, x):
        return x @ self._factor.T
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
class CovViaEigendecomposition(Covariance):
    """Covariance via an eigendecomposition (w, v): A = v @ diag(w) @ v.T."""

    def __init__(self, eigendecomposition):
        eigenvalues, eigenvectors = eigendecomposition
        eigenvalues = self._validate_vector(eigenvalues, 'eigenvalues')
        eigenvectors = self._validate_matrix(eigenvectors, 'eigenvectors')
        # Broadcast the batch dimensions of the two operands against each
        # other; incompatible shapes surface as one clear error.
        try:
            eigenvalues = np.expand_dims(eigenvalues, -2)
            eigenvectors, eigenvalues = np.broadcast_arrays(eigenvectors,
                                                            eigenvalues)
            eigenvalues = eigenvalues[..., 0, :]
        except ValueError:
            raise ValueError("The shapes of `eigenvalues` and `eigenvectors` "
                             "must be compatible.")

        # Non-positive eigenvalues mark degenerate directions.
        zero_mask = eigenvalues <= 0
        safe_eigenvalues = np.array(eigenvalues, dtype=np.float64)
        safe_eigenvalues[zero_mask] = 1  # ones don't affect determinant
        self._log_pdet = np.sum(np.log(safe_eigenvalues), axis=-1)

        # Pseudo-reciprocal square roots: zero along degenerate directions.
        pseudo_reciprocals = 1 / np.sqrt(safe_eigenvalues)
        pseudo_reciprocals[zero_mask] = 0

        self._LP = eigenvectors * pseudo_reciprocals
        self._LA = eigenvectors * np.sqrt(eigenvalues)
        self._rank = safe_eigenvalues.shape[-1] - zero_mask.sum(axis=-1)
        self._w = eigenvalues
        self._v = eigenvectors
        self._shape = eigenvectors.shape
        self._null_basis = eigenvectors * zero_mask
        # This is only used for `_support_mask`, not to decide whether
        # the covariance is singular or not.
        self._eps = _multivariate._eigvalsh_to_eps(eigenvalues) * 10**3
        self._allow_singular = True

    def _whiten(self, x):
        return x @ self._LP

    def _colorize(self, x):
        return x @ self._LA.T

    @cached_property
    def _covariance(self):
        # Reconstruct the full matrix lazily: v @ diag(w) @ v.T.
        return (self._v * self._w) @ self._v.T

    def _support_mask(self, x):
        """
        Check whether x lies in the support of the distribution.
        """
        # A point is in the support iff its projection onto the null space
        # of the covariance is numerically zero.
        residual = np.linalg.norm(x @ self._null_basis, axis=-1)
        return residual < self._eps
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
class CovViaPSD(Covariance):
    """
    Representation of a covariance provided via an instance of _PSD
    """

    def __init__(self, psd):
        # Adopt the precomputed quantities from the _PSD object.
        self._psd = psd
        self._LP = psd.U
        self._log_pdet = psd.log_pdet
        self._rank = psd.rank
        self._covariance = psd._M
        self._shape = psd._M.shape
        self._allow_singular = False  # by default

    def _whiten(self, x):
        return x @ self._LP

    def _support_mask(self, x):
        # Delegate the support check to the wrapped _PSD instance.
        return self._psd._support_mask(x)
|
llava_next/lib/python3.10/site-packages/scipy/stats/_crosstab.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy.sparse import coo_matrix
|
| 3 |
+
from scipy._lib._bunch import _make_tuple_bunch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
CrosstabResult = _make_tuple_bunch(
    "CrosstabResult", ["elements", "count"]
)


def crosstab(*args, levels=None, sparse=False):
    """
    Return table of counts for each possible unique combination in ``*args``.

    When ``len(args) > 1``, the array computed by this function is
    often referred to as a *contingency table* [1]_.

    Parameters
    ----------
    *args : sequences
        Sequences of equal length whose aligned unique elements are to be
        counted.
    levels : sequence, optional
        If given, must have the same length as `args`.  Each entry is either
        None (count the unique values of the corresponding argument, as
        found by ``np.unique``) or a sequence of the values to be counted;
        values not listed are ignored and do not contribute to `count`.
    sparse : bool, optional
        If True, return the counts as a `scipy.sparse.coo_matrix`.  Sparse
        matrices are two-dimensional, so exactly two input sequences are
        required in that case.  Default is False.

    Returns
    -------
    res : CrosstabResult
        An object containing the following attributes:

        elements : tuple of numpy.ndarrays
            The counted values along each dimension of `count`; these act
            as the labels of the corresponding axes.  Where ``levels[i]``
            was given, ``elements[i]`` holds those values.
        count : numpy.ndarray or scipy.sparse.coo_matrix
            Counts of the unique combinations in ``zip(*args)``.  Also
            known as a *contingency table* when ``len(args) > 1``.

    See Also
    --------
    numpy.unique

    Notes
    -----
    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] "Contingency table", http://en.wikipedia.org/wiki/Contingency_table

    Examples
    --------
    >>> from scipy.stats.contingency import crosstab
    >>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
    >>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
    >>> res = crosstab(a, x)
    >>> res.count
    array([[2, 3, 0],
           [1, 0, 4]])
    """
    num_seqs = len(args)
    if num_seqs == 0:
        raise TypeError("At least one input sequence is required.")

    n = len(args[0])
    for seq in args[1:]:
        if len(seq) != n:
            raise ValueError("All input sequences must have the same length.")

    if sparse and num_seqs != 2:
        raise ValueError("When `sparse` is True, only two input sequences "
                         "are allowed.")

    if levels is None:
        # Factorize every argument at once: unique values plus the inverse
        # index of each observation into those values.
        uniques = [np.unique(seq, return_inverse=True) for seq in args]
        actual_levels = tuple(vals for vals, _ in uniques)
        indices = tuple(inv for _, inv in uniques)
    else:
        if len(levels) != num_seqs:
            raise ValueError('len(levels) must equal the number of input '
                             'sequences')

        arrays = [np.asarray(seq) for seq in args]
        # keep[k, j] marks whether observation j has a counted value in
        # sequence k; inv[k, j] is its index into that sequence's levels.
        keep = np.zeros((num_seqs, n), dtype=np.bool_)
        inv = np.zeros((num_seqs, n), dtype=np.intp)
        actual_levels = []
        for k, (level_values, arr) in enumerate(zip(levels, arrays)):
            if level_values is None:
                # Fall back to the unique values of the argument itself.
                level_values, inv[k, :] = np.unique(arr, return_inverse=True)
                keep[k, :] = True
            else:
                # match[i, j] is True where arr[j] == level_values[i].
                match = arr == np.asarray(level_values).reshape(-1, 1)
                keep[k, :] = np.any(match, axis=0)
                rows, cols = match.T.nonzero()
                inv[k, rows] = cols
            actual_levels.append(level_values)

        # Count only observations whose value is accepted in every sequence.
        valid = keep.all(axis=0)
        indices = tuple(inv[:, valid])

    if sparse:
        count = coo_matrix((np.ones(len(indices[0]), dtype=int),
                            (indices[0], indices[1])))
        count.sum_duplicates()
    else:
        count = np.zeros([len(vals) for vals in actual_levels], dtype=int)
        # Unbuffered add handles repeated index combinations correctly.
        np.add.at(count, indices, 1)

    return CrosstabResult(actual_levels, count)
|
llava_next/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava_next/lib/python3.10/site-packages/scipy/stats/_mgc.py
ADDED
|
@@ -0,0 +1,550 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
from scipy._lib._util import check_random_state, MapWrapper, rng_integers, _contains_nan
|
| 5 |
+
from scipy._lib._bunch import _make_tuple_bunch
|
| 6 |
+
from scipy.spatial.distance import cdist
|
| 7 |
+
from scipy.ndimage import _measurements
|
| 8 |
+
|
| 9 |
+
from ._stats import _local_correlations # type: ignore[import-not-found]
|
| 10 |
+
from . import distributions
|
| 11 |
+
|
| 12 |
+
__all__ = ['multiscale_graphcorr']
|
| 13 |
+
|
| 14 |
+
# FROM MGCPY: https://github.com/neurodata/mgcpy
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _ParallelP:
    """Callable computing one permuted MGC statistic per replication index."""

    def __init__(self, x, y, random_states):
        self.x = x
        self.y = y
        # One pre-seeded RandomState per replication keeps the permutations
        # reproducible regardless of worker scheduling.
        self.random_states = random_states

    def __call__(self, index):
        # Permute rows and columns of the y distance matrix consistently.
        rng = self.random_states[index]
        order = rng.permutation(self.y.shape[0])
        permuted_y = self.y[order][:, order]

        # The permuted statistic is one draw from the null distribution.
        return _mgc_stat(self.x, permuted_y)[0]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
    r"""Estimate the MGC p-value by a permutation test.

    Parameters
    ----------
    x, y : ndarray
        `x` and `y` have shapes `(n, p)` and `(n, q)`.
    stat : float
        The sample test statistic.
    reps : int, optional
        Number of permutation replications used to estimate the null
        distribution.  Default is 1000.
    workers : int or map-like callable, optional
        If an int, the replications are evaluated in parallel over that many
        processes (``-1`` uses all available cores).  Alternatively, a
        map-like callable such as ``multiprocessing.Pool.map`` may be
        supplied; it is invoked as ``workers(func, iterable)`` and `func`
        must be pickleable.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        Seed or generator controlling the permutations.

    Returns
    -------
    pvalue : float
        The sample test p-value.
    null_dist : list
        The approximated null distribution.
    """
    # Pre-generate an independent RandomState per replication so results are
    # reproducible no matter how the work is split across workers.
    random_state = check_random_state(random_state)
    seeds = [rng_integers(random_state, 1 << 32, size=4, dtype=np.uint32)
             for _ in range(reps)]
    random_states = [np.random.RandomState(seed) for seed in seeds]

    # Fan the replications out over the requested workers.
    permute = _ParallelP(x=x, y=y, random_states=random_states)
    with MapWrapper(workers) as mapper:
        null_dist = np.array(list(mapper(permute, range(reps))))

    # Add-one (biased-upward) estimate keeps the p-value strictly positive.
    pvalue = (1 + (null_dist >= stat).sum()) / (1 + reps)

    return pvalue, null_dist
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _euclidean_dist(x):
|
| 91 |
+
return cdist(x, x)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# Result type returned by `multiscale_graphcorr`: a namedtuple-like bunch
# with fields `statistic`, `pvalue`, and `mgc_dict`.
MGCResult = _make_tuple_bunch(
    'MGCResult', ['statistic', 'pvalue', 'mgc_dict'], [])
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
                         workers=1, is_twosamp=False, random_state=None):
    r"""Computes the Multiscale Graph Correlation (MGC) test statistic.

    For each point, MGC considers the :math:`k`-nearest neighbors of one
    property and the :math:`l`-nearest neighbors of the other; each pair
    :math:`(k, l)` is a "scale".  Local distance correlations are computed
    at every scale, and the smoothed optimal local correlation is reported
    as the test statistic.  The p-value comes from a permutation test.

    Parameters
    ----------
    x, y : ndarray
        Data matrices of shape ``(n, p)`` and ``(n, q)`` for the MGC
        independence test; precomputed ``(n, n)`` distance/similarity
        matrices (with ``compute_distance=None``); or ``(n, p)`` and
        ``(m, p)`` for an unpaired two-sample test.
    compute_distance : callable, optional
        ``compute_distance(x)`` must return the pairwise-distance matrix of
        the rows of ``x``.  Set to ``None`` when `x` and `y` are already
        distance matrices.  Default is the Euclidean metric.
    reps : int, optional
        Number of permutation replications used to estimate the null.
        Default is ``1000``.
    workers : int or map-like callable, optional
        Parallelism for the permutation test: an int selects a process pool
        (``-1`` means all cores), a callable is used as
        ``workers(func, iterable)`` and must accept a pickleable `func`.
        Default is ``1``.
    is_twosamp : bool, optional
        Force the two-sample formulation even when `x` and `y` share shape
        ``(n, p)``.  Automatically forced to ``True`` when the sample
        counts differ.  Not available for distance-matrix input.
        Default is ``False``.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        Seed or generator controlling the permutations.

    Returns
    -------
    res : MGCResult
        An object with attributes ``statistic`` (float within `[-1, 1]`),
        ``pvalue`` (float, from the permutation test) and ``mgc_dict``
        (dict with keys ``mgc_map``, ``opt_scale`` and ``null_dist``).

    See Also
    --------
    pearsonr, kendalltau, spearmanr

    Notes
    -----
    MGC requires at least 5 samples for reliable results.

    .. versionadded:: 1.4.0

    References
    ----------
    .. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
           Maggioni, M., & Shen, C. (2019). Discovering and deciphering
           relationships across disparate data modalities. ELife.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import multiscale_graphcorr
    >>> x = np.arange(100)
    >>> y = x
    >>> res = multiscale_graphcorr(x, y)
    >>> res.statistic, res.pvalue
    (1.0, 0.001)

    """
    if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
        raise ValueError("x and y must be ndarrays")

    # Promote 1-D vectors to single-column matrices; reject higher ranks.
    if x.ndim == 1:
        x = x.reshape(-1, 1)
    elif x.ndim != 2:
        raise ValueError(f"Expected a 2-D array `x`, found shape {x.shape}")
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    elif y.ndim != 2:
        raise ValueError(f"Expected a 2-D array `y`, found shape {y.shape}")

    nx, px = x.shape
    ny, py = y.shape

    # NaNs are never acceptable here.
    _contains_nan(x, nan_policy='raise')
    _contains_nan(y, nan_policy='raise')

    # Neither are infinities.
    if np.isinf(x).any() or np.isinf(y).any():
        raise ValueError("Inputs contain infinities")

    if nx != ny:
        if px == py:
            # Unequal sample counts but matching dimension: switch to the
            # unpaired two-sample formulation.
            is_twosamp = True
        else:
            raise ValueError("Shape mismatch, x and y must have shape [n, p] "
                             "and [n, q] or have shape [n, p] and [m, p].")

    if min(nx, ny) < 5:
        raise ValueError("MGC requires at least 5 samples to give reasonable "
                         "results.")

    # Work in double precision throughout.
    x = x.astype(np.float64)
    y = y.astype(np.float64)

    # `compute_distance` must be callable unless distances are precomputed.
    if not callable(compute_distance) and compute_distance is not None:
        raise ValueError("Compute_distance must be a function.")

    # `reps` must be a non-negative int; small rep counts only earn a warning.
    if not isinstance(reps, int) or reps < 0:
        raise ValueError("Number of reps must be an integer greater than 0.")
    elif reps < 1000:
        msg = ("The number of replications is low (under 1000), and p-value "
               "calculations may be unreliable. Use the p-value result, with "
               "caution!")
        warnings.warn(msg, RuntimeWarning, stacklevel=2)

    if is_twosamp:
        if compute_distance is None:
            raise ValueError("Cannot run if inputs are distance matrices")
        x, y = _two_sample_transform(x, y)

    if compute_distance is not None:
        # Turn the raw data matrices into pairwise-distance matrices.
        x = compute_distance(x)
        y = compute_distance(y)

    # MGC statistic plus the map/scale diagnostics behind it.
    stat, stat_dict = _mgc_stat(x, y)
    stat_mgc_map = stat_dict["stat_mgc_map"]
    opt_scale = stat_dict["opt_scale"]

    # Permutation p-value and the null distribution it came from.
    pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
                                   random_state=random_state)

    # Everything other than statistic/p-value travels in a dict.
    mgc_dict = {"mgc_map": stat_mgc_map,
                "opt_scale": opt_scale,
                "null_dist": null_dist}

    res = MGCResult(stat, pvalue, mgc_dict)
    # Legacy alias kept for backward compatibility.
    res.stat = stat
    return res
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def _mgc_stat(distx, disty):
    r"""Helper function that calculates the MGC stat. See above for use.

    Parameters
    ----------
    distx, disty : ndarray
        `distx` and `disty` have shapes `(n, p)` and `(n, q)` or
        `(n, n)` and `(n, n)`
        if distance matrices.

    Returns
    -------
    stat : float
        The sample MGC test statistic within `[-1, 1]`.
    stat_dict : dict
        Contains additional useful additional returns containing the following
        keys:

            - stat_mgc_map : ndarray
                MGC-map of the statistics.
            - opt_scale : (float, float)
                The estimated optimal scale as a `(x, y)` pair.

    """
    # calculate MGC map and optimal scale
    stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')

    n, m = stat_mgc_map.shape
    if m == 1 or n == 1:
        # the global scale at is the statistic calculated at maximial nearest
        # neighbors. There is not enough local scale to search over, so
        # default to global scale
        # NOTE(review): shape unpacks as (n, m) but indexing is [m-1][n-1];
        # for a non-square map with m != n this looks transposed and would
        # raise IndexError -- confirm against _local_correlations' layout.
        stat = stat_mgc_map[m - 1][n - 1]
        opt_scale = m * n
    else:
        # sample size used by the beta-approximation threshold below
        samp_size = len(distx) - 1

        # threshold to find connected region of significant local correlations
        sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)

        # maximum within the significant region
        stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)

    stat_dict = {"stat_mgc_map": stat_mgc_map,
                 "opt_scale": opt_scale}

    return stat, stat_dict
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
def _threshold_mgc_map(stat_mgc_map, samp_size):
|
| 431 |
+
r"""
|
| 432 |
+
Finds a connected region of significance in the MGC-map by thresholding.
|
| 433 |
+
|
| 434 |
+
Parameters
|
| 435 |
+
----------
|
| 436 |
+
stat_mgc_map : ndarray
|
| 437 |
+
All local correlations within `[-1,1]`.
|
| 438 |
+
samp_size : int
|
| 439 |
+
The sample size of original data.
|
| 440 |
+
|
| 441 |
+
Returns
|
| 442 |
+
-------
|
| 443 |
+
sig_connect : ndarray
|
| 444 |
+
A binary matrix with 1's indicating the significant region.
|
| 445 |
+
|
| 446 |
+
"""
|
| 447 |
+
m, n = stat_mgc_map.shape
|
| 448 |
+
|
| 449 |
+
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
|
| 450 |
+
# with varying levels of performance. Threshold is based on a beta
|
| 451 |
+
# approximation.
|
| 452 |
+
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
|
| 453 |
+
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
|
| 454 |
+
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
|
| 455 |
+
|
| 456 |
+
# the global scale at is the statistic calculated at maximial nearest
|
| 457 |
+
# neighbors. Threshold is the maximum on the global and local scales
|
| 458 |
+
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
|
| 459 |
+
|
| 460 |
+
# find the largest connected component of significant correlations
|
| 461 |
+
sig_connect = stat_mgc_map > threshold
|
| 462 |
+
if np.sum(sig_connect) > 0:
|
| 463 |
+
sig_connect, _ = _measurements.label(sig_connect)
|
| 464 |
+
_, label_counts = np.unique(sig_connect, return_counts=True)
|
| 465 |
+
|
| 466 |
+
# skip the first element in label_counts, as it is count(zeros)
|
| 467 |
+
max_label = np.argmax(label_counts[1:]) + 1
|
| 468 |
+
sig_connect = sig_connect == max_label
|
| 469 |
+
else:
|
| 470 |
+
sig_connect = np.array([[False]])
|
| 471 |
+
|
| 472 |
+
return sig_connect
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
def _smooth_mgc_map(sig_connect, stat_mgc_map):
|
| 476 |
+
"""Finds the smoothed maximal within the significant region R.
|
| 477 |
+
|
| 478 |
+
If area of R is too small it returns the last local correlation. Otherwise,
|
| 479 |
+
returns the maximum within significant_connected_region.
|
| 480 |
+
|
| 481 |
+
Parameters
|
| 482 |
+
----------
|
| 483 |
+
sig_connect : ndarray
|
| 484 |
+
A binary matrix with 1's indicating the significant region.
|
| 485 |
+
stat_mgc_map : ndarray
|
| 486 |
+
All local correlations within `[-1, 1]`.
|
| 487 |
+
|
| 488 |
+
Returns
|
| 489 |
+
-------
|
| 490 |
+
stat : float
|
| 491 |
+
The sample MGC statistic within `[-1, 1]`.
|
| 492 |
+
opt_scale: (float, float)
|
| 493 |
+
The estimated optimal scale as an `(x, y)` pair.
|
| 494 |
+
|
| 495 |
+
"""
|
| 496 |
+
m, n = stat_mgc_map.shape
|
| 497 |
+
|
| 498 |
+
# the global scale at is the statistic calculated at maximial nearest
|
| 499 |
+
# neighbors. By default, statistic and optimal scale are global.
|
| 500 |
+
stat = stat_mgc_map[m - 1][n - 1]
|
| 501 |
+
opt_scale = [m, n]
|
| 502 |
+
|
| 503 |
+
if np.linalg.norm(sig_connect) != 0:
|
| 504 |
+
# proceed only when the connected region's area is sufficiently large
|
| 505 |
+
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
|
| 506 |
+
# with varying levels of performance
|
| 507 |
+
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
|
| 508 |
+
max_corr = max(stat_mgc_map[sig_connect])
|
| 509 |
+
|
| 510 |
+
# find all scales within significant_connected_region that maximize
|
| 511 |
+
# the local correlation
|
| 512 |
+
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
|
| 513 |
+
|
| 514 |
+
if max_corr >= stat:
|
| 515 |
+
stat = max_corr
|
| 516 |
+
|
| 517 |
+
k, l = max_corr_index
|
| 518 |
+
one_d_indices = k * n + l # 2D to 1D indexing
|
| 519 |
+
k = np.max(one_d_indices) // n
|
| 520 |
+
l = np.max(one_d_indices) % n
|
| 521 |
+
opt_scale = [k+1, l+1] # adding 1s to match R indexing
|
| 522 |
+
|
| 523 |
+
return stat, opt_scale
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
def _two_sample_transform(u, v):
|
| 527 |
+
"""Helper function that concatenates x and y for two sample MGC stat.
|
| 528 |
+
|
| 529 |
+
See above for use.
|
| 530 |
+
|
| 531 |
+
Parameters
|
| 532 |
+
----------
|
| 533 |
+
u, v : ndarray
|
| 534 |
+
`u` and `v` have shapes `(n, p)` and `(m, p)`.
|
| 535 |
+
|
| 536 |
+
Returns
|
| 537 |
+
-------
|
| 538 |
+
x : ndarray
|
| 539 |
+
Concatenate `u` and `v` along the `axis = 0`. `x` thus has shape
|
| 540 |
+
`(2n, p)`.
|
| 541 |
+
y : ndarray
|
| 542 |
+
Label matrix for `x` where 0 refers to samples that comes from `u` and
|
| 543 |
+
1 refers to samples that come from `v`. `y` thus has shape `(2n, 1)`.
|
| 544 |
+
|
| 545 |
+
"""
|
| 546 |
+
nx = u.shape[0]
|
| 547 |
+
ny = v.shape[0]
|
| 548 |
+
x = np.concatenate([u, v], axis=0)
|
| 549 |
+
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
|
| 550 |
+
return x, y
|
llava_next/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py
ADDED
|
@@ -0,0 +1,482 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from scipy.special import ndtri
|
| 4 |
+
from scipy.optimize import brentq
|
| 5 |
+
from ._discrete_distns import nchypergeom_fisher
|
| 6 |
+
from ._common import ConfidenceInterval
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _sample_odds_ratio(table):
|
| 10 |
+
"""
|
| 11 |
+
Given a table [[a, b], [c, d]], compute a*d/(b*c).
|
| 12 |
+
|
| 13 |
+
Return nan if the numerator and denominator are 0.
|
| 14 |
+
Return inf if just the denominator is 0.
|
| 15 |
+
"""
|
| 16 |
+
# table must be a 2x2 numpy array.
|
| 17 |
+
if table[1, 0] > 0 and table[0, 1] > 0:
|
| 18 |
+
oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1])
|
| 19 |
+
elif table[0, 0] == 0 or table[1, 1] == 0:
|
| 20 |
+
oddsratio = np.nan
|
| 21 |
+
else:
|
| 22 |
+
oddsratio = np.inf
|
| 23 |
+
return oddsratio
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _solve(func):
|
| 27 |
+
"""
|
| 28 |
+
Solve func(nc) = 0. func must be an increasing function.
|
| 29 |
+
"""
|
| 30 |
+
# We could just as well call the variable `x` instead of `nc`, but we
|
| 31 |
+
# always call this function with functions for which nc (the noncentrality
|
| 32 |
+
# parameter) is the variable for which we are solving.
|
| 33 |
+
nc = 1.0
|
| 34 |
+
value = func(nc)
|
| 35 |
+
if value == 0:
|
| 36 |
+
return nc
|
| 37 |
+
|
| 38 |
+
# Multiplicative factor by which to increase or decrease nc when
|
| 39 |
+
# searching for a bracketing interval.
|
| 40 |
+
factor = 2.0
|
| 41 |
+
# Find a bracketing interval.
|
| 42 |
+
if value > 0:
|
| 43 |
+
nc /= factor
|
| 44 |
+
while func(nc) > 0:
|
| 45 |
+
nc /= factor
|
| 46 |
+
lo = nc
|
| 47 |
+
hi = factor*nc
|
| 48 |
+
else:
|
| 49 |
+
nc *= factor
|
| 50 |
+
while func(nc) < 0:
|
| 51 |
+
nc *= factor
|
| 52 |
+
lo = nc/factor
|
| 53 |
+
hi = nc
|
| 54 |
+
|
| 55 |
+
# lo and hi bracket the solution for nc.
|
| 56 |
+
nc = brentq(func, lo, hi, xtol=1e-13)
|
| 57 |
+
return nc
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _nc_hypergeom_mean_inverse(x, M, n, N):
    """
    Find the noncentrality parameter of Fisher's noncentral hypergeometric
    distribution with parameters M, n and N (total, row-0 sum and column-0
    sum of a 2x2 contingency table) whose mean equals x (= table[0, 0]).
    """
    return _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _hypergeom_params_from_table(table):
|
| 72 |
+
# The notation M, n and N is consistent with stats.hypergeom and
|
| 73 |
+
# stats.nchypergeom_fisher.
|
| 74 |
+
x = table[0, 0]
|
| 75 |
+
M = table.sum()
|
| 76 |
+
n = table[0].sum()
|
| 77 |
+
N = table[:, 0].sum()
|
| 78 |
+
return x, M, n, N
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _ci_upper(table, alpha):
    """
    Compute the upper end of the confidence interval.
    """
    if _sample_odds_ratio(table) == np.inf:
        return np.inf

    x, M, n, N = _hypergeom_params_from_table(table)

    # nchypergeom_fisher.cdf decreases in nc, so flip its sign to hand
    # _solve an increasing function.
    return _solve(lambda nc: alpha - nchypergeom_fisher.cdf(x, M, n, N, nc))
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _ci_lower(table, alpha):
    """
    Compute the lower end of the confidence interval.
    """
    if _sample_odds_ratio(table) == 0:
        return 0

    x, M, n, N = _hypergeom_params_from_table(table)

    # sf is increasing in nc, so it can be handed to _solve directly.
    return _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _conditional_oddsratio(table):
    """
    Conditional MLE of the odds ratio for the 2x2 contingency table.
    """
    x, M, n, N = _hypergeom_params_from_table(table)

    # The support of the noncentral hypergeometric distribution with
    # parameters M, n and N does not depend on the noncentrality parameter,
    # so nc=1 suffices to get the bounds.
    lo, hi = nchypergeom_fisher.support(M, n, N, 1)

    # x at either extreme of the support pins the odds ratio to 0 or inf.
    if x == lo:
        return 0
    if x == hi:
        return np.inf

    return _nc_hypergeom_mean_inverse(x, M, n, N)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _conditional_oddsratio_ci(table, confidence_level=0.95,
                              alternative='two-sided'):
    """
    Conditional exact confidence interval for the odds ratio.
    """
    if alternative == 'two-sided':
        # split the tail mass equally between the two ends
        alpha = 0.5*(1 - confidence_level)
        return _ci_lower(table, alpha), _ci_upper(table, alpha)
    if alternative == 'less':
        return 0.0, _ci_upper(table, 1 - confidence_level)
    # alternative == 'greater'
    return _ci_lower(table, 1 - confidence_level), np.inf
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def _sample_odds_ratio_ci(table, confidence_level=0.95,
                          alternative='two-sided'):
    # Normal-approximation confidence interval on the log odds ratio,
    # exponentiated back to the odds-ratio scale.
    log_or = np.log(_sample_odds_ratio(table))
    se = np.sqrt((1/table).sum())
    if alternative == 'less':
        loglow, loghigh = -np.inf, log_or + ndtri(confidence_level)*se
    elif alternative == 'greater':
        loglow, loghigh = log_or - ndtri(confidence_level)*se, np.inf
    else:
        # alternative is 'two-sided'
        z = ndtri(0.5*confidence_level + 0.5)
        loglow, loghigh = log_or - z*se, log_or + z*se

    return np.exp(loglow), np.exp(loghigh)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
class OddsRatioResult:
    """
    Result object returned by `scipy.stats.contingency.odds_ratio`.

    Attributes
    ----------
    statistic : float
        The computed odds ratio.

        * For ``kind='sample'`` this is the sample (unconditional)
          estimate, ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
        * For ``kind='conditional'`` this is the conditional maximum
          likelihood estimate: the noncentrality parameter of Fisher's
          noncentral hypergeometric distribution with the same
          hypergeometric parameters as `table` and whose mean is
          ``table[0, 0]``.

    Methods
    -------
    confidence_interval :
        Confidence interval for the odds ratio.
    """

    def __init__(self, _table, _kind, statistic):
        # _table and _kind stay private; `statistic` is the only public
        # attribute of the result object.
        self._table = _table
        self._kind = _kind
        self.statistic = statistic

    def __repr__(self):
        return f"OddsRatioResult(statistic={self.statistic})"

    def confidence_interval(self, confidence_level=0.95,
                            alternative='two-sided'):
        """
        Confidence interval for the odds ratio.

        Parameters
        ----------
        confidence_level: float
            Desired confidence level, given as a fraction between 0 and 1.
            Default is 0.95 (meaning 95%).
        alternative : {'two-sided', 'less', 'greater'}, optional
            The alternative hypothesis to which the interval corresponds.
            With null hypothesis "the true odds ratio equals ``OR``" and
            interval ``(low, high)``:

            * 'two-sided' (default): evidence against the null at the
              chosen `confidence_level` if ``high < OR`` or ``low > OR``.
            * 'less': ``low`` is 0; evidence against the null if
              ``high < OR``.
            * 'greater': ``high`` is ``np.inf``; evidence against the
              null if ``low > OR``.

        Returns
        -------
        ci : ``ConfidenceInterval`` instance
            Object with attributes ``low`` and ``high`` holding the
            interval endpoints.

        Notes
        -----
        For ``kind='conditional'`` the limits are the conditional "exact
        confidence limits" described by Fisher [1]_ (see also Sahai and
        Khurshid [2]_, section 4.1.2).  For ``kind='sample'`` the interval
        assumes the log odds ratio is normally distributed with standard
        error ``sqrt(1/a + 1/b + 1/c + 1/d)`` where ``a``, ``b``, ``c``
        and ``d`` are the table entries ([2]_ section 3.1.3.2; [3]_
        section 2.3.3).

        References
        ----------
        .. [1] R. A. Fisher (1935), The logic of inductive inference,
               Journal of the Royal Statistical Society, Vol. 98, No. 1,
               pp. 39-82.
        .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
               Methods, Techniques, and Applications, CRC Press LLC, Boca
               Raton, Florida.
        .. [3] Alan Agresti, An Introduction to Categorical Data Analysis
               (second edition), Wiley, Hoboken, NJ, USA (2007).
        """
        if alternative not in ('two-sided', 'less', 'greater'):
            raise ValueError("`alternative` must be 'two-sided', 'less' or "
                             "'greater'.")

        # Written so that NaN does not raise (matches the historical
        # behavior of the two-sided comparison).
        if confidence_level < 0 or confidence_level > 1:
            raise ValueError('confidence_level must be between 0 and 1')

        if self._kind == 'conditional':
            return self._conditional_odds_ratio_ci(confidence_level,
                                                   alternative)
        return self._sample_odds_ratio_ci(confidence_level, alternative)

    def _conditional_odds_ratio_ci(self, confidence_level=0.95,
                                   alternative='two-sided'):
        """Exact (conditional) CI; see `confidence_interval` for details."""
        table = self._table
        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
            # A zero row or column sum makes the odds ratio NaN (p-value 1);
            # the confidence interval is then the whole nonnegative axis.
            low, high = 0, np.inf
        else:
            low, high = _conditional_oddsratio_ci(
                table, confidence_level=confidence_level,
                alternative=alternative)
        return ConfidenceInterval(low=low, high=high)

    def _sample_odds_ratio_ci(self, confidence_level=0.95,
                              alternative='two-sided'):
        """Normal-approximation CI for the sample odds ratio."""
        if confidence_level < 0 or confidence_level > 1:
            raise ValueError('confidence_level must be between 0 and 1')

        table = self._table
        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
            # Degenerate table (zero row/column sum): CI is (0, inf).
            low, high = 0, np.inf
        else:
            # NOTE: this call resolves to the module-level helper
            # `_sample_odds_ratio_ci` (defined elsewhere in this file),
            # not to this same-named method.
            low, high = _sample_odds_ratio_ci(
                table, confidence_level=confidence_level,
                alternative=alternative)
        return ConfidenceInterval(low=low, high=high)
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def odds_ratio(table, *, kind='conditional'):
    r"""
    Compute the odds ratio for a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table.  Elements must be non-negative integers.
    kind : str, optional
        Which kind of odds ratio to compute: the sample odds ratio
        (``kind='sample'``) or the conditional odds ratio
        (``kind='conditional'``).  Default is ``'conditional'``.

    Returns
    -------
    result : `~scipy.stats._result_classes.OddsRatioResult` instance
        The returned object has the attribute ``statistic``:

        * For ``kind='sample'`` it is the sample (unconditional)
          estimate ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
        * For ``kind='conditional'`` it is the conditional maximum
          likelihood estimate: the noncentrality parameter of Fisher's
          noncentral hypergeometric distribution with the same
          hypergeometric parameters as `table` and whose mean is
          ``table[0, 0]``.

        The object also has the method ``confidence_interval`` that
        computes the confidence interval of the odds ratio.

    See Also
    --------
    scipy.stats.fisher_exact
    relative_risk

    Notes
    -----
    The conditional odds ratio was discussed by Fisher (see "Example 1"
    of [1]_).  Texts that cover the odds ratio include [2]_ and [3]_.
    Interchanging the rows or columns of the table inverts the odds
    ratio, so the meaning of the row/column labels matters when
    interpreting the result.

    .. versionadded:: 1.10.0

    References
    ----------
    .. [1] R. A. Fisher (1935), The logic of inductive inference,
           Journal of the Royal Statistical Society, Vol. 98, No. 1,
           pp. 39-82.
    .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research.
           Volume I - The analysis of case-control studies. IARC Sci Publ.
           (32):5-338. PMID: 7216345. (See section 4.2.)
    .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
           Methods, Techniques, and Applications, CRC Press LLC, Boca
           Raton, Florida.

    Examples
    --------
    From a study of aspirin and ischemic stroke in women::

                        Aspirin   Control/Placebo
        Ischemic stroke     176         230
        No stroke         21035       21018

    >>> from scipy.stats.contingency import odds_ratio
    >>> res = odds_ratio([[176, 230], [21035, 21018]])
    >>> res.statistic
    0.7646037659999126
    >>> res.confidence_interval(confidence_level=0.95)
    ConfidenceInterval(low=0.6241234078749812, high=0.9354102892100372)

    The entire 95% confidence interval falls below 1, supporting an
    association of aspirin with reduced risk of ischemic stroke.
    """
    if kind not in ('conditional', 'sample'):
        raise ValueError("`kind` must be 'conditional' or 'sample'.")

    c = np.asarray(table)

    if c.shape != (2, 2):
        raise ValueError(f"Invalid shape {c.shape}. The input `table` must be "
                         "of shape (2, 2).")

    if not np.issubdtype(c.dtype, np.integer):
        raise ValueError("`table` must be an array of integers, but got "
                         f"type {c.dtype}")
    # Use a fixed-width 64-bit type so downstream arithmetic does not
    # depend on the platform default integer width.
    c = c.astype(np.int64)

    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # A row or column sums to zero: the odds ratio is undefined (NaN),
        # and the p-value of the associated test is NaN as well.
        return OddsRatioResult(_table=c, _kind=kind, statistic=np.nan)

    estimate = (_sample_odds_ratio(c) if kind == 'sample'
                else _conditional_oddsratio(c))
    return OddsRatioResult(_table=c, _kind=kind, statistic=estimate)
|
llava_next/lib/python3.10/site-packages/scipy/stats/_relative_risk.py
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy.special import ndtri
|
| 5 |
+
from ._common import ConfidenceInterval
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def _validate_int(n, bound, name):
|
| 9 |
+
msg = f'{name} must be an integer not less than {bound}, but got {n!r}'
|
| 10 |
+
try:
|
| 11 |
+
n = operator.index(n)
|
| 12 |
+
except TypeError:
|
| 13 |
+
raise TypeError(msg) from None
|
| 14 |
+
if n < bound:
|
| 15 |
+
raise ValueError(msg)
|
| 16 |
+
return n
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@dataclass
class RelativeRiskResult:
    """
    Result of `scipy.stats.contingency.relative_risk`.

    Attributes
    ----------
    relative_risk : float
        ``(exposed_cases/exposed_total) / (control_cases/control_total)``
    exposed_cases : int
        Number of "cases" (occurrences of disease or other event of
        interest) among the sample of "exposed" individuals.
    exposed_total : int
        Total number of "exposed" individuals in the sample.
    control_cases : int
        Number of "cases" among the sample of "control" (non-exposed)
        individuals.
    control_total : int
        Total number of "control" individuals in the sample.

    Methods
    -------
    confidence_interval :
        Compute the confidence interval for the relative risk estimate.
    """

    relative_risk: float
    exposed_cases: int
    exposed_total: int
    control_cases: int
    control_total: int

    def confidence_interval(self, confidence_level=0.95):
        """
        Compute the confidence interval for the relative risk.

        Uses the Katz method ("Method C" of [1]_; see also [2]_,
        section 3.1.2).

        Parameters
        ----------
        confidence_level : float, optional
            Confidence level for the interval.  Default is 0.95.

        Returns
        -------
        ci : ConfidenceInterval instance
            Object with attributes ``low`` and ``high`` holding the
            interval endpoints.

        References
        ----------
        .. [1] D. Katz, J. Baptista, S. P. Azen and M. C. Pike, "Obtaining
               confidence intervals for the risk ratio in cohort studies",
               Biometrics, 34, 469-474 (1978).
        .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
               CRC Press LLC, Boca Raton, FL, USA (1996).

        Examples
        --------
        >>> from scipy.stats.contingency import relative_risk
        >>> result = relative_risk(exposed_cases=10, exposed_total=75,
        ...                        control_cases=12, control_total=225)
        >>> result.relative_risk
        2.5
        >>> result.confidence_interval()
        ConfidenceInterval(low=1.1261564003469628, high=5.549850800541033)
        """
        if not 0 <= confidence_level <= 1:
            raise ValueError('confidence_level must be in the interval '
                             '[0, 1].')

        # Degenerate cases (a zero count on either side) follow the
        # convention of the R function riskratio from the epitools library.
        if self.exposed_cases == 0 and self.control_cases == 0:
            # relative risk is nan
            return ConfidenceInterval(low=np.nan, high=np.nan)
        if self.exposed_cases == 0:
            # relative risk is 0
            return ConfidenceInterval(low=0.0, high=np.nan)
        if self.control_cases == 0:
            # relative risk is inf
            return ConfidenceInterval(low=np.nan, high=np.inf)

        alpha = 1 - confidence_level
        quantile = ndtri(1 - alpha/2)
        ratio = self.relative_risk

        # Katz: log(rr) is approximately normal with variance
        #   1/exposed_cases - 1/exposed_total
        #   + 1/control_cases - 1/control_total
        stderr = np.sqrt(1/self.exposed_cases - 1/self.exposed_total +
                         1/self.control_cases - 1/self.control_total)
        shift = quantile*stderr
        return ConfidenceInterval(low=ratio*np.exp(-shift),
                                  high=ratio*np.exp(shift))
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def relative_risk(exposed_cases, exposed_total, control_cases, control_total):
    """
    Compute the relative risk (also known as the risk ratio).

    This function computes the relative risk associated with a 2x2
    contingency table ([1]_, section 2.2.3; [2]_, section 3.1.2).  The
    four counts are passed as separate parameters rather than as a table
    to avoid any ambiguity about which row or column holds the "exposed"
    and "control" counts; unlike the odds ratio, the relative risk is not
    invariant under interchanging the rows or columns.

    Parameters
    ----------
    exposed_cases : nonnegative int
        The number of "cases" (i.e. occurrence of disease or other event
        of interest) among the sample of "exposed" individuals.
    exposed_total : positive int
        The total number of "exposed" individuals in the sample.
    control_cases : nonnegative int
        The number of "cases" among the sample of "control" or non-exposed
        individuals.
    control_total : positive int
        The total number of "control" individuals in the sample.

    Returns
    -------
    result : instance of `~scipy.stats._result_classes.RelativeRiskResult`
        Has the float attribute ``relative_risk``, equal to
        ``(exposed_cases/exposed_total) / (control_cases/control_total)``,
        and the method ``confidence_interval`` for computing the
        confidence interval of the relative risk at a given confidence
        level.

    See Also
    --------
    odds_ratio

    Notes
    -----
    The R package epitools has the function ``riskratio`` which accepts a
    table with rows ``(n00, n01)`` (the reference/unexposed row) and
    ``(n10, n11)``.  To pass the same data to ``relative_risk``, use::

        relative_risk(n11, n10 + n11, n01, n00 + n01)

    With method="wald", ``riskratio`` computes the same CI estimate as
    ``RelativeRiskResult.confidence_interval``.

    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] Alan Agresti, An Introduction to Categorical Data Analysis
           (second edition), Wiley, Hoboken, NJ, USA (2007).
    .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
           CRC Press LLC, Boca Raton, FL, USA (1996).

    Examples
    --------
    From Example 3.1 of [2]_ (27 CHD cases among 122 high-CAT subjects,
    44 CHD cases among 487 low-CAT subjects):

    >>> from scipy.stats.contingency import relative_risk
    >>> result = relative_risk(27, 122, 44, 487)
    >>> result.relative_risk
    2.4495156482861398
    >>> result.confidence_interval(confidence_level=0.95)
    ConfidenceInterval(low=1.5836990926700116, high=3.7886786315466354)

    The interval does not contain 1, so the data supports the statement
    that high CAT is associated with greater risk of CHD.
    """
    # Validation is the bulk of the work; the ratio itself is trivial.
    exposed_cases = _validate_int(exposed_cases, 0, "exposed_cases")
    exposed_total = _validate_int(exposed_total, 1, "exposed_total")
    control_cases = _validate_int(control_cases, 0, "control_cases")
    control_total = _validate_int(control_total, 1, "control_total")

    if exposed_cases > exposed_total:
        raise ValueError('exposed_cases must not exceed exposed_total.')
    if control_cases > control_total:
        raise ValueError('control_cases must not exceed control_total.')

    # Conventions for zero counts: 0/0 -> nan, 0/positive -> 0.0,
    # positive/0 -> inf.
    if exposed_cases == 0:
        rr = np.nan if control_cases == 0 else 0.0
    elif control_cases == 0:
        rr = np.inf
    else:
        p1 = exposed_cases / exposed_total
        p2 = control_cases / control_total
        rr = p1 / p2
    return RelativeRiskResult(relative_risk=rr,
                              exposed_cases=exposed_cases,
                              exposed_total=exposed_total,
                              control_cases=control_cases,
                              control_total=control_total)
|
llava_next/lib/python3.10/site-packages/scipy/stats/_resampling.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava_next/lib/python3.10/site-packages/scipy/stats/_sampling.py
ADDED
|
@@ -0,0 +1,1314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import numbers
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy import stats
|
| 5 |
+
from scipy import special as sc
|
| 6 |
+
from ._qmc import (check_random_state as check_random_state_qmc,
|
| 7 |
+
Halton, QMCEngine)
|
| 8 |
+
from ._unuran.unuran_wrapper import NumericalInversePolynomial
|
| 9 |
+
from scipy._lib._util import check_random_state
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
__all__ = ['FastGeneratorInversion', 'RatioUniforms']
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# define pdfs and other helper functions to create the generators
|
| 16 |
+
|
| 17 |
+
def argus_pdf(x, chi):
    """Auxiliary (unnormalized) pdf used to sample the ARGUS distribution.

    Approach follows Baumgarten/Hoermann, "Generating ARGUS random
    variates": for chi > 5, exploit the relation of the ARGUS
    distribution to the Gamma(1.5) distribution.
    """
    if chi > 5:
        # Gamma(1.5)-shaped density (up to normalization)
        return math.sqrt(x) * math.exp(-x)
    y = 1 - x * x
    return x * math.sqrt(y) * math.exp(-0.5 * chi**2 * y)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def argus_gamma_trf(x, chi):
    """Map a Gamma(1.5) variate back to the ARGUS scale (identity if chi <= 5)."""
    if chi > 5:
        return np.sqrt(1.0 - 2 * x / chi**2)
    return x
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def argus_gamma_inv_trf(x, chi):
    """Inverse of `argus_gamma_trf` (identity if chi <= 5)."""
    if chi > 5:
        return 0.5 * chi**2 * (1 - x**2)
    return x
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def betaprime_pdf(x, a, b):
    """pdf of the beta prime distribution with shapes a, b."""
    if not x > 0:
        # handle the boundary x == 0 separately to avoid runtime
        # warnings from log(0)
        if a > 1:
            return 0
        if a < 1:
            return np.inf
        return 1 / sc.beta(a, b)
    log_pdf = (a - 1) * math.log(x) - (a + b) * math.log1p(x) - sc.betaln(a, b)
    return math.exp(log_pdf)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def beta_valid_params(a, b):
    """True if both shapes lie in the box where PINV is reliable."""
    return 0.1 <= min(a, b) and max(a, b) <= 700
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def gamma_pdf(x, a):
    """pdf of the gamma distribution with shape a (scale 1)."""
    if not x > 0:
        # value at the boundary: 0 for a >= 1, diverges otherwise
        return 0 if a >= 1 else np.inf
    return math.exp(-math.lgamma(a) + (a - 1.0) * math.log(x) - x)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def invgamma_pdf(x, a):
    """pdf of the inverted gamma distribution with shape a (scale 1)."""
    if not x > 0:
        # value at the boundary: 0 for a >= 1, diverges otherwise
        return 0 if a >= 1 else np.inf
    return math.exp(-(a + 1.0) * math.log(x) - math.lgamma(a) - 1 / x)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def burr_pdf(x, cc, dd):
    """pdf of the Burr (type III) distribution with shapes cc, dd."""
    if not x > 0:
        return 0
    log_x = math.log(x)
    # np.exp is used instead of math.exp on purpose: math.exp raises an
    # OverflowError during the setup for some shapes, e.g.
    # 1.89128135, 0.30195177 — see test test_burr_overflow
    return np.exp(-(cc + 1) * log_x - (dd + 1) * math.log1p(np.exp(-cc * log_x)))
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def burr12_pdf(x, cc, dd):
    """pdf of the Burr type XII distribution with shapes cc, dd."""
    if not x > 0:
        return 0
    log_x = math.log(x)
    log_term = math.log1p(math.exp(cc * log_x))
    return math.exp((cc - 1) * log_x - (dd + 1) * log_term + math.log(cc * dd))
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def chi_pdf(x, a):
    """pdf of the chi distribution with a degrees of freedom."""
    if not x > 0:
        # value at the boundary: 0 for a >= 1, diverges otherwise
        return 0 if a >= 1 else np.inf
    log_pdf = (
        (a - 1) * math.log(x)
        - 0.5 * (x * x)
        - (a / 2 - 1) * math.log(2)
        - math.lgamma(0.5 * a)
    )
    return math.exp(log_pdf)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def chi2_pdf(x, df):
    """pdf of the chi-squared distribution with df degrees of freedom."""
    if not x > 0:
        # value at the boundary: 0 for df >= 1, diverges otherwise
        return 0 if df >= 1 else np.inf
    log_pdf = (
        (df / 2 - 1) * math.log(x)
        - 0.5 * x
        - (df / 2) * math.log(2)
        - math.lgamma(0.5 * df)
    )
    return math.exp(log_pdf)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def alpha_pdf(x, a):
    """Unnormalized pdf of the alpha distribution with shape a."""
    if not x > 0:
        return 0.0
    return math.exp(-2.0 * math.log(x) - 0.5 * (a - 1.0 / x) ** 2)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def bradford_pdf(x, c):
    """Unnormalized pdf of the Bradford distribution on [0, 1]."""
    if not 0 <= x <= 1:
        return 0.0
    return 1.0 / (1.0 + c * x)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def crystalball_pdf(x, b, m):
    """Unnormalized pdf of the crystalball distribution (shapes b, m)."""
    if x > -b:
        # Gaussian core
        return math.exp(-0.5 * x * x)
    # power-law tail for x <= -b
    log_ratio = math.log(m / b)
    return math.exp(m * log_ratio - 0.5 * b * b - m * math.log(m / b - b - x))
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def weibull_min_pdf(x, c):
    """pdf of the Weibull (minimum) distribution with shape c."""
    if not x > 0:
        return 0.0
    return c * math.exp((c - 1) * math.log(x) - x**c)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def weibull_max_pdf(x, c):
    """pdf of the Weibull (maximum) distribution with shape c (x < 0)."""
    if not x < 0:
        return 0.0
    return c * math.exp((c - 1) * math.log(-x) - ((-x) ** c))
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def invweibull_pdf(x, c):
    """pdf of the inverted Weibull distribution with shape c."""
    if not x > 0:
        return 0.0
    return c * math.exp(-(c + 1) * math.log(x) - x ** (-c))
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def wald_pdf(x):
    """Unnormalized pdf of the Wald (inverse Gaussian, mu=1) distribution."""
    if not x > 0:
        return 0.0
    return math.exp(-((x - 1) ** 2) / (2 * x)) / math.sqrt(x**3)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def geninvgauss_mode(p, b):
    """Mode of the generalized inverse Gaussian distribution.

    Two algebraically equivalent formulas are used; the branch on p
    selects the numerically more stable one.
    """
    root = math.sqrt((1 - p) ** 2 + b**2)
    if p > 1:
        return (root - (1 - p)) / b
    return b / (root + (1 - p))


def geninvgauss_pdf(x, p, b):
    """GIG pdf rescaled so that its value at the mode equals one."""
    m = geninvgauss_mode(p, b)
    lfm = (p - 1) * math.log(m) - 0.5 * b * (m + 1 / m)
    if not x > 0:
        return 0.0
    return math.exp((p - 1) * math.log(x) - 0.5 * b * (x + 1 / x) - lfm)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def invgauss_mode(mu):
    """Mode of the inverse Gaussian distribution with mean mu (scale 1)."""
    return 1.0 / (math.sqrt(1.5 * 1.5 + 1 / (mu * mu)) + 1.5)


def invgauss_pdf(x, mu):
    """Inverse Gaussian pdf rescaled so that its value at the mode is one."""
    m = invgauss_mode(mu)
    lfm = -1.5 * math.log(m) - (m - mu) ** 2 / (2 * m * mu**2)
    if not x > 0:
        return 0.0
    return math.exp(-1.5 * math.log(x) - (x - mu) ** 2 / (2 * x * mu**2) - lfm)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def powerlaw_pdf(x, a):
    """Unnormalized pdf of the power-law distribution with exponent a."""
    if not x > 0:
        return 0.0
    return x ** (a - 1)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
# Define a dictionary: for a given distribution (keys), another dictionary
|
| 189 |
+
# (values) specifies the parameters for NumericalInversePolynomial (PINV).
|
| 190 |
+
# The keys of the latter dictionary are:
|
| 191 |
+
# - pdf: the pdf of the distribution (callable). The signature of the pdf
|
| 192 |
+
# is float -> float (i.e., the function does not have to be vectorized).
|
| 193 |
+
# If possible, functions like log or exp from the module math should be
|
| 194 |
+
# preferred over functions from numpy since the PINV setup will be faster
|
| 195 |
+
# in that case.
|
| 196 |
+
# - check_pinv_params: callable f that returns true if the shape parameters
|
| 197 |
+
# (args) are recommended parameters for PINV (i.e., the u-error does
|
| 198 |
+
# not exceed the default tolerance)
|
| 199 |
+
# - center: scalar if the center does not depend on args, otherwise
|
| 200 |
+
# callable that returns the center as a function of the shape parameters
|
| 201 |
+
# - rvs_transform: a callable that can be used to transform the rvs that
|
| 202 |
+
# are distributed according to the pdf to the target distribution
|
| 203 |
+
# (as an example, see the entry for the beta distribution)
|
| 204 |
+
# - rvs_transform_inv: the inverse of rvs_transform (it is required
|
| 205 |
+
# for the transformed ppf)
|
| 206 |
+
# - mirror_uniform: boolean or a callable that returns true or false
|
| 207 |
+
# depending on the shape parameters. If True, the ppf is applied
|
| 208 |
+
# to 1-u instead of u to generate rvs, where u is a uniform rv.
|
| 209 |
+
# While both u and 1-u are uniform, it can be required to use 1-u
|
| 210 |
+
# to compute the u-error correctly. This is only relevant for the argus
|
| 211 |
+
# distribution.
|
| 212 |
+
# The only required keys are "pdf" and "check_pinv_params".
|
| 213 |
+
# All other keys are optional.
|
| 214 |
+
|
| 215 |
+
# One entry per supported scipy.stats distribution; the meaning of the
# keys ("pdf", "check_pinv_params", "center", "rvs_transform",
# "rvs_transform_inv", "mirror_uniform") is documented in the comment
# block above. Only "pdf" and "check_pinv_params" are required.
PINV_CONFIG = {
    "alpha": {
        "pdf": alpha_pdf,
        "check_pinv_params": lambda a: 1.0e-11 <= a < 2.1e5,
        "center": lambda a: 0.25 * (math.sqrt(a * a + 8.0) - a),
    },
    "anglit": {
        "pdf": lambda x: math.cos(2 * x) + 1.0e-13,
        # +1.e-13 is necessary, otherwise PINV has strange problems as
        # f(upper border) is very close to 0
        "center": 0,
    },
    "argus": {
        "pdf": argus_pdf,
        "center": lambda chi: 0.7 if chi <= 5 else 0.5,
        "check_pinv_params": lambda chi: 1e-20 < chi < 901,
        "rvs_transform": argus_gamma_trf,
        "rvs_transform_inv": argus_gamma_inv_trf,
        # for chi > 5, sampling goes through Gamma(1.5); the ppf must be
        # applied to 1-u there (see the comment block above)
        "mirror_uniform": lambda chi: chi > 5,
    },
    "beta": {
        # sampled via the beta prime pdf and transformed back to (0, 1)
        "pdf": betaprime_pdf,
        "center": lambda a, b: max(0.1, (a - 1) / (b + 1)),
        "check_pinv_params": beta_valid_params,
        "rvs_transform": lambda x, *args: x / (1 + x),
        "rvs_transform_inv": lambda x, *args: x / (1 - x) if x < 1 else np.inf,
    },
    "betaprime": {
        "pdf": betaprime_pdf,
        "center": lambda a, b: max(0.1, (a - 1) / (b + 1)),
        "check_pinv_params": beta_valid_params,
    },
    "bradford": {
        "pdf": bradford_pdf,
        "check_pinv_params": lambda a: 1.0e-6 <= a <= 1e9,
        "center": 0.5,
    },
    "burr": {
        "pdf": burr_pdf,
        # center is the median of the distribution
        "center": lambda a, b: (2 ** (1 / b) - 1) ** (-1 / a),
        "check_pinv_params": lambda a, b: (min(a, b) >= 0.3) and (max(a, b) <= 50),
    },
    "burr12": {
        "pdf": burr12_pdf,
        "center": lambda a, b: (2 ** (1 / b) - 1) ** (1 / a),
        "check_pinv_params": lambda a, b: (min(a, b) >= 0.2) and (max(a, b) <= 50),
    },
    "cauchy": {
        "pdf": lambda x: 1 / (1 + (x * x)),
        "center": 0,
    },
    "chi": {
        "pdf": chi_pdf,
        "check_pinv_params": lambda df: 0.05 <= df <= 1.0e6,
        "center": lambda a: math.sqrt(a),
    },
    "chi2": {
        "pdf": chi2_pdf,
        "check_pinv_params": lambda df: 0.07 <= df <= 1e6,
        "center": lambda a: a,
    },
    "cosine": {
        "pdf": lambda x: 1 + math.cos(x),
        "center": 0,
    },
    "crystalball": {
        "pdf": crystalball_pdf,
        "check_pinv_params": lambda b, m: (0.01 <= b <= 5.5)
        and (1.1 <= m <= 75.1),
        "center": 0.0,
    },
    "expon": {
        "pdf": lambda x: math.exp(-x),
        "center": 1.0,
    },
    "gamma": {
        "pdf": gamma_pdf,
        "check_pinv_params": lambda a: 0.04 <= a <= 1e6,
        "center": lambda a: a,
    },
    "gennorm": {
        "pdf": lambda x, b: math.exp(-abs(x) ** b),
        "check_pinv_params": lambda b: 0.081 <= b <= 45.0,
        "center": 0.0,
    },
    "geninvgauss": {
        "pdf": geninvgauss_pdf,
        "check_pinv_params": lambda p, b: (abs(p) <= 1200.0)
        and (1.0e-10 <= b <= 1200.0),
        "center": geninvgauss_mode,
    },
    "gumbel_l": {
        "pdf": lambda x: math.exp(x - math.exp(x)),
        "center": -0.6,
    },
    "gumbel_r": {
        "pdf": lambda x: math.exp(-x - math.exp(-x)),
        "center": 0.6,
    },
    "hypsecant": {
        "pdf": lambda x: 1.0 / (math.exp(x) + math.exp(-x)),
        "center": 0.0,
    },
    "invgamma": {
        "pdf": invgamma_pdf,
        "check_pinv_params": lambda a: 0.04 <= a <= 1e6,
        "center": lambda a: 1 / a,
    },
    "invgauss": {
        "pdf": invgauss_pdf,
        "check_pinv_params": lambda mu: 1.0e-10 <= mu <= 1.0e9,
        "center": invgauss_mode,
    },
    "invweibull": {
        "pdf": invweibull_pdf,
        "check_pinv_params": lambda a: 0.12 <= a <= 512,
        "center": 1.0,
    },
    "laplace": {
        "pdf": lambda x: math.exp(-abs(x)),
        "center": 0.0,
    },
    "logistic": {
        "pdf": lambda x: math.exp(-x) / (1 + math.exp(-x)) ** 2,
        "center": 0.0,
    },
    "maxwell": {
        "pdf": lambda x: x * x * math.exp(-0.5 * x * x),
        "center": 1.41421,
    },
    "moyal": {
        "pdf": lambda x: math.exp(-(x + math.exp(-x)) / 2),
        "center": 1.2,
    },
    "norm": {
        "pdf": lambda x: math.exp(-x * x / 2),
        "center": 0.0,
    },
    "pareto": {
        "pdf": lambda x, b: x ** -(b + 1),
        "center": lambda b: b / (b - 1) if b > 2 else 1.5,
        "check_pinv_params": lambda b: 0.08 <= b <= 400000,
    },
    "powerlaw": {
        "pdf": powerlaw_pdf,
        "center": 1.0,
        "check_pinv_params": lambda a: 0.06 <= a <= 1.0e5,
    },
    "t": {
        "pdf": lambda x, df: (1 + x * x / df) ** (-0.5 * (df + 1)),
        "check_pinv_params": lambda a: 0.07 <= a <= 1e6,
        "center": 0.0,
    },
    "rayleigh": {
        "pdf": lambda x: x * math.exp(-0.5 * (x * x)),
        "center": 1.0,
    },
    "semicircular": {
        "pdf": lambda x: math.sqrt(1.0 - (x * x)),
        "center": 0,
    },
    "wald": {
        "pdf": wald_pdf,
        "center": 1.0,
    },
    "weibull_max": {
        "pdf": weibull_max_pdf,
        "check_pinv_params": lambda a: 0.25 <= a <= 512,
        "center": -1.0,
    },
    "weibull_min": {
        "pdf": weibull_min_pdf,
        "check_pinv_params": lambda a: 0.25 <= a <= 512,
        "center": 1.0,
    },
}
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def _validate_qmc_input(qmc_engine, d, seed):
|
| 394 |
+
# Input validation for `qmc_engine` and `d`
|
| 395 |
+
# Error messages for invalid `d` are raised by QMCEngine
|
| 396 |
+
# we could probably use a stats.qmc.check_qrandom_state
|
| 397 |
+
if isinstance(qmc_engine, QMCEngine):
|
| 398 |
+
if d is not None and qmc_engine.d != d:
|
| 399 |
+
message = "`d` must be consistent with dimension of `qmc_engine`."
|
| 400 |
+
raise ValueError(message)
|
| 401 |
+
d = qmc_engine.d if d is None else d
|
| 402 |
+
elif qmc_engine is None:
|
| 403 |
+
d = 1 if d is None else d
|
| 404 |
+
qmc_engine = Halton(d, seed=seed)
|
| 405 |
+
else:
|
| 406 |
+
message = (
|
| 407 |
+
"`qmc_engine` must be an instance of "
|
| 408 |
+
"`scipy.stats.qmc.QMCEngine` or `None`."
|
| 409 |
+
)
|
| 410 |
+
raise ValueError(message)
|
| 411 |
+
|
| 412 |
+
return qmc_engine, d
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
class CustomDistPINV:
    """Minimal distribution object for `NumericalInversePolynomial`.

    Binds a pdf and fixed shape parameters into the ``pdf(x)``
    single-argument interface that the generator expects.
    """

    def __init__(self, pdf, args):
        self._pdf_fn = pdf
        self._args = tuple(args)

    def pdf(self, x):
        return self._pdf_fn(x, *self._args)
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
class FastGeneratorInversion:
|
| 424 |
+
"""
|
| 425 |
+
Fast sampling by numerical inversion of the CDF for a large class of
|
| 426 |
+
continuous distributions in `scipy.stats`.
|
| 427 |
+
|
| 428 |
+
Parameters
|
| 429 |
+
----------
|
| 430 |
+
dist : rv_frozen object
|
| 431 |
+
Frozen distribution object from `scipy.stats`. The list of supported
|
| 432 |
+
distributions can be found in the Notes section. The shape parameters,
|
| 433 |
+
`loc` and `scale` used to create the distributions must be scalars.
|
| 434 |
+
For example, for the Gamma distribution with shape parameter `p`,
|
| 435 |
+
`p` has to be a float, and for the beta distribution with shape
|
| 436 |
+
parameters (a, b), both a and b have to be floats.
|
| 437 |
+
domain : tuple of floats, optional
|
| 438 |
+
If one wishes to sample from a truncated/conditional distribution,
|
| 439 |
+
the domain has to be specified.
|
| 440 |
+
The default is None. In that case, the random variates are not
|
| 441 |
+
truncated, and the domain is inferred from the support of the
|
| 442 |
+
distribution.
|
| 443 |
+
ignore_shape_range : boolean, optional.
|
| 444 |
+
If False, shape parameters that are outside of the valid range
|
| 445 |
+
of values to ensure that the numerical accuracy (see Notes) is
|
| 446 |
+
high, raise a ValueError. If True, any shape parameters that are valid
|
| 447 |
+
for the distribution are accepted. This can be useful for testing.
|
| 448 |
+
The default is False.
|
| 449 |
+
random_state : {None, int, `numpy.random.Generator`,
|
| 450 |
+
`numpy.random.RandomState`}, optional
|
| 451 |
+
|
| 452 |
+
A NumPy random number generator or seed for the underlying NumPy
|
| 453 |
+
random number generator used to generate the stream of uniform
|
| 454 |
+
random numbers.
|
| 455 |
+
If `random_state` is None, it uses ``self.random_state``.
|
| 456 |
+
If `random_state` is an int,
|
| 457 |
+
``np.random.default_rng(random_state)`` is used.
|
| 458 |
+
If `random_state` is already a ``Generator`` or ``RandomState``
|
| 459 |
+
instance then that instance is used.
|
| 460 |
+
|
| 461 |
+
Attributes
|
| 462 |
+
----------
|
| 463 |
+
loc : float
|
| 464 |
+
The location parameter.
|
| 465 |
+
random_state : {`numpy.random.Generator`, `numpy.random.RandomState`}
|
| 466 |
+
The random state used in relevant methods like `rvs` (unless
|
| 467 |
+
another `random_state` is passed as an argument to these methods).
|
| 468 |
+
scale : float
|
| 469 |
+
The scale parameter.
|
| 470 |
+
|
| 471 |
+
Methods
|
| 472 |
+
-------
|
| 473 |
+
cdf
|
| 474 |
+
evaluate_error
|
| 475 |
+
ppf
|
| 476 |
+
qrvs
|
| 477 |
+
rvs
|
| 478 |
+
support
|
| 479 |
+
|
| 480 |
+
Notes
|
| 481 |
+
-----
|
| 482 |
+
The class creates an object for continuous distributions specified
|
| 483 |
+
by `dist`. The method `rvs` uses a generator from
|
| 484 |
+
`scipy.stats.sampling` that is created when the object is instantiated.
|
| 485 |
+
In addition, the methods `qrvs` and `ppf` are added.
|
| 486 |
+
`qrvs` generate samples based on quasi-random numbers from
|
| 487 |
+
`scipy.stats.qmc`. `ppf` is the PPF based on the
|
| 488 |
+
numerical inversion method in [1]_ (`NumericalInversePolynomial`) that is
|
| 489 |
+
used to generate random variates.
|
| 490 |
+
|
| 491 |
+
Supported distributions (`distname`) are:
|
| 492 |
+
``alpha``, ``anglit``, ``argus``, ``beta``, ``betaprime``, ``bradford``,
|
| 493 |
+
``burr``, ``burr12``, ``cauchy``, ``chi``, ``chi2``, ``cosine``,
|
| 494 |
+
``crystalball``, ``expon``, ``gamma``, ``gennorm``, ``geninvgauss``,
|
| 495 |
+
``gumbel_l``, ``gumbel_r``, ``hypsecant``, ``invgamma``, ``invgauss``,
|
| 496 |
+
``invweibull``, ``laplace``, ``logistic``, ``maxwell``, ``moyal``,
|
| 497 |
+
``norm``, ``pareto``, ``powerlaw``, ``t``, ``rayleigh``, ``semicircular``,
|
| 498 |
+
``wald``, ``weibull_max``, ``weibull_min``.
|
| 499 |
+
|
| 500 |
+
`rvs` relies on the accuracy of the numerical inversion. If very extreme
|
| 501 |
+
shape parameters are used, the numerical inversion might not work. However,
|
| 502 |
+
for all implemented distributions, the admissible shape parameters have
|
| 503 |
+
been tested, and an error will be raised if the user supplies values
|
| 504 |
+
outside of the allowed range. The u-error should not exceed 1e-10 for all
|
| 505 |
+
valid parameters. Note that warnings might be raised even if parameters
|
| 506 |
+
are within the valid range when the object is instantiated.
|
| 507 |
+
To check numerical accuracy, the method `evaluate_error` can be used.
|
| 508 |
+
|
| 509 |
+
Note that all implemented distributions are also part of `scipy.stats`, and
|
| 510 |
+
the object created by `FastGeneratorInversion` relies on methods like
|
| 511 |
+
`ppf`, `cdf` and `pdf` from `rv_frozen`. The main benefit of using this
|
| 512 |
+
class can be summarized as follows: Once the generator to sample random
|
| 513 |
+
variates is created in the setup step, sampling and evaluation of
|
| 514 |
+
the PPF using `ppf` are very fast,
|
| 515 |
+
and performance is essentially independent of the distribution. Therefore,
|
| 516 |
+
a substantial speed-up can be achieved for many distributions if large
|
| 517 |
+
numbers of random variates are required. It is important to know that this
|
| 518 |
+
fast sampling is achieved by inversion of the CDF. Thus, one uniform
|
| 519 |
+
random variate is transformed into a non-uniform variate, which is an
|
| 520 |
+
advantage for several simulation methods, e.g., when
|
| 521 |
+
the variance reduction methods of common random variates or
|
| 522 |
+
antithetic variates are be used ([2]_).
|
| 523 |
+
|
| 524 |
+
In addition, inversion makes it possible to
|
| 525 |
+
- to use a QMC generator from `scipy.stats.qmc` (method `qrvs`),
|
| 526 |
+
- to generate random variates truncated to an interval. For example, if
|
| 527 |
+
one aims to sample standard normal random variates from
|
| 528 |
+
the interval (2, 4), this can be easily achieved by using the parameter
|
| 529 |
+
`domain`.
|
| 530 |
+
|
| 531 |
+
The location and scale that are initially defined by `dist`
|
| 532 |
+
can be reset without having to rerun the setup
|
| 533 |
+
step to create the generator that is used for sampling. The relation
|
| 534 |
+
of the distribution `Y` with `loc` and `scale` to the standard
|
| 535 |
+
distribution `X` (i.e., ``loc=0`` and ``scale=1``) is given by
|
| 536 |
+
``Y = loc + scale * X``.
|
| 537 |
+
|
| 538 |
+
References
|
| 539 |
+
----------
|
| 540 |
+
.. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold.
|
| 541 |
+
"Random variate generation by numerical inversion when only the
|
| 542 |
+
density is known." ACM Transactions on Modeling and Computer
|
| 543 |
+
Simulation (TOMACS) 20.4 (2010): 1-25.
|
| 544 |
+
.. [2] Hörmann, Wolfgang, Josef Leydold and Gerhard Derflinger.
|
| 545 |
+
"Automatic nonuniform random number generation."
|
| 546 |
+
Springer, 2004.
|
| 547 |
+
|
| 548 |
+
Examples
|
| 549 |
+
--------
|
| 550 |
+
>>> import numpy as np
|
| 551 |
+
>>> from scipy import stats
|
| 552 |
+
>>> from scipy.stats.sampling import FastGeneratorInversion
|
| 553 |
+
|
| 554 |
+
Let's start with a simple example to illustrate the main features:
|
| 555 |
+
|
| 556 |
+
>>> gamma_frozen = stats.gamma(1.5)
|
| 557 |
+
>>> gamma_dist = FastGeneratorInversion(gamma_frozen)
|
| 558 |
+
>>> r = gamma_dist.rvs(size=1000)
|
| 559 |
+
|
| 560 |
+
The mean should be approximately equal to the shape parameter 1.5:
|
| 561 |
+
|
| 562 |
+
>>> r.mean()
|
| 563 |
+
1.52423591130436 # may vary
|
| 564 |
+
|
| 565 |
+
Similarly, we can draw a sample based on quasi-random numbers:
|
| 566 |
+
|
| 567 |
+
>>> r = gamma_dist.qrvs(size=1000)
|
| 568 |
+
>>> r.mean()
|
| 569 |
+
1.4996639255942914 # may vary
|
| 570 |
+
|
| 571 |
+
Compare the PPF against approximation `ppf`.
|
| 572 |
+
|
| 573 |
+
>>> q = [0.001, 0.2, 0.5, 0.8, 0.999]
|
| 574 |
+
>>> np.max(np.abs(gamma_frozen.ppf(q) - gamma_dist.ppf(q)))
|
| 575 |
+
4.313394796895409e-08
|
| 576 |
+
|
| 577 |
+
To confirm that the numerical inversion is accurate, we evaluate the
|
| 578 |
+
approximation error (u-error), which should be below 1e-10 (for more
|
| 579 |
+
details, refer to the documentation of `evaluate_error`):
|
| 580 |
+
|
| 581 |
+
>>> gamma_dist.evaluate_error()
|
| 582 |
+
(7.446320551265581e-11, nan) # may vary
|
| 583 |
+
|
| 584 |
+
Note that the location and scale can be changed without instantiating a
|
| 585 |
+
new generator:
|
| 586 |
+
|
| 587 |
+
>>> gamma_dist.loc = 2
|
| 588 |
+
>>> gamma_dist.scale = 3
|
| 589 |
+
>>> r = gamma_dist.rvs(size=1000)
|
| 590 |
+
|
| 591 |
+
The mean should be approximately 2 + 3*1.5 = 6.5.
|
| 592 |
+
|
| 593 |
+
>>> r.mean()
|
| 594 |
+
6.399549295242894 # may vary
|
| 595 |
+
|
| 596 |
+
Let us also illustrate how truncation can be applied:
|
| 597 |
+
|
| 598 |
+
>>> trunc_norm = FastGeneratorInversion(stats.norm(), domain=(3, 4))
|
| 599 |
+
>>> r = trunc_norm.rvs(size=1000)
|
| 600 |
+
>>> 3 < r.min() < r.max() < 4
|
| 601 |
+
True
|
| 602 |
+
|
| 603 |
+
Check the mean:
|
| 604 |
+
|
| 605 |
+
>>> r.mean()
|
| 606 |
+
3.250433367078603 # may vary
|
| 607 |
+
|
| 608 |
+
>>> stats.norm.expect(lb=3, ub=4, conditional=True)
|
| 609 |
+
3.260454285589997
|
| 610 |
+
|
| 611 |
+
In this particular, case, `scipy.stats.truncnorm` could also be used to
|
| 612 |
+
generate truncated normal random variates.
|
| 613 |
+
|
| 614 |
+
"""
|
| 615 |
+
|
| 616 |
+
def __init__(
|
| 617 |
+
self,
|
| 618 |
+
dist,
|
| 619 |
+
*,
|
| 620 |
+
domain=None,
|
| 621 |
+
ignore_shape_range=False,
|
| 622 |
+
random_state=None,
|
| 623 |
+
):
|
| 624 |
+
|
| 625 |
+
if isinstance(dist, stats.distributions.rv_frozen):
|
| 626 |
+
distname = dist.dist.name
|
| 627 |
+
if distname not in PINV_CONFIG.keys():
|
| 628 |
+
raise ValueError(
|
| 629 |
+
f"Distribution '{distname}' is not supported."
|
| 630 |
+
f"It must be one of {list(PINV_CONFIG.keys())}"
|
| 631 |
+
)
|
| 632 |
+
else:
|
| 633 |
+
raise ValueError("`dist` must be a frozen distribution object")
|
| 634 |
+
|
| 635 |
+
loc = dist.kwds.get("loc", 0)
|
| 636 |
+
scale = dist.kwds.get("scale", 1)
|
| 637 |
+
args = dist.args
|
| 638 |
+
if not np.isscalar(loc):
|
| 639 |
+
raise ValueError("loc must be scalar.")
|
| 640 |
+
if not np.isscalar(scale):
|
| 641 |
+
raise ValueError("scale must be scalar.")
|
| 642 |
+
|
| 643 |
+
self._frozendist = getattr(stats, distname)(
|
| 644 |
+
*args,
|
| 645 |
+
loc=loc,
|
| 646 |
+
scale=scale,
|
| 647 |
+
)
|
| 648 |
+
self._distname = distname
|
| 649 |
+
|
| 650 |
+
nargs = np.broadcast_arrays(args)[0].size
|
| 651 |
+
nargs_expected = self._frozendist.dist.numargs
|
| 652 |
+
if nargs != nargs_expected:
|
| 653 |
+
raise ValueError(
|
| 654 |
+
f"Each of the {nargs_expected} shape parameters must be a "
|
| 655 |
+
f"scalar, but {nargs} values are provided."
|
| 656 |
+
)
|
| 657 |
+
|
| 658 |
+
self.random_state = random_state
|
| 659 |
+
|
| 660 |
+
if domain is None:
|
| 661 |
+
self._domain = self._frozendist.support()
|
| 662 |
+
self._p_lower = 0.0
|
| 663 |
+
self._p_domain = 1.0
|
| 664 |
+
else:
|
| 665 |
+
self._domain = domain
|
| 666 |
+
self._p_lower = self._frozendist.cdf(self._domain[0])
|
| 667 |
+
_p_domain = self._frozendist.cdf(self._domain[1]) - self._p_lower
|
| 668 |
+
self._p_domain = _p_domain
|
| 669 |
+
self._set_domain_adj()
|
| 670 |
+
self._ignore_shape_range = ignore_shape_range
|
| 671 |
+
|
| 672 |
+
# the domain to be passed to NumericalInversePolynomial
|
| 673 |
+
# define a separate variable since in case of a transformation,
|
| 674 |
+
# domain_pinv will not be the same as self._domain
|
| 675 |
+
self._domain_pinv = self._domain
|
| 676 |
+
|
| 677 |
+
# get information about the distribution from the config to set up
|
| 678 |
+
# the generator
|
| 679 |
+
dist = self._process_config(distname, args)
|
| 680 |
+
|
| 681 |
+
if self._rvs_transform_inv is not None:
|
| 682 |
+
d0 = self._rvs_transform_inv(self._domain[0], *args)
|
| 683 |
+
d1 = self._rvs_transform_inv(self._domain[1], *args)
|
| 684 |
+
if d0 > d1:
|
| 685 |
+
# swap values if transformation if decreasing
|
| 686 |
+
d0, d1 = d1, d0
|
| 687 |
+
# only update _domain_pinv and not _domain
|
| 688 |
+
# _domain refers to the original distribution, _domain_pinv
|
| 689 |
+
# to the transformed distribution
|
| 690 |
+
self._domain_pinv = d0, d1
|
| 691 |
+
|
| 692 |
+
# self._center has been set by the call self._process_config
|
| 693 |
+
# check if self._center is inside the transformed domain
|
| 694 |
+
# _domain_pinv, otherwise move it to the endpoint that is closer
|
| 695 |
+
if self._center is not None:
|
| 696 |
+
if self._center < self._domain_pinv[0]:
|
| 697 |
+
self._center = self._domain_pinv[0]
|
| 698 |
+
elif self._center > self._domain_pinv[1]:
|
| 699 |
+
self._center = self._domain_pinv[1]
|
| 700 |
+
|
| 701 |
+
self._rng = NumericalInversePolynomial(
|
| 702 |
+
dist,
|
| 703 |
+
random_state=self.random_state,
|
| 704 |
+
domain=self._domain_pinv,
|
| 705 |
+
center=self._center,
|
| 706 |
+
)
|
| 707 |
+
|
| 708 |
+
    @property
    def random_state(self):
        # NumPy Generator/RandomState used by `rvs` (and passed to the
        # underlying generator); see the class docstring
        return self._random_state

    @random_state.setter
    def random_state(self, random_state):
        # normalize None / int / Generator / RandomState input via the
        # qmc helper
        self._random_state = check_random_state_qmc(random_state)
|
| 715 |
+
|
| 716 |
+
    @property
    def loc(self):
        # location parameter of the frozen distribution (default 0)
        return self._frozendist.kwds.get("loc", 0)

    @loc.setter
    def loc(self, loc):
        # resetting loc does not require a new generator setup; only the
        # frozen distribution and the adjusted domain are updated
        if not np.isscalar(loc):
            raise ValueError("loc must be scalar.")
        self._frozendist.kwds["loc"] = loc
        # update the adjusted domain that depends on loc and scale
        self._set_domain_adj()
|
| 727 |
+
|
| 728 |
+
@property
|
| 729 |
+
def scale(self):
|
| 730 |
+
return self._frozendist.kwds.get("scale", 0)
|
| 731 |
+
|
| 732 |
+
@scale.setter
|
| 733 |
+
def scale(self, scale):
|
| 734 |
+
if not np.isscalar(scale):
|
| 735 |
+
raise ValueError("scale must be scalar.")
|
| 736 |
+
self._frozendist.kwds["scale"] = scale
|
| 737 |
+
# update the adjusted domain that depends on loc and scale
|
| 738 |
+
self._set_domain_adj()
|
| 739 |
+
|
| 740 |
+
def _set_domain_adj(self):
|
| 741 |
+
""" Adjust the domain based on loc and scale. """
|
| 742 |
+
loc = self.loc
|
| 743 |
+
scale = self.scale
|
| 744 |
+
lb = self._domain[0] * scale + loc
|
| 745 |
+
ub = self._domain[1] * scale + loc
|
| 746 |
+
self._domain_adj = (lb, ub)
|
| 747 |
+
|
| 748 |
+
    def _process_config(self, distname, args):
        """Read the PINV_CONFIG entry for `distname` and configure state.

        Sets ``_center``, ``_rvs_transform``, ``_rvs_transform_inv`` and
        ``_mirror_uniform`` from the config and returns the distribution
        object to be passed to `NumericalInversePolynomial`.
        """
        cfg = PINV_CONFIG[distname]
        if "check_pinv_params" in cfg:
            if not self._ignore_shape_range:
                if not cfg["check_pinv_params"](*args):
                    msg = ("No generator is defined for the shape parameters "
                           f"{args}. Use ignore_shape_range to proceed "
                           "with the selected values.")
                    raise ValueError(msg)

        if "center" in cfg.keys():
            # the center is either a constant or a callable of the shapes
            if not np.isscalar(cfg["center"]):
                self._center = cfg["center"](*args)
            else:
                self._center = cfg["center"]
        else:
            self._center = None
        self._rvs_transform = cfg.get("rvs_transform", None)
        self._rvs_transform_inv = cfg.get("rvs_transform_inv", None)
        _mirror_uniform = cfg.get("mirror_uniform", None)
        if _mirror_uniform is None:
            self._mirror_uniform = False
        else:
            # whether to apply the ppf to 1-u instead of u (argus only,
            # see the comment block above PINV_CONFIG)
            self._mirror_uniform = _mirror_uniform(*args)

        return CustomDistPINV(cfg["pdf"], args)
|
| 774 |
+
|
| 775 |
+
    def rvs(self, size=None):
        """
        Sample from the distribution by inversion.

        Parameters
        ----------
        size : int or tuple, optional
            The shape of samples. Default is ``None`` in which case a scalar
            sample is returned.

        Returns
        -------
        rvs : array_like
            A NumPy array of random variates.

        Notes
        -----
        Random variates are generated by numerical inversion of the CDF, i.e.,
        `ppf` computed by `NumericalInversePolynomial` when the class
        is instantiated. Note that the
        default ``rvs`` method of the rv_continuous class is
        overwritten. Hence, a different stream of random numbers is generated
        even if the same seed is used.
        """
        # note: we cannot use self._rng.rvs directly in case
        # self._mirror_uniform is true
        u = self.random_state.uniform(size=size)
        if self._mirror_uniform:
            # apply the ppf to 1-u instead of u (set for argus, chi > 5)
            u = 1 - u
        r = self._rng.ppf(u)
        if self._rvs_transform is not None:
            # map variates of the auxiliary pdf to the target distribution
            r = self._rvs_transform(r, *self._frozendist.args)
        return self.loc + self.scale * r
|
| 808 |
+
|
| 809 |
+
def ppf(self, q):
    """
    Very fast PPF (inverse CDF) of the distribution which
    is a very close approximation of the exact PPF values.

    Parameters
    ----------
    q : array_like
        Array with probabilities.

    Returns
    -------
    ppf : array_like
        Quantiles corresponding to the values in `q`.

    Notes
    -----
    The evaluation of the PPF is very fast but it may have a large
    relative error in the far tails. The numerical precision of the PPF
    is controlled by the u-error, that is,
    ``max |u - CDF(PPF(u))|`` where the max is taken over points in
    the interval [0,1], see `evaluate_error`.

    Note that this PPF is designed to generate random samples.
    """
    q = np.asarray(q)
    # mirrored setups invert the underlying generator at 1 - q
    if self._mirror_uniform:
        x = self._rng.ppf(1 - q)
    else:
        x = self._rng.ppf(q)
    if self._rvs_transform is not None:
        x = self._rvs_transform(x, *self._frozendist.args)
    return self.scale * x + self.loc
|
| 842 |
+
|
| 843 |
+
def qrvs(self, size=None, d=None, qmc_engine=None):
    """
    Quasi-random variates of the given distribution.

    The `qmc_engine` is used to draw uniform quasi-random variates, and
    these are converted to quasi-random variates of the given distribution
    using inverse transform sampling.

    Parameters
    ----------
    size : int, tuple of ints, or None; optional
        Defines shape of random variates array. Default is ``None``.
    d : int or None, optional
        Defines dimension of uniform quasi-random variates to be
        transformed. Default is ``None``.
    qmc_engine : scipy.stats.qmc.QMCEngine(d=1), optional
        Defines the object to use for drawing
        quasi-random variates. Default is ``None``, which uses
        `scipy.stats.qmc.Halton(1)`.

    Returns
    -------
    rvs : ndarray or scalar
        Quasi-random variates. See Notes for shape information.

    Notes
    -----
    The shape of the output array depends on `size`, `d`, and `qmc_engine`.
    The intent is for the interface to be natural, but the detailed rules
    to achieve this are complicated.

    - If `qmc_engine` is ``None``, a `scipy.stats.qmc.Halton` instance is
      created with dimension `d`. If `d` is not provided, ``d=1``.
    - If `qmc_engine` is not ``None`` and `d` is ``None``, `d` is
      determined from the dimension of the `qmc_engine`.
    - If `qmc_engine` is not ``None`` and `d` is not ``None`` but the
      dimensions are inconsistent, a ``ValueError`` is raised.
    - After `d` is determined according to the rules above, the output
      shape is ``tuple_shape + d_shape``, where:

          - ``tuple_shape = tuple()`` if `size` is ``None``,
          - ``tuple_shape = (size,)`` if `size` is an ``int``,
          - ``tuple_shape = size`` if `size` is a sequence,
          - ``d_shape = tuple()`` if `d` is ``None`` or `d` is 1, and
          - ``d_shape = (d,)`` if `d` is greater than 1.

    The elements of the returned array are part of a low-discrepancy
    sequence. If `d` is 1, this means that none of the samples are truly
    independent. If `d` > 1, each slice ``rvs[..., i]`` will be of a
    quasi-independent sequence; see `scipy.stats.qmc.QMCEngine` for
    details. Note that when `d` > 1, the samples returned are still those
    of the provided univariate distribution, not a multivariate
    generalization of that distribution.

    """
    qmc_engine, d = _validate_qmc_input(qmc_engine, d, self.random_state)
    # mainly copied from unuran_wrapper.pyx.templ
    # `rvs` is flexible about whether `size` is an int or tuple, so this
    # should be, too.
    try:
        if size is None:
            tuple_size = (1,)
        else:
            tuple_size = tuple(size)
    except TypeError:
        tuple_size = (size,)
    # we do not use rng.qrvs directly since we need to be
    # able to apply the ppf to 1 - u
    N = 1 if size is None else np.prod(size)
    u = qmc_engine.random(N)
    if self._mirror_uniform:
        u = 1 - u
    qrvs = self._ppf(u)
    if self._rvs_transform is not None:
        qrvs = self._rvs_transform(qrvs, *self._frozendist.args)
    if size is None:
        qrvs = qrvs.squeeze()[()]
    else:
        if d == 1:
            qrvs = qrvs.reshape(tuple_size)
        else:
            qrvs = qrvs.reshape(tuple_size + (d,))
    return self.loc + self.scale * qrvs
|
| 926 |
+
|
| 927 |
+
def evaluate_error(self, size=100000, random_state=None, x_error=False):
    """
    Evaluate the numerical accuracy of the inversion (u- and x-error).

    Parameters
    ----------
    size : int, optional
        The number of random points over which the error is estimated.
        Default is ``100000``.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        A NumPy random number generator or seed for the underlying NumPy
        random number generator used to generate the stream of uniform
        random numbers.
        If `random_state` is None, use ``self.random_state``.
        If `random_state` is an int,
        ``np.random.default_rng(random_state)`` is used.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.

    Returns
    -------
    u_error, x_error : tuple of floats
        A NumPy array of random variates.

    Notes
    -----
    The numerical precision of the inverse CDF `ppf` is controlled by
    the u-error. It is computed as follows:
    ``max |u - CDF(PPF(u))|`` where the max is taken `size` random
    points in the interval [0,1]. `random_state` determines the random
    sample. Note that if `ppf` was exact, the u-error would be zero.

    The x-error measures the direct distance between the exact PPF
    and `ppf`. If ``x_error`` is set to ``True``, it is
    computed as the maximum of the minimum of the relative and absolute
    x-error:
    ``max(min(x_error_abs[i], x_error_rel[i]))`` where
    ``x_error_abs[i] = |PPF(u[i]) - PPF_fast(u[i])|``,
    ``x_error_rel[i] = max |(PPF(u[i]) - PPF_fast(u[i])) / PPF(u[i])|``.
    Note that it is important to consider the relative x-error in the case
    that ``PPF(u)`` is close to zero or very large.

    By default, only the u-error is evaluated and the x-error is set to
    ``np.nan``. Note that the evaluation of the x-error will be very slow
    if the implementation of the PPF is slow.

    Further information about these error measures can be found in [1]_.

    References
    ----------
    .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold.
           "Random variate generation by numerical inversion when only the
           density is known." ACM Transactions on Modeling and Computer
           Simulation (TOMACS) 20.4 (2010): 1-25.

    Examples
    --------

    >>> import numpy as np
    >>> from scipy import stats
    >>> from scipy.stats.sampling import FastGeneratorInversion

    Create an object for the normal distribution:

    >>> d_norm_frozen = stats.norm()
    >>> d_norm = FastGeneratorInversion(d_norm_frozen)

    To confirm that the numerical inversion is accurate, we evaluate the
    approximation error (u-error and x-error).

    >>> u_error, x_error = d_norm.evaluate_error(x_error=True)

    The u-error should be below 1e-10:

    >>> u_error
    8.785783212061915e-11  # may vary

    Compare the PPF against approximation `ppf`:

    >>> q = [0.001, 0.2, 0.4, 0.6, 0.8, 0.999]
    >>> diff = np.abs(d_norm_frozen.ppf(q) - d_norm.ppf(q))
    >>> x_error_abs = np.max(diff)
    >>> x_error_abs
    1.2937954707581412e-08

    This is the absolute x-error evaluated at the points q. The relative
    error is given by

    >>> x_error_rel = np.max(diff / np.abs(d_norm_frozen.ppf(q)))
    >>> x_error_rel
    4.186725600453555e-09

    The x_error computed above is derived in a very similar way over a
    much larger set of random values q. At each value q[i], the minimum
    of the relative and absolute error is taken. The final value is then
    derived as the maximum of these values. In our example, we get the
    following value:

    >>> x_error
    4.507068014335139e-07  # may vary

    """
    if not isinstance(size, (numbers.Integral, np.integer)):
        raise ValueError("size must be an integer.")
    # urng will be used to draw the samples for testing the error
    # it must not interfere with self.random_state. therefore, do not
    # call self.rvs, but draw uniform random numbers and apply
    # self.ppf (note: like in rvs, consider self._mirror_uniform)
    urng = check_random_state_qmc(random_state)
    u = urng.uniform(size=size)
    if self._mirror_uniform:
        u = 1 - u
    x = self.ppf(u)
    uerr = np.max(np.abs(self._cdf(x) - u))
    if not x_error:
        return uerr, np.nan
    ppf_u = self._ppf(u)
    x_error_abs = np.abs(self.ppf(u) - ppf_u)
    x_error_rel = x_error_abs / np.abs(ppf_u)
    x_error_combined = np.array([x_error_abs, x_error_rel]).min(axis=0)
    return uerr, np.max(x_error_combined)
|
| 1050 |
+
|
| 1051 |
+
def support(self):
    """Support of the distribution.

    Returns
    -------
    a, b : float
        end-points of the distribution's support.

    Notes
    -----

    Note that the support of the distribution depends on `loc`,
    `scale` and `domain`.

    Examples
    --------

    >>> from scipy import stats
    >>> from scipy.stats.sampling import FastGeneratorInversion

    Define a truncated normal distribution:

    >>> d_norm = FastGeneratorInversion(stats.norm(), domain=(0, 1))
    >>> d_norm.support()
    (0, 1)

    Shift the distribution:

    >>> d_norm.loc = 2.5
    >>> d_norm.support()
    (2.5, 3.5)

    """
    # _domain_adj is the domain already adjusted for loc/scale
    return self._domain_adj
|
| 1085 |
+
|
| 1086 |
+
def _cdf(self, x):
|
| 1087 |
+
"""Cumulative distribution function (CDF)
|
| 1088 |
+
|
| 1089 |
+
Parameters
|
| 1090 |
+
----------
|
| 1091 |
+
x : array_like
|
| 1092 |
+
The values where the CDF is evaluated
|
| 1093 |
+
|
| 1094 |
+
Returns
|
| 1095 |
+
-------
|
| 1096 |
+
y : ndarray
|
| 1097 |
+
CDF evaluated at x
|
| 1098 |
+
|
| 1099 |
+
"""
|
| 1100 |
+
y = self._frozendist.cdf(x)
|
| 1101 |
+
if self._p_domain == 1.0:
|
| 1102 |
+
return y
|
| 1103 |
+
return np.clip((y - self._p_lower) / self._p_domain, 0, 1)
|
| 1104 |
+
|
| 1105 |
+
def _ppf(self, q):
|
| 1106 |
+
"""Percent point function (inverse of `cdf`)
|
| 1107 |
+
|
| 1108 |
+
Parameters
|
| 1109 |
+
----------
|
| 1110 |
+
q : array_like
|
| 1111 |
+
lower tail probability
|
| 1112 |
+
|
| 1113 |
+
Returns
|
| 1114 |
+
-------
|
| 1115 |
+
x : array_like
|
| 1116 |
+
quantile corresponding to the lower tail probability q.
|
| 1117 |
+
|
| 1118 |
+
"""
|
| 1119 |
+
if self._p_domain == 1.0:
|
| 1120 |
+
return self._frozendist.ppf(q)
|
| 1121 |
+
x = self._frozendist.ppf(self._p_domain * np.array(q) + self._p_lower)
|
| 1122 |
+
return np.clip(x, self._domain_adj[0], self._domain_adj[1])
|
| 1123 |
+
|
| 1124 |
+
|
| 1125 |
+
class RatioUniforms:
    """
    Generate random samples from a probability density function using the
    ratio-of-uniforms method.

    Parameters
    ----------
    pdf : callable
        A function with signature `pdf(x)` that is proportional to the
        probability density function of the distribution.
    umax : float
        The upper bound of the bounding rectangle in the u-direction.
    vmin : float
        The lower bound of the bounding rectangle in the v-direction.
    vmax : float
        The upper bound of the bounding rectangle in the v-direction.
    c : float, optional.
        Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Methods
    -------
    rvs

    Notes
    -----
    Given a univariate probability density function `pdf` and a constant `c`,
    define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
    If ``(U, V)`` is a random vector uniformly distributed over ``A``,
    then ``V/U + c`` follows a distribution according to `pdf`.

    The above result (see [1]_, [2]_) can be used to sample random variables
    using only the PDF, i.e. no inversion of the CDF is required. Typical
    choices of `c` are zero or the mode of `pdf`. The set ``A`` is a subset of
    the rectangle ``R = [0, umax] x [vmin, vmax]`` where

    - ``umax = sup sqrt(pdf(x))``
    - ``vmin = inf (x - c) sqrt(pdf(x))``
    - ``vmax = sup (x - c) sqrt(pdf(x))``

    In particular, these values are finite if `pdf` is bounded and
    ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
    One can generate ``(U, V)`` uniformly on ``R`` and return
    ``V/U + c`` if ``(U, V)`` are also in ``A`` which can be directly
    verified.

    The algorithm is not changed if one replaces `pdf` by k * `pdf` for any
    constant k > 0. Thus, it is often convenient to work with a function
    that is proportional to the probability density function by dropping
    unnecessary normalization factors.

    Intuitively, the method works well if ``A`` fills up most of the
    enclosing rectangle such that the probability is high that ``(U, V)``
    lies in ``A`` whenever it lies in ``R`` as the number of required
    iterations becomes too large otherwise. To be more precise, note that
    the expected number of iterations to draw ``(U, V)`` uniformly
    distributed on ``R`` such that ``(U, V)`` is also in ``A`` is given by
    the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``,
    where `area(pdf)` is the integral of `pdf` (which is equal to one if the
    probability density function is used but can take on other values if a
    function proportional to the density is used). The equality holds since
    the area of ``A`` is equal to ``0.5 * area(pdf)`` (Theorem 7.1 in [1]_).
    If the sampling fails to generate a single random variate after 50000
    iterations (i.e. not a single draw is in ``A``), an exception is raised.

    If the bounding rectangle is not correctly specified (i.e. if it does not
    contain ``A``), the algorithm samples from a distribution different from
    the one given by `pdf`. It is therefore recommended to perform a
    test such as `~scipy.stats.kstest` as a check.

    References
    ----------
    .. [1] L. Devroye, "Non-Uniform Random Variate Generation",
       Springer-Verlag, 1986.

    .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
       random variates", Statistics and Computing, 24(4), p. 547--557, 2014.

    .. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
       Variables Using the Ratio of Uniform Deviates",
       ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats

    >>> from scipy.stats.sampling import RatioUniforms
    >>> rng = np.random.default_rng()

    Simulate normally distributed random variables. It is easy to compute the
    bounding rectangle explicitly in that case. For simplicity, we drop the
    normalization factor of the density.

    >>> f = lambda x: np.exp(-x**2 / 2)
    >>> v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
    >>> umax = np.sqrt(f(0))
    >>> gen = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=rng)
    >>> r = gen.rvs(size=2500)

    The K-S test confirms that the random variates are indeed normally
    distributed (normality is not rejected at 5% significance level):

    >>> stats.kstest(r, 'norm')[1]
    0.250634764150542

    The exponential distribution provides another example where the bounding
    rectangle can be determined explicitly.

    >>> gen = RatioUniforms(lambda x: np.exp(-x), umax=1, vmin=0,
    ...                     vmax=2*np.exp(-1), random_state=rng)
    >>> r = gen.rvs(1000)
    >>> stats.kstest(r, 'expon')[1]
    0.21121052054580314

    """

    def __init__(self, pdf, *, umax, vmin, vmax, c=0, random_state=None):
        if vmin >= vmax:
            raise ValueError("vmin must be smaller than vmax.")

        if umax <= 0:
            raise ValueError("umax must be positive.")

        self._pdf = pdf
        self._umax = umax
        self._vmin = vmin
        self._vmax = vmax
        self._c = c
        self._rng = check_random_state(random_state)

    def rvs(self, size=1):
        """Sampling of random variates

        Parameters
        ----------
        size : int or tuple of ints, optional
            Number of random variates to be generated (default is 1).

        Returns
        -------
        rvs : ndarray
            The random variates distributed according to the probability
            distribution defined by the pdf.

        """
        size1d = tuple(np.atleast_1d(size))
        N = np.prod(size1d)  # number of rvs needed, reshape upon return

        # start sampling using ratio of uniforms method
        x = np.zeros(N)
        simulated, i = 0, 1

        # loop until N rvs have been generated: expected runtime is finite.
        # to avoid infinite loop, raise exception if not a single rv has been
        # generated after 50000 tries. even if the expected number of iterations
        # is 1000, the probability of this event is (1-1/1000)**50000
        # which is of order 10e-22
        while simulated < N:
            k = N - simulated
            # simulate uniform rvs on [0, umax] and [vmin, vmax]
            u1 = self._umax * self._rng.uniform(size=k)
            v1 = self._rng.uniform(self._vmin, self._vmax, size=k)
            # apply rejection method
            rvs = v1 / u1 + self._c
            accept = (u1**2 <= self._pdf(rvs))
            num_accept = np.sum(accept)
            if num_accept > 0:
                x[simulated:(simulated + num_accept)] = rvs[accept]
                simulated += num_accept

            if (simulated == 0) and (i*N >= 50000):
                msg = (
                    f"Not a single random variate could be generated in {i*N} "
                    "attempts. The ratio of uniforms method does not appear "
                    "to work for the provided parameters. Please check the "
                    "pdf and the bounds."
                )
                raise RuntimeError(msg)
            i += 1

        return np.reshape(x, size1d)
|
llava_next/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py
ADDED
|
@@ -0,0 +1,712 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import inspect
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import (
|
| 6 |
+
Callable, Literal, Protocol, TYPE_CHECKING
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from scipy.stats._common import ConfidenceInterval
|
| 12 |
+
from scipy.stats._qmc import check_random_state
|
| 13 |
+
from scipy.stats._resampling import BootstrapResult
|
| 14 |
+
from scipy.stats import qmc, bootstrap
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
import numpy.typing as npt
|
| 19 |
+
from scipy._lib._util import DecimalNumber, IntNumber, SeedType
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
__all__ = [
|
| 23 |
+
'sobol_indices'
|
| 24 |
+
]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def f_ishigami(x: npt.ArrayLike) -> np.ndarray:
    r"""Ishigami function.

    .. math::

        Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1

    with :math:`\mathbf{x} \in [-\pi, \pi]^3`.

    Parameters
    ----------
    x : array_like ([x1, x2, x3], n)

    Returns
    -------
    f : array_like (n,)
        Function evaluation.

    References
    ----------
    .. [1] Ishigami, T. and T. Homma. "An importance quantification technique
       in uncertainty analysis for computer models." IEEE,
       :doi:`10.1109/ISUMA.1990.151285`, 1990.
    """
    x = np.atleast_2d(x)
    f_eval = (
        np.sin(x[0])
        + 7 * np.sin(x[1])**2
        + 0.1 * (x[2]**4) * np.sin(x[0])
    )
    return f_eval
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def sample_A_B(
    n: IntNumber,
    dists: list[PPFDist],
    random_state: SeedType = None
) -> np.ndarray:
    """Sample two matrices A and B.

    Uses a Sobol' sequence with 2`d` columns to have 2 uncorrelated matrices.
    This is more efficient than using 2 random draw of Sobol'.
    See sec. 5 from [1]_.

    Output shape is (d, n).

    References
    ----------
    .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    """
    d = len(dists)
    # one 2d-dimensional Sobol' draw gives two uncorrelated d-dim blocks
    A_B = qmc.Sobol(d=2*d, seed=random_state, bits=64).random(n).T
    A_B = A_B.reshape(2, d, -1)
    try:
        # transform uniforms to each marginal via its inverse CDF
        for d_, dist in enumerate(dists):
            A_B[:, d_] = dist.ppf(A_B[:, d_])
    except AttributeError as exc:
        message = "Each distribution in `dists` must have method `ppf`."
        raise ValueError(message) from exc
    return A_B
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def sample_AB(A: np.ndarray, B: np.ndarray) -> np.ndarray:
    """AB matrix.

    AB: rows of B into A. Shape (d, d, n).
    - Copy A into d "pages"
    - In the first page, replace 1st rows of A with 1st row of B.
    ...
    - In the dth page, replace dth row of A with dth row of B.
    - return the stack of pages
    """
    d, n = A.shape
    AB = np.tile(A, (d, 1, 1))
    # vectorized diagonal assignment: page i gets row i from B
    i = np.arange(d)
    AB[i, i] = B[i]
    return AB
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def saltelli_2010(
    f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray
) -> tuple[np.ndarray, np.ndarray]:
    r"""Saltelli2010 formulation.

    .. math::

        S_i = \frac{1}{N} \sum_{j=1}^N
        f(\mathbf{B})_j (f(\mathbf{AB}^{(i)})_j - f(\mathbf{A})_j)

    .. math::

        S_{T_i} = \frac{1}{N} \sum_{j=1}^N
        (f(\mathbf{A})_j - f(\mathbf{AB}^{(i)})_j)^2

    Parameters
    ----------
    f_A, f_B : array_like (s, n)
        Function values at A and B, respectively
    f_AB : array_like (d, s, n)
        Function values at each of the AB pages

    Returns
    -------
    s, st : array_like (s, d)
        First order and total order Sobol' indices.

    References
    ----------
    .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    """
    # Empirical variance calculated using output from A and B which are
    # independent. Output of AB is not independent and cannot be used
    var = np.var([f_A, f_B], axis=(0, -1))

    # We divide by the variance to have a ratio of variance
    # this leads to eq. 2
    s = np.mean(f_B * (f_AB - f_A), axis=-1) / var  # Table 2 (b)
    st = 0.5 * np.mean((f_A - f_AB) ** 2, axis=-1) / var  # Table 2 (f)

    return s.T, st.T
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@dataclass
class BootstrapSobolResult:
    # Bootstrap results for the first- and total-order Sobol' indices;
    # each field is a `BootstrapResult` (confidence interval + distribution).
    first_order: BootstrapResult
    total_order: BootstrapResult
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
@dataclass
class SobolResult:
    """Result object of `sobol_indices`.

    Public attributes hold the computed indices; the underscore-prefixed
    fields keep the data `bootstrap` needs to resample the indices.
    """
    # First order Sobol' indices.
    first_order: np.ndarray
    # Total order Sobol' indices.
    total_order: np.ndarray
    # Estimator used to compute the indices (e.g. wrapped `saltelli_2010`).
    _indices_method: Callable
    # Function evaluations at the A and B samples and the AB pages.
    _f_A: np.ndarray
    _f_B: np.ndarray
    _f_AB: np.ndarray
    # Raw samples; only set when `sobol_indices` generated them itself.
    _A: np.ndarray | None = None
    _B: np.ndarray | None = None
    _AB: np.ndarray | None = None
    # Cached bootstrap state so successive `bootstrap` calls accumulate
    # resamples instead of starting over.
    _bootstrap_result: BootstrapResult | None = None

    def bootstrap(
        self,
        confidence_level: DecimalNumber = 0.95,
        n_resamples: IntNumber = 999
    ) -> BootstrapSobolResult:
        """Bootstrap Sobol' indices to provide confidence intervals.

        Parameters
        ----------
        confidence_level : float, default: ``0.95``
            The confidence level of the confidence intervals.
        n_resamples : int, default: ``999``
            The number of resamples performed to form the bootstrap
            distribution of the indices.

        Returns
        -------
        res : BootstrapSobolResult
            Bootstrap result containing the confidence intervals and the
            bootstrap distribution of the indices.

            An object with attributes:

            first_order : BootstrapResult
                Bootstrap result of the first order indices.
            total_order : BootstrapResult
                Bootstrap result of the total order indices.
            See `BootstrapResult` for more details.

        """
        def statistic(idx):
            # Resample by indexing the stored evaluations along the sample
            # axis (last axis for the AB pages), then recompute both indices.
            f_A_ = self._f_A[:, idx]
            f_B_ = self._f_B[:, idx]
            f_AB_ = self._f_AB[..., idx]
            return self._indices_method(f_A_, f_B_, f_AB_)

        n = self._f_A.shape[1]

        # `bootstrap` here resolves to the module-level scipy.stats function,
        # not this method. Passing the previous result reuses already-drawn
        # resamples across calls.
        res = bootstrap(
            [np.arange(n)], statistic=statistic, method="BCa",
            n_resamples=n_resamples,
            confidence_level=confidence_level,
            bootstrap_result=self._bootstrap_result
        )
        self._bootstrap_result = res

        # Row 0 of the statistic output is the first order indices,
        # row 1 the total order indices.
        first_order = BootstrapResult(
            confidence_interval=ConfidenceInterval(
                res.confidence_interval.low[0], res.confidence_interval.high[0]
            ),
            bootstrap_distribution=res.bootstrap_distribution[0],
            standard_error=res.standard_error[0],
        )
        total_order = BootstrapResult(
            confidence_interval=ConfidenceInterval(
                res.confidence_interval.low[1], res.confidence_interval.high[1]
            ),
            bootstrap_distribution=res.bootstrap_distribution[1],
            standard_error=res.standard_error[1],
        )

        return BootstrapSobolResult(
            first_order=first_order, total_order=total_order
        )
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class PPFDist(Protocol):
    """Structural type for distributions accepted by `sobol_indices`.

    Any object exposing a ``ppf`` (percent-point function, i.e. inverse
    CDF) attribute matches — e.g. frozen `scipy.stats` distributions.
    """
    @property
    def ppf(self) -> Callable[..., float]:
        ...
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def sobol_indices(
    *,
    func: Callable[[np.ndarray], npt.ArrayLike] |
        dict[Literal['f_A', 'f_B', 'f_AB'], np.ndarray],
    n: IntNumber,
    dists: list[PPFDist] | None = None,
    method: Callable | Literal['saltelli_2010'] = 'saltelli_2010',
    random_state: SeedType = None
) -> SobolResult:
    r"""Global sensitivity indices of Sobol'.

    Parameters
    ----------
    func : callable or dict(str, array_like)
        If `func` is a callable, function to compute the Sobol' indices from.
        Its signature must be::

            func(x: ArrayLike) -> ArrayLike

        with ``x`` of shape ``(d, n)`` and output of shape ``(s, n)`` where:

        - ``d`` is the input dimensionality of `func`
          (number of input variables),
        - ``s`` is the output dimensionality of `func`
          (number of output variables), and
        - ``n`` is the number of samples (see `n` below).

        Function evaluation values must be finite.

        If `func` is a dictionary, contains the function evaluations from three
        different arrays. Keys must be: ``f_A``, ``f_B`` and ``f_AB``.
        ``f_A`` and ``f_B`` should have a shape ``(s, n)`` and ``f_AB``
        should have a shape ``(d, s, n)``.
        This is an advanced feature and misuse can lead to wrong analysis.
    n : int
        Number of samples used to generate the matrices ``A`` and ``B``.
        Must be a power of 2. The total number of points at which `func` is
        evaluated will be ``n*(d+2)``.
    dists : list(distributions), optional
        List of each parameter's distribution. The distribution of parameters
        depends on the application and should be carefully chosen.
        Parameters are assumed to be independently distributed, meaning there
        is no constraint nor relationship between their values.

        Distributions must be an instance of a class with a ``ppf``
        method.

        Must be specified if `func` is a callable, and ignored otherwise.
    method : Callable or str, default: 'saltelli_2010'
        Method used to compute the first and total Sobol' indices.

        If a callable, its signature must be::

            func(f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray)
            -> Tuple[np.ndarray, np.ndarray]

        with ``f_A, f_B`` of shape ``(s, n)`` and ``f_AB`` of shape
        ``(d, s, n)``.
        These arrays contain the function evaluations from three different sets
        of samples.
        The output is a tuple of the first and total indices with
        shape ``(s, d)``.
        This is an advanced feature and misuse can lead to wrong analysis.
    random_state : {None, int, `numpy.random.Generator`}, optional
        If `random_state` is an int or None, a new `numpy.random.Generator` is
        created using ``np.random.default_rng(random_state)``.
        If `random_state` is already a ``Generator`` instance, then the
        provided instance is used.

    Returns
    -------
    res : SobolResult
        An object with attributes:

        first_order : ndarray of shape (s, d)
            First order Sobol' indices.
        total_order : ndarray of shape (s, d)
            Total order Sobol' indices.

        And method:

        bootstrap(confidence_level: float, n_resamples: int)
        -> BootstrapSobolResult

            A method providing confidence intervals on the indices.
            See `scipy.stats.bootstrap` for more details.

            The bootstrapping is done on both first and total order indices,
            and they are available in `BootstrapSobolResult` as attributes
            ``first_order`` and ``total_order``.

    Notes
    -----
    The Sobol' method [1]_, [2]_ is a variance-based Sensitivity Analysis which
    obtains the contribution of each parameter to the variance of the
    quantities of interest (QoIs; i.e., the outputs of `func`).
    Respective contributions can be used to rank the parameters and
    also gauge the complexity of the model by computing the
    model's effective (or mean) dimension.

    .. note::

        Parameters are assumed to be independently distributed. Each
        parameter can still follow any distribution. In fact, the distribution
        is very important and should match the real distribution of the
        parameters.

    It uses a functional decomposition of the variance of the function to
    explore

    .. math::

        \mathbb{V}(Y) = \sum_{i}^{d} \mathbb{V}_i (Y) + \sum_{i<j}^{d}
        \mathbb{V}_{ij}(Y) + ... + \mathbb{V}_{1,2,...,d}(Y),

    introducing conditional variances:

    .. math::

        \mathbb{V}_i(Y) = \mathbb{\mathbb{V}}[\mathbb{E}(Y|x_i)]
        \qquad
        \mathbb{V}_{ij}(Y) = \mathbb{\mathbb{V}}[\mathbb{E}(Y|x_i x_j)]
        - \mathbb{V}_i(Y) - \mathbb{V}_j(Y),

    Sobol' indices are expressed as

    .. math::

        S_i = \frac{\mathbb{V}_i(Y)}{\mathbb{V}[Y]}
        \qquad
        S_{ij} =\frac{\mathbb{V}_{ij}(Y)}{\mathbb{V}[Y]}.

    :math:`S_{i}` corresponds to the first-order term which apprises the
    contribution of the i-th parameter, while :math:`S_{ij}` corresponds to the
    second-order term which informs about the contribution of interactions
    between the i-th and the j-th parameters. These equations can be
    generalized to compute higher order terms; however, they are expensive to
    compute and their interpretation is complex.
    This is why only first order indices are provided.

    Total order indices represent the global contribution of the parameters
    to the variance of the QoI and are defined as:

    .. math::

        S_{T_i} = S_i + \sum_j S_{ij} + \sum_{j,k} S_{ijk} + ...
        = 1 - \frac{\mathbb{V}[\mathbb{E}(Y|x_{\sim i})]}{\mathbb{V}[Y]}.

    First order indices sum to at most 1, while total order indices sum to at
    least 1. If there are no interactions, then first and total order indices
    are equal, and both first and total order indices sum to 1.

    .. warning::

        Negative Sobol' values are due to numerical errors. Increasing the
        number of points `n` should help.

        The number of sample required to have a good analysis increases with
        the dimensionality of the problem. e.g. for a 3 dimension problem,
        consider at minima ``n >= 2**12``. The more complex the model is,
        the more samples will be needed.

        Even for a purely additive model, the indices may not sum to 1 due
        to numerical noise.

    References
    ----------
    .. [1] Sobol, I. M.. "Sensitivity analysis for nonlinear mathematical
       models." Mathematical Modeling and Computational Experiment, 1:407-414,
       1993.
    .. [2] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
       mathematical models and their Monte Carlo estimates." Mathematics
       and Computers in Simulation, 55(1-3):271-280,
       :doi:`10.1016/S0378-4754(00)00270-6`, 2001.
    .. [3] Saltelli, A. "Making best use of model evaluations to
       compute sensitivity indices." Computer Physics Communications,
       145(2):280-297, :doi:`10.1016/S0010-4655(02)00280-1`, 2002.
    .. [4] Saltelli, A., M. Ratto, T. Andres, F. Campolongo, J. Cariboni,
       D. Gatelli, M. Saisana, and S. Tarantola. "Global Sensitivity Analysis.
       The Primer." 2007.
    .. [5] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    .. [6] Ishigami, T. and T. Homma. "An importance quantification technique
       in uncertainty analysis for computer models." IEEE,
       :doi:`10.1109/ISUMA.1990.151285`, 1990.

    Examples
    --------
    The following is an example with the Ishigami function [6]_

    .. math::

        Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1,

    with :math:`\mathbf{x} \in [-\pi, \pi]^3`. This function exhibits strong
    non-linearity and non-monotonicity.

    Remember, Sobol' indices assumes that samples are independently
    distributed. In this case we use a uniform distribution on each marginals.

    >>> import numpy as np
    >>> from scipy.stats import sobol_indices, uniform
    >>> rng = np.random.default_rng()
    >>> def f_ishigami(x):
    ...     f_eval = (
    ...         np.sin(x[0])
    ...         + 7 * np.sin(x[1])**2
    ...         + 0.1 * (x[2]**4) * np.sin(x[0])
    ...     )
    ...     return f_eval
    >>> indices = sobol_indices(
    ...     func=f_ishigami, n=1024,
    ...     dists=[
    ...         uniform(loc=-np.pi, scale=2*np.pi),
    ...         uniform(loc=-np.pi, scale=2*np.pi),
    ...         uniform(loc=-np.pi, scale=2*np.pi)
    ...     ],
    ...     random_state=rng
    ... )
    >>> indices.first_order
    array([0.31637954, 0.43781162, 0.00318825])
    >>> indices.total_order
    array([0.56122127, 0.44287857, 0.24229595])

    Confidence interval can be obtained using bootstrapping.

    >>> boot = indices.bootstrap()

    Then, this information can be easily visualized.

    >>> import matplotlib.pyplot as plt
    >>> fig, axs = plt.subplots(1, 2, figsize=(9, 4))
    >>> _ = axs[0].errorbar(
    ...     [1, 2, 3], indices.first_order, fmt='o',
    ...     yerr=[
    ...         indices.first_order - boot.first_order.confidence_interval.low,
    ...         boot.first_order.confidence_interval.high - indices.first_order
    ...     ],
    ... )
    >>> axs[0].set_ylabel("First order Sobol' indices")
    >>> axs[0].set_xlabel('Input parameters')
    >>> axs[0].set_xticks([1, 2, 3])
    >>> _ = axs[1].errorbar(
    ...     [1, 2, 3], indices.total_order, fmt='o',
    ...     yerr=[
    ...         indices.total_order - boot.total_order.confidence_interval.low,
    ...         boot.total_order.confidence_interval.high - indices.total_order
    ...     ],
    ... )
    >>> axs[1].set_ylabel("Total order Sobol' indices")
    >>> axs[1].set_xlabel('Input parameters')
    >>> axs[1].set_xticks([1, 2, 3])
    >>> plt.tight_layout()
    >>> plt.show()

    .. note::

        By default, `scipy.stats.uniform` has support ``[0, 1]``.
        Using the parameters ``loc`` and ``scale``, one obtains the uniform
        distribution on ``[loc, loc + scale]``.

    This result is particularly interesting because the first order index
    :math:`S_{x_3} = 0` whereas its total order is :math:`S_{T_{x_3}} = 0.244`.
    This means that higher order interactions with :math:`x_3` are responsible
    for the difference. Almost 25% of the observed variance
    on the QoI is due to the correlations between :math:`x_3` and :math:`x_1`,
    although :math:`x_3` by itself has no impact on the QoI.

    The following gives a visual explanation of Sobol' indices on this
    function. Let's generate 1024 samples in :math:`[-\pi, \pi]^3` and
    calculate the value of the output.

    >>> from scipy.stats import qmc
    >>> n_dim = 3
    >>> p_labels = ['$x_1$', '$x_2$', '$x_3$']
    >>> sample = qmc.Sobol(d=n_dim, seed=rng).random(1024)
    >>> sample = qmc.scale(
    ...     sample=sample,
    ...     l_bounds=[-np.pi, -np.pi, -np.pi],
    ...     u_bounds=[np.pi, np.pi, np.pi]
    ... )
    >>> output = f_ishigami(sample.T)

    Now we can do scatter plots of the output with respect to each parameter.
    This gives a visual way to understand how each parameter impacts the
    output of the function.

    >>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4))
    >>> for i in range(n_dim):
    ...     xi = sample[:, i]
    ...     ax[i].scatter(xi, output, marker='+')
    ...     ax[i].set_xlabel(p_labels[i])
    >>> ax[0].set_ylabel('Y')
    >>> plt.tight_layout()
    >>> plt.show()

    Now Sobol' goes a step further:
    by conditioning the output value by given values of the parameter
    (black lines), the conditional output mean is computed. It corresponds to
    the term :math:`\mathbb{E}(Y|x_i)`. Taking the variance of this term gives
    the numerator of the Sobol' indices.

    >>> mini = np.min(output)
    >>> maxi = np.max(output)
    >>> n_bins = 10
    >>> bins = np.linspace(-np.pi, np.pi, num=n_bins, endpoint=False)
    >>> dx = bins[1] - bins[0]
    >>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4))
    >>> for i in range(n_dim):
    ...     xi = sample[:, i]
    ...     ax[i].scatter(xi, output, marker='+')
    ...     ax[i].set_xlabel(p_labels[i])
    ...     for bin_ in bins:
    ...         idx = np.where((bin_ <= xi) & (xi <= bin_ + dx))
    ...         xi_ = xi[idx]
    ...         y_ = output[idx]
    ...         ave_y_ = np.mean(y_)
    ...         ax[i].plot([bin_ + dx/2] * 2, [mini, maxi], c='k')
    ...         ax[i].scatter(bin_ + dx/2, ave_y_, c='r')
    >>> ax[0].set_ylabel('Y')
    >>> plt.tight_layout()
    >>> plt.show()

    Looking at :math:`x_3`, the variance
    of the mean is zero leading to :math:`S_{x_3} = 0`. But we can further
    observe that the variance of the output is not constant along the parameter
    values of :math:`x_3`. This heteroscedasticity is explained by higher order
    interactions. Moreover, an heteroscedasticity is also noticeable on
    :math:`x_1` leading to an interaction between :math:`x_3` and :math:`x_1`.
    On :math:`x_2`, the variance seems to be constant and thus null interaction
    with this parameter can be supposed.

    This case is fairly simple to analyse visually---although it is only a
    qualitative analysis. Nevertheless, when the number of input parameters
    increases such analysis becomes unrealistic as it would be difficult to
    conclude on high-order terms. Hence the benefit of using Sobol' indices.

    """
    random_state = check_random_state(random_state)

    # `n` must be an exact power of 2 (Sobol' sequence balance properties).
    # `n_ & (n_ - 1) == 0` is the standard power-of-two bit trick;
    # `n != n_` also rejects non-integer inputs such as 4.5.
    n_ = int(n)
    if not (n_ & (n_ - 1) == 0) or n != n_:
        raise ValueError(
            "The balance properties of Sobol' points require 'n' "
            "to be a power of 2."
        )
    n = n_

    if not callable(method):
        # Resolve a string method name to its implementation.
        indices_methods: dict[str, Callable] = {
            "saltelli_2010": saltelli_2010,
        }
        try:
            method = method.lower()  # type: ignore[assignment]
            indices_method_ = indices_methods[method]
        except KeyError as exc:
            message = (
                f"{method!r} is not a valid 'method'. It must be one of"
                f" {set(indices_methods)!r} or a callable."
            )
            raise ValueError(message) from exc
    else:
        # A user-supplied estimator must use the exact keyword names below
        # because it is always called with keyword arguments.
        indices_method_ = method
        sig = inspect.signature(indices_method_)

        if set(sig.parameters) != {'f_A', 'f_B', 'f_AB'}:
            message = (
                "If 'method' is a callable, it must have the following"
                f" signature: {inspect.signature(saltelli_2010)}"
            )
            raise ValueError(message)

    def indices_method(f_A, f_B, f_AB):
        """Wrap indices method to ensure proper output dimension.

        1D when single output, 2D otherwise.
        """
        return np.squeeze(indices_method_(f_A=f_A, f_B=f_B, f_AB=f_AB))

    if callable(func):
        if dists is None:
            raise ValueError(
                "'dists' must be defined when 'func' is a callable."
            )

        def wrapped_func(x):
            # Promote 1-D output (single QoI) to the expected (s, n) layout.
            return np.atleast_2d(func(x))

        A, B = sample_A_B(n=n, dists=dists, random_state=random_state)
        AB = sample_AB(A=A, B=B)

        f_A = wrapped_func(A)

        if f_A.shape[1] != n:
            raise ValueError(
                "'func' output should have a shape ``(s, -1)`` with ``s`` "
                "the number of output."
            )

        def funcAB(AB):
            # Flatten the (d, d, n) AB tensor into one (d, n*d) batch so
            # `func` is evaluated in a single call, then restore the
            # per-page (d, s, n) layout.
            d, d, n = AB.shape
            AB = np.moveaxis(AB, 0, -1).reshape(d, n*d)
            f_AB = wrapped_func(AB)
            return np.moveaxis(f_AB.reshape((-1, n, d)), -1, 0)

        f_B = wrapped_func(B)
        f_AB = funcAB(AB)
    else:
        message = (
            "When 'func' is a dictionary, it must contain the following "
            "keys: 'f_A', 'f_B' and 'f_AB'."
            "'f_A' and 'f_B' should have a shape ``(s, n)`` and 'f_AB' "
            "should have a shape ``(d, s, n)``."
        )
        try:
            f_A, f_B, f_AB = np.atleast_2d(
                func['f_A'], func['f_B'], func['f_AB']
            )
        except KeyError as exc:
            raise ValueError(message) from exc

        # NOTE(review): `f_AB.shape == f_A.shape` rejects an `f_AB` missing
        # the leading `d` dimension — confirm this is the intended check.
        if f_A.shape[1] != n or f_A.shape != f_B.shape or \
                f_AB.shape == f_A.shape or f_AB.shape[-1] % n != 0:
            raise ValueError(message)

    # Normalization by mean
    # Sobol', I. and Levitan, Y. L. (1999). On the use of variance reducing
    # multipliers in monte carlo computations of a global sensitivity index.
    # Computer Physics Communications, 117(1) :52-61.
    mean = np.mean([f_A, f_B], axis=(0, -1)).reshape(-1, 1)
    f_A -= mean
    f_B -= mean
    f_AB -= mean

    # Compute indices
    # Filter warnings for constant output as var = 0
    with np.errstate(divide='ignore', invalid='ignore'):
        first_order, total_order = indices_method(f_A=f_A, f_B=f_B, f_AB=f_AB)

    # null variance means null indices
    first_order[~np.isfinite(first_order)] = 0
    total_order[~np.isfinite(total_order)] = 0

    res = dict(
        first_order=first_order,
        total_order=total_order,
        _indices_method=indices_method,
        _f_A=f_A,
        _f_B=f_B,
        _f_AB=f_AB
    )

    # Raw samples are only available when we generated them ourselves.
    if callable(func):
        res.update(
            dict(
                _A=A,
                _B=B,
                _AB=AB,
            )
        )

    return SobolResult(**res)
|
llava_next/lib/python3.10/site-packages/scipy/stats/contingency.py
ADDED
|
@@ -0,0 +1,468 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Contingency table functions (:mod:`scipy.stats.contingency`)
|
| 3 |
+
============================================================
|
| 4 |
+
|
| 5 |
+
Functions for creating and analyzing contingency tables.
|
| 6 |
+
|
| 7 |
+
.. currentmodule:: scipy.stats.contingency
|
| 8 |
+
|
| 9 |
+
.. autosummary::
|
| 10 |
+
:toctree: generated/
|
| 11 |
+
|
| 12 |
+
chi2_contingency
|
| 13 |
+
relative_risk
|
| 14 |
+
odds_ratio
|
| 15 |
+
crosstab
|
| 16 |
+
association
|
| 17 |
+
|
| 18 |
+
expected_freq
|
| 19 |
+
margins
|
| 20 |
+
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
from functools import reduce
|
| 25 |
+
import math
|
| 26 |
+
import numpy as np
|
| 27 |
+
from ._stats_py import power_divergence
|
| 28 |
+
from ._relative_risk import relative_risk
|
| 29 |
+
from ._crosstab import crosstab
|
| 30 |
+
from ._odds_ratio import odds_ratio
|
| 31 |
+
from scipy._lib._bunch import _make_tuple_bunch
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Public API of `scipy.stats.contingency`.
__all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
           'association', 'relative_risk', 'odds_ratio']
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def margins(a):
    """Return a list of the marginal sums of the array `a`.

    Parameters
    ----------
    a : ndarray
        The array for which to compute the marginal sums.

    Returns
    -------
    margsums : list of ndarrays
        A list of length ``a.ndim``. ``margsums[k]`` is `a` summed over
        every axis except axis ``k``. Each entry keeps ``a.ndim``
        dimensions, with every axis other than ``k`` collapsed to
        length 1, so the entries broadcast against each other.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.contingency import margins

    >>> a = np.arange(12).reshape(2, 6)
    >>> m0, m1 = margins(a)
    >>> m0
    array([[15],
           [51]])
    >>> m1
    array([[ 6,  8, 10, 12, 14, 16]])
    """
    axes = range(a.ndim)
    # For each axis to keep, sum over all the other axes.
    # `apply_over_axes` preserves dimensionality, which keeps the results
    # broadcast-compatible with `a`.
    return [
        np.apply_over_axes(np.sum, a, [ax for ax in axes if ax != keep])
        for keep in axes
    ]
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def expected_freq(observed):
    """
    Compute the expected frequencies from a contingency table.

    Given an n-dimensional contingency table of observed frequencies,
    compute the expected frequencies for the table based on the marginal
    sums under the assumption that the groups associated with each
    dimension are independent.

    Parameters
    ----------
    observed : array_like
        The table of observed frequencies. (While this function can handle
        a 1-D array, that case is trivial. Generally `observed` is at
        least 2-D.)

    Returns
    -------
    expected : ndarray of float64
        The expected frequencies, based on the marginal sums of the table.
        Same shape as `observed`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.contingency import expected_freq
    >>> observed = np.array([[10, 10, 20],[20, 20, 20]])
    >>> expected_freq(observed)
    array([[ 12.,  12.,  16.],
           [ 18.,  18.,  24.]])

    """
    # Work in float64 from the start: `observed` is typically an integer
    # array, and products of marginal sums can overflow integer dtypes for
    # tables with many dimensions or large counts.
    table = np.asarray(observed, dtype=np.float64)

    ndim = table.ndim
    axes = list(range(ndim))
    # Marginal sums: for each axis `k`, sum over all the other axes.
    # `apply_over_axes` keeps `ndim` dimensions (length 1 everywhere but
    # axis `k`), so the marginals broadcast in the product below.
    margsums = [
        np.apply_over_axes(np.sum, table, [j for j in axes if j != k])
        for k in axes
    ]

    # Under independence, E = prod(marginals) / total ** (ndim - 1).
    return reduce(np.multiply, margsums) / table.sum() ** (ndim - 1)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Named-tuple-like result type returned by `chi2_contingency`.
# Tuple fields are (statistic, pvalue, dof, expected_freq); the trailing
# empty list declares no extra (non-tuple) attributes.
Chi2ContingencyResult = _make_tuple_bunch(
    'Chi2ContingencyResult',
    ['statistic', 'pvalue', 'dof', 'expected_freq'], []
)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def chi2_contingency(observed, correction=True, lambda_=None):
    """Chi-square test of independence of variables in a contingency table.

    Computes the chi-square statistic and p-value for the hypothesis test
    of independence of the observed frequencies in the contingency table
    `observed`.  Expected frequencies are derived from the marginal sums
    under the independence assumption (see
    `scipy.stats.contingency.expected_freq`).  The degrees of freedom are::

        dof = observed.size - sum(observed.shape) + observed.ndim - 1

    Parameters
    ----------
    observed : array_like
        The contingency table of observed frequencies (counts per
        category).  In the 2-D case this is the usual "R x C table".
    correction : bool, optional
        If True *and* ``dof == 1``, apply Yates' correction for
        continuity: each observed value is moved up to 0.5 towards its
        expected value (never past it).
    lambda_ : float or str, optional
        Selects a statistic from the Cressie-Read power divergence
        family instead of Pearson's chi-squared; see
        `scipy.stats.power_divergence`.

    Returns
    -------
    res : Chi2ContingencyResult
        Bunch with attributes ``statistic`` (float), ``pvalue`` (float),
        ``dof`` (int) and ``expected_freq`` (ndarray, same shape as
        `observed`).

    Raises
    ------
    ValueError
        If `observed` contains negative entries, is empty, or yields a
        zero expected frequency (which would make the statistic
        undefined).

    Notes
    -----
    A common guideline is that each observed and expected cell count
    should be at least 5 for the test to be valid.  A one-dimensional
    table is degenerate: ``expected == observed`` and the statistic is 0.
    Masked arrays are not supported.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import chi2_contingency
    >>> table = np.array([[176, 230], [21035, 21018]])
    >>> res = chi2_contingency(table)
    >>> res.statistic
    6.892569132546561
    >>> res.pvalue
    0.008655478161175739
    """
    table = np.asarray(observed)
    if np.any(table < 0):
        raise ValueError("All values in `observed` must be nonnegative.")
    if table.size == 0:
        raise ValueError("No data; `observed` has size 0.")

    exp = expected_freq(table)
    zero_mask = (exp == 0)
    if zero_mask.any():
        # Report a single offending position so the user can locate the
        # degenerate row/column in the input table.
        first_zero = list(zip(*np.nonzero(zero_mask)))[0]
        raise ValueError("The internally computed table of expected "
                         f"frequencies has a zero element at {first_zero}.")

    # Degrees of freedom for a table of this shape.
    dof = exp.size - sum(exp.shape) + exp.ndim - 1

    if dof == 0:
        # Degenerate case: only one nontrivial dimension, so the
        # observed and expected tables coincide and the statistic is 0.
        stat, pval = 0.0, 1.0
    else:
        if correction and dof == 1:
            # Yates' continuity correction.  The shift towards the
            # expected value is capped at the actual difference so the
            # correction can never overshoot (see gh-13875).
            shift = exp - table
            table = table + np.sign(shift) * np.minimum(0.5, np.abs(shift))

        stat, pval = power_divergence(table, exp,
                                      ddof=table.size - 1 - dof,
                                      axis=None, lambda_=lambda_)

    return Chi2ContingencyResult(stat, pval, dof, exp)
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def association(observed, method="cramer", correction=False, lambda_=None):
    """Calculates degree of association between two nominal variables.

    Computes one of three measures of association between the two nominal
    variables of a 2-D contingency table: Cramer's V, Tschuprow's T, or
    Pearson's Contingency Coefficient.  A value of 1.0 indicates perfect
    association; 0.0 indicates none.

    Parameters
    ----------
    observed : array-like
        2-D integer array of observed frequencies.
    method : {"cramer", "tschuprow", "pearson"}, optional
        Which association statistic to compute (default ``"cramer"``).
    correction : bool, optional
        Passed through to `scipy.stats.contingency.chi2_contingency`.
    lambda_ : float or str, optional
        Passed through to `scipy.stats.contingency.chi2_contingency`.

    Returns
    -------
    statistic : float
        Value of the requested association statistic.

    Raises
    ------
    ValueError
        If `observed` is not an integer array, is not 2-D, or `method`
        is not one of the three supported names.

    Notes
    -----
    Both Cramer's V and Tschuprow's T extend the phi coefficient and are
    closely related; they diverge more as the table shape departs from
    2x2.  Association measures how strongly two nominal variables are
    related, which is distinct from correlation.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.contingency import association
    >>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
    >>> association(obs4x2, method="cramer")
    0.18617813077483678
    """
    table = np.asarray(observed)
    if not np.issubdtype(table.dtype, np.integer):
        raise ValueError("`observed` must be an integer array.")
    if table.ndim != 2:
        raise ValueError("method only accepts 2d arrays")

    # All three statistics are normalizations of phi^2, the chi-squared
    # statistic divided by the total count.
    test = chi2_contingency(table, correction=correction, lambda_=lambda_)
    phi_squared = test.statistic / table.sum()
    rows, cols = table.shape

    if method == "cramer":
        ratio = phi_squared / min(cols - 1, rows - 1)
    elif method == "tschuprow":
        ratio = phi_squared / math.sqrt((rows - 1) * (cols - 1))
    elif method == 'pearson':
        ratio = phi_squared / (1 + phi_squared)
    else:
        raise ValueError("Invalid argument value: 'method' argument must "
                         "be 'cramer', 'tschuprow', or 'pearson'")

    return math.sqrt(ratio)
|
llava_next/lib/python3.10/site-packages/scipy/stats/distributions.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Author: Travis Oliphant 2002-2011 with contributions from
|
| 3 |
+
# SciPy Developers 2004-2011
|
| 4 |
+
#
|
| 5 |
+
# NOTE: To look at history using `git blame`, use `git blame -M -C -C`
|
| 6 |
+
# instead of `git blame -Lxxx,+x`.
|
| 7 |
+
#
|
| 8 |
+
from ._distn_infrastructure import (rv_discrete, rv_continuous, rv_frozen) # noqa: F401
|
| 9 |
+
|
| 10 |
+
from . import _continuous_distns
|
| 11 |
+
from . import _discrete_distns
|
| 12 |
+
|
| 13 |
+
from ._continuous_distns import * # noqa: F403
|
| 14 |
+
from ._levy_stable import levy_stable
|
| 15 |
+
from ._discrete_distns import * # noqa: F403
|
| 16 |
+
from ._entropy import entropy
|
| 17 |
+
|
| 18 |
+
# For backwards compatibility e.g. pymc expects distributions.__all__.
|
| 19 |
+
__all__ = ['rv_discrete', 'rv_continuous', 'rv_histogram', 'entropy'] # noqa: F405
|
| 20 |
+
|
| 21 |
+
# Add only the distribution names, not the *_gen names.
|
| 22 |
+
__all__ += _continuous_distns._distn_names
|
| 23 |
+
__all__ += ['levy_stable']
|
| 24 |
+
__all__ += _discrete_distns._distn_names
|
llava_next/lib/python3.10/site-packages/scipy/stats/mstats.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
===================================================================
|
| 3 |
+
Statistical functions for masked arrays (:mod:`scipy.stats.mstats`)
|
| 4 |
+
===================================================================
|
| 5 |
+
|
| 6 |
+
.. currentmodule:: scipy.stats.mstats
|
| 7 |
+
|
| 8 |
+
This module contains a large number of statistical functions that can
|
| 9 |
+
be used with masked arrays.
|
| 10 |
+
|
| 11 |
+
Most of these functions are similar to those in `scipy.stats` but might
|
| 12 |
+
have small differences in the API or in the algorithm used. Since this
|
| 13 |
+
is a relatively new package, some API changes are still possible.
|
| 14 |
+
|
| 15 |
+
Summary statistics
|
| 16 |
+
==================
|
| 17 |
+
|
| 18 |
+
.. autosummary::
|
| 19 |
+
:toctree: generated/
|
| 20 |
+
|
| 21 |
+
describe
|
| 22 |
+
gmean
|
| 23 |
+
hmean
|
| 24 |
+
kurtosis
|
| 25 |
+
mode
|
| 26 |
+
mquantiles
|
| 27 |
+
hdmedian
|
| 28 |
+
hdquantiles
|
| 29 |
+
hdquantiles_sd
|
| 30 |
+
idealfourths
|
| 31 |
+
plotting_positions
|
| 32 |
+
meppf
|
| 33 |
+
moment
|
| 34 |
+
skew
|
| 35 |
+
tmean
|
| 36 |
+
tvar
|
| 37 |
+
tmin
|
| 38 |
+
tmax
|
| 39 |
+
tsem
|
| 40 |
+
variation
|
| 41 |
+
find_repeats
|
| 42 |
+
sem
|
| 43 |
+
trimmed_mean
|
| 44 |
+
trimmed_mean_ci
|
| 45 |
+
trimmed_std
|
| 46 |
+
trimmed_var
|
| 47 |
+
|
| 48 |
+
Frequency statistics
|
| 49 |
+
====================
|
| 50 |
+
|
| 51 |
+
.. autosummary::
|
| 52 |
+
:toctree: generated/
|
| 53 |
+
|
| 54 |
+
scoreatpercentile
|
| 55 |
+
|
| 56 |
+
Correlation functions
|
| 57 |
+
=====================
|
| 58 |
+
|
| 59 |
+
.. autosummary::
|
| 60 |
+
:toctree: generated/
|
| 61 |
+
|
| 62 |
+
f_oneway
|
| 63 |
+
pearsonr
|
| 64 |
+
spearmanr
|
| 65 |
+
pointbiserialr
|
| 66 |
+
kendalltau
|
| 67 |
+
kendalltau_seasonal
|
| 68 |
+
linregress
|
| 69 |
+
siegelslopes
|
| 70 |
+
theilslopes
|
| 71 |
+
sen_seasonal_slopes
|
| 72 |
+
|
| 73 |
+
Statistical tests
|
| 74 |
+
=================
|
| 75 |
+
|
| 76 |
+
.. autosummary::
|
| 77 |
+
:toctree: generated/
|
| 78 |
+
|
| 79 |
+
ttest_1samp
|
| 80 |
+
ttest_onesamp
|
| 81 |
+
ttest_ind
|
| 82 |
+
ttest_rel
|
| 83 |
+
chisquare
|
| 84 |
+
kstest
|
| 85 |
+
ks_2samp
|
| 86 |
+
ks_1samp
|
| 87 |
+
ks_twosamp
|
| 88 |
+
mannwhitneyu
|
| 89 |
+
rankdata
|
| 90 |
+
kruskal
|
| 91 |
+
kruskalwallis
|
| 92 |
+
friedmanchisquare
|
| 93 |
+
brunnermunzel
|
| 94 |
+
skewtest
|
| 95 |
+
kurtosistest
|
| 96 |
+
normaltest
|
| 97 |
+
|
| 98 |
+
Transformations
|
| 99 |
+
===============
|
| 100 |
+
|
| 101 |
+
.. autosummary::
|
| 102 |
+
:toctree: generated/
|
| 103 |
+
|
| 104 |
+
obrientransform
|
| 105 |
+
trim
|
| 106 |
+
trima
|
| 107 |
+
trimmed_stde
|
| 108 |
+
trimr
|
| 109 |
+
trimtail
|
| 110 |
+
trimboth
|
| 111 |
+
winsorize
|
| 112 |
+
zmap
|
| 113 |
+
zscore
|
| 114 |
+
|
| 115 |
+
Other
|
| 116 |
+
=====
|
| 117 |
+
|
| 118 |
+
.. autosummary::
|
| 119 |
+
:toctree: generated/
|
| 120 |
+
|
| 121 |
+
argstoarray
|
| 122 |
+
count_tied_groups
|
| 123 |
+
msign
|
| 124 |
+
compare_medians_ms
|
| 125 |
+
median_cihs
|
| 126 |
+
mjci
|
| 127 |
+
mquantiles_cimj
|
| 128 |
+
rsh
|
| 129 |
+
|
| 130 |
+
"""
|
| 131 |
+
from . import _mstats_basic
|
| 132 |
+
from . import _mstats_extras
|
| 133 |
+
from ._mstats_basic import * # noqa: F403
|
| 134 |
+
from ._mstats_extras import * # noqa: F403
|
| 135 |
+
# Functions that support masked array input in stats but need to be kept in the
|
| 136 |
+
# mstats namespace for backwards compatibility:
|
| 137 |
+
from scipy.stats import gmean, hmean, zmap, zscore, chisquare
|
| 138 |
+
|
| 139 |
+
__all__ = _mstats_basic.__all__ + _mstats_extras.__all__
|
| 140 |
+
__all__ += ['gmean', 'hmean', 'zmap', 'zscore', 'chisquare']
|
llava_next/lib/python3.10/site-packages/scipy/stats/mstats_extras.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.stats` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'compare_medians_ms',
|
| 10 |
+
'hdquantiles', 'hdmedian', 'hdquantiles_sd',
|
| 11 |
+
'idealfourths',
|
| 12 |
+
'median_cihs','mjci','mquantiles_cimj',
|
| 13 |
+
'rsh',
|
| 14 |
+
'trimmed_mean_ci',
|
| 15 |
+
]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def __dir__():
    """Restrict ``dir()`` on this shim module to the deprecated public names."""
    return __all__
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def __getattr__(name):
    """Resolve `name` via the SciPy deprecation shim.

    Forwards attribute lookups on the deprecated ``mstats_extras`` module
    to the private implementation module, emitting the standard SciPy
    deprecation warning and pointing users at ``mstats`` instead.
    """
    return _sub_module_deprecation(
        sub_package="stats",
        module="mstats_extras",
        private_modules=["_mstats_extras"],
        all=__all__,
        attribute=name,
        correct_module="mstats",
    )
|
parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
bitsandbytes-0.41.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
bitsandbytes-0.41.0.dist-info/LICENSE,sha256=UkEte8fOQVfqYou6rLiCngqcs8WPV_mRdhJryM8r_IU,1086
|
| 3 |
+
bitsandbytes-0.41.0.dist-info/METADATA,sha256=z88wKooZxLJ9Z5T3i4YEWBIzRKR9o3DZIes663fhUu4,9810
|
| 4 |
+
bitsandbytes-0.41.0.dist-info/NOTICE.md,sha256=_4zDL2L8BqUwtmvoznR_wqhQmsP2QwdXHrAHnBMzAl8,265
|
| 5 |
+
bitsandbytes-0.41.0.dist-info/RECORD,,
|
| 6 |
+
bitsandbytes-0.41.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 7 |
+
bitsandbytes-0.41.0.dist-info/WHEEL,sha256=AtBG6SXL3KF_v0NxLf0ehyVOh0cold-JbJYXNGorC6Q,92
|
| 8 |
+
bitsandbytes-0.41.0.dist-info/top_level.txt,sha256=bK-Zzu-JyIIh4njm8jTYcbuqX-Z80XTcDal4lXCG0-M,13
|
| 9 |
+
bitsandbytes/__init__.py,sha256=mQQknbw8xSpKDtEJgVEiyCemE4HaB-FtAddxY2-Uyhc,670
|
| 10 |
+
bitsandbytes/__main__.py,sha256=rWjs6LsifG_Vglj3WM4brY2IOCjwKpAjuBP3OIzYFPU,4014
|
| 11 |
+
bitsandbytes/__pycache__/__init__.cpython-310.pyc,,
|
| 12 |
+
bitsandbytes/__pycache__/__main__.cpython-310.pyc,,
|
| 13 |
+
bitsandbytes/__pycache__/cextension.cpython-310.pyc,,
|
| 14 |
+
bitsandbytes/__pycache__/functional.cpython-310.pyc,,
|
| 15 |
+
bitsandbytes/__pycache__/utils.cpython-310.pyc,,
|
| 16 |
+
bitsandbytes/autograd/__init__.py,sha256=Ltb59FJrcWYVsTfGW6SscEZtiDhHZe7EFrYnIhnASug,67
|
| 17 |
+
bitsandbytes/autograd/__pycache__/__init__.cpython-310.pyc,,
|
| 18 |
+
bitsandbytes/autograd/__pycache__/_functions.cpython-310.pyc,,
|
| 19 |
+
bitsandbytes/autograd/_functions.py,sha256=ER9xwzolX9T32Xu0VFbvpoRdDiCas1neEaKOZARI2Kw,22361
|
| 20 |
+
bitsandbytes/cextension.py,sha256=klJwL-8ZPylUOETDTW-fvUbZ_Bt_rdB6wRDND1fB_wk,1635
|
| 21 |
+
bitsandbytes/cuda_setup/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 22 |
+
bitsandbytes/cuda_setup/__pycache__/__init__.cpython-310.pyc,,
|
| 23 |
+
bitsandbytes/cuda_setup/__pycache__/env_vars.cpython-310.pyc,,
|
| 24 |
+
bitsandbytes/cuda_setup/__pycache__/main.cpython-310.pyc,,
|
| 25 |
+
bitsandbytes/cuda_setup/env_vars.py,sha256=4T8i0LKAbE6tyDceGbJxdW1o4Nm4_vDLY6br39VwCxc,1614
|
| 26 |
+
bitsandbytes/cuda_setup/main.py,sha256=o9YcJj87_t1yADdrMWY0c_XQRyX_8t3XGjwiERKtaVk,17946
|
| 27 |
+
bitsandbytes/functional.py,sha256=vw-RE4CfEirCvM-O8rsiGKvAGIM5cKWNM0Ekbr8-xXc,79598
|
| 28 |
+
bitsandbytes/libbitsandbytes_cpu.so,sha256=nejNfivapxN6MN_bJxFfR423YImIeqNVhXdts2BcDR8,41608
|
| 29 |
+
bitsandbytes/libbitsandbytes_cuda110.so,sha256=1NM_-9xHfCz2djWods0YXQcDKITkX3KSJfklrUESkKw,5938904
|
| 30 |
+
bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so,sha256=q_1Zn2FlCd6LaXYwjkDrE_rq0lFuNwDjGBJlWM_Nufg,11110784
|
| 31 |
+
bitsandbytes/libbitsandbytes_cuda111.so,sha256=JBLZ6wBWB5x1DasFqxcog59xxks5XHzLAdQFGZjCiDY,8974040
|
| 32 |
+
bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so,sha256=1qsndcAVNcCz-LcXytWYx81hPJgifIgNDw1MSx81ays,20244864
|
| 33 |
+
bitsandbytes/libbitsandbytes_cuda114.so,sha256=kh0dVhz5EoSIcpFoRt9vB9rtMSYayFrT1uQmDAP_nCI,9313912
|
| 34 |
+
bitsandbytes/libbitsandbytes_cuda114_nocublaslt.so,sha256=7BfmpKsEYpxamIB7a9WhjhXN7FC1o0FpyqO8IXu1Ep4,20973856
|
| 35 |
+
bitsandbytes/libbitsandbytes_cuda115.so,sha256=ncH3CjlEB0fyXvvj9my_SkUyfGwj_FVo4D-adRX63Gs,9310152
|
| 36 |
+
bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so,sha256=1vB8bV-E6pXTKZzOmfxFWiz3l7LrtQuSAh9n33oY1hM,20925040
|
| 37 |
+
bitsandbytes/libbitsandbytes_cuda117.so,sha256=bEkYZLxEKQZvsu3Agy-aDcIC2ZqQ8B6JDBHL2n1Osq0,9117944
|
| 38 |
+
bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so,sha256=jqc_QsosEBzjd7cNFNA-6QG5e1GGG1cLfEoh7d23zxA,20741032
|
| 39 |
+
bitsandbytes/libbitsandbytes_cuda118.so,sha256=B2MQaG_5NLc8iVHawOSu3V-ABcpbos6QdpSLTQ0IDXY,14918184
|
| 40 |
+
bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so,sha256=GaYqo8N7cNkxbAhI-dizyyBbuOqbEbNRR0nyh8LIWW4,26516696
|
| 41 |
+
bitsandbytes/libbitsandbytes_cuda120.so,sha256=1olVGrA_Frm3ZzYaUxDKRyeWXbJlTTWhlPjO1a0il_o,14504296
|
| 42 |
+
bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so,sha256=VUXyIHZb4V6-SOGPVPWVHyeKafG9xQPLEQIelTh69Oo,25709592
|
| 43 |
+
bitsandbytes/libbitsandbytes_cuda121.so,sha256=XRKDct-9s0poQp0sNFSgdvrGUMed2lRror6aVBU3hGM,14512488
|
| 44 |
+
bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so,sha256=YeYH36m5h2N7tULUoZ8Gt-CAfb8szLDPW5m9OLAQFAE,25721880
|
| 45 |
+
bitsandbytes/libbitsandbytes_cuda122.so,sha256=FrhXhmfraDbGt5I6OzUI1igJ5OkUKWdKDDq5fPYMU0k,14561032
|
| 46 |
+
bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so,sha256=WPSiBD_ozuUsk_aRdoJd5XVTcnpannmEmR6yok2mZTA,25803272
|
| 47 |
+
bitsandbytes/nn/__init__.py,sha256=i-gJR2uQrRvn8zZCZcS1KC0SbsUqCKTta4aV7HXZTT4,446
|
| 48 |
+
bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc,,
|
| 49 |
+
bitsandbytes/nn/__pycache__/modules.cpython-310.pyc,,
|
| 50 |
+
bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc,,
|
| 51 |
+
bitsandbytes/nn/modules.py,sha256=sIwAAAtMnk9s95HHTOC10rKERMvAl5gw03dCPL12oBY,20528
|
| 52 |
+
bitsandbytes/nn/triton_based_modules.py,sha256=eMEldLd7GX0Dc3dzX0XZpfgzofBPRAi-z1NXf84wCPs,9843
|
| 53 |
+
bitsandbytes/optim/__init__.py,sha256=TSl80yMFkwGBl8N0FBFcfBLt2vt4cZn-hbkuwHGuCUE,794
|
| 54 |
+
bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc,,
|
| 55 |
+
bitsandbytes/optim/__pycache__/adagrad.cpython-310.pyc,,
|
| 56 |
+
bitsandbytes/optim/__pycache__/adam.cpython-310.pyc,,
|
| 57 |
+
bitsandbytes/optim/__pycache__/adamw.cpython-310.pyc,,
|
| 58 |
+
bitsandbytes/optim/__pycache__/lamb.cpython-310.pyc,,
|
| 59 |
+
bitsandbytes/optim/__pycache__/lars.cpython-310.pyc,,
|
| 60 |
+
bitsandbytes/optim/__pycache__/lion.cpython-310.pyc,,
|
| 61 |
+
bitsandbytes/optim/__pycache__/optimizer.cpython-310.pyc,,
|
| 62 |
+
bitsandbytes/optim/__pycache__/rmsprop.cpython-310.pyc,,
|
| 63 |
+
bitsandbytes/optim/__pycache__/sgd.cpython-310.pyc,,
|
| 64 |
+
bitsandbytes/optim/adagrad.py,sha256=E4KsNJKOB2VfgkyKEoeYwFFXnedsxHZItdfzwc5_cdE,3719
|
| 65 |
+
bitsandbytes/optim/adam.py,sha256=nHHvXoeiAuosn4a9VWI3Z7_XmvYC6bOHb8en6mxiwkA,12776
|
| 66 |
+
bitsandbytes/optim/adamw.py,sha256=byibv4xoBM7FUK8FScRTx2KbI4-2Mi0yB8WJCb2x3wE,2699
|
| 67 |
+
bitsandbytes/optim/lamb.py,sha256=hfH4H9eVAHcbjL04DAI_lcPD1OPAmcY4_myow-o21aw,2313
|
| 68 |
+
bitsandbytes/optim/lars.py,sha256=PeUB8RlfaRtHEa-ZZZkrKDdmkHa7XEEfU81irU-mKsY,5653
|
| 69 |
+
bitsandbytes/optim/lion.py,sha256=jANwqVZSAxNZnoqi_OQ9XG8hKa6e84mkwJ9CchtpLHs,2304
|
| 70 |
+
bitsandbytes/optim/optimizer.py,sha256=219zPzx9dpeY0VndzlXt6jn2yV9sEiSXkrxe26wXjIo,25167
|
| 71 |
+
bitsandbytes/optim/rmsprop.py,sha256=1zGT9JIZh214fbBZ-CTirVKk1rQxSZe-BRJzhRtYL2U,2785
|
| 72 |
+
bitsandbytes/optim/sgd.py,sha256=YHVUeEkwxgYx_0GhH0Et6fCpk7rfhboDR2F06jRWz4E,2340
|
| 73 |
+
bitsandbytes/research/__init__.py,sha256=_MilJdwSRWObRfzzy14WD6HsJa6okT4d5YxH4aB9zg4,119
|
| 74 |
+
bitsandbytes/research/__pycache__/__init__.cpython-310.pyc,,
|
| 75 |
+
bitsandbytes/research/autograd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 76 |
+
bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc,,
|
| 77 |
+
bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc,,
|
| 78 |
+
bitsandbytes/research/autograd/_functions.py,sha256=k72rcf4hT3M5GOpGoijWkpTAqjRNoecGlOHmTTn3n80,15874
|
| 79 |
+
bitsandbytes/research/nn/__init__.py,sha256=j5XA_2ZA6efMtcbuUCyegfCLkDDQuL3ix5xS4yKZayY,53
|
| 80 |
+
bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc,,
|
| 81 |
+
bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc,,
|
| 82 |
+
bitsandbytes/research/nn/modules.py,sha256=EnI2qVTosAMkH4G1fQleA0zvm8dZR9G-GJ4pFDo8V9M,2357
|
| 83 |
+
bitsandbytes/triton/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 84 |
+
bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc,,
|
| 85 |
+
bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc,,
|
| 86 |
+
bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequanitze.cpython-310.pyc,,
|
| 87 |
+
bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc,,
|
| 88 |
+
bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc,,
|
| 89 |
+
bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc,,
|
| 90 |
+
bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc,,
|
| 91 |
+
bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc,,
|
| 92 |
+
bitsandbytes/triton/dequantize_rowwise.py,sha256=qdh3f4O53faM6SFT_aYvrytWF_FQW3q2bhBll6Uwfc4,2193
|
| 93 |
+
bitsandbytes/triton/int8_matmul_mixed_dequanitze.py,sha256=QJ_hrZ94ZthnoPD0TCp5ZCPAMkxNNQQY-UNg50TWwHo,8256
|
| 94 |
+
bitsandbytes/triton/int8_matmul_rowwise_dequantize.py,sha256=EMiY3nfx0LIvYEGUqtzcfUonQxwoDcppYli9Qd6kViw,8240
|
| 95 |
+
bitsandbytes/triton/quantize_columnwise_and_transpose.py,sha256=K2fFegPtSsi2tgKxb5goO8YpUmQ6wgTvsXabgTRAFNI,2749
|
| 96 |
+
bitsandbytes/triton/quantize_global.py,sha256=5in9Plx1Kgf6Nx5B1RBXCiJnb0G4qwraGADNiq1LtVc,3957
|
| 97 |
+
bitsandbytes/triton/quantize_rowwise.py,sha256=sraX6TMubZQGiG9Gyh0UFzK823e_TkXZk9R1BILJdPU,2331
|
| 98 |
+
bitsandbytes/triton/triton_utils.py,sha256=f7CP_3lvUoTQJ-xSp4wAfiU8uX_trtGdUsoLzlcsHQY,103
|
| 99 |
+
bitsandbytes/utils.py,sha256=XASxdyR11sKKtY9DIwthe-zLU6v0vXwZzQvIVasjH7o,7499
|
parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/REQUESTED
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
bitsandbytes
|
parrot/lib/python3.10/site-packages/httpcore/__init__.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ._api import request, stream
|
| 2 |
+
from ._async import (
|
| 3 |
+
AsyncConnectionInterface,
|
| 4 |
+
AsyncConnectionPool,
|
| 5 |
+
AsyncHTTP2Connection,
|
| 6 |
+
AsyncHTTP11Connection,
|
| 7 |
+
AsyncHTTPConnection,
|
| 8 |
+
AsyncHTTPProxy,
|
| 9 |
+
AsyncSOCKSProxy,
|
| 10 |
+
)
|
| 11 |
+
from ._backends.base import (
|
| 12 |
+
SOCKET_OPTION,
|
| 13 |
+
AsyncNetworkBackend,
|
| 14 |
+
AsyncNetworkStream,
|
| 15 |
+
NetworkBackend,
|
| 16 |
+
NetworkStream,
|
| 17 |
+
)
|
| 18 |
+
from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream
|
| 19 |
+
from ._backends.sync import SyncBackend
|
| 20 |
+
from ._exceptions import (
|
| 21 |
+
ConnectError,
|
| 22 |
+
ConnectionNotAvailable,
|
| 23 |
+
ConnectTimeout,
|
| 24 |
+
LocalProtocolError,
|
| 25 |
+
NetworkError,
|
| 26 |
+
PoolTimeout,
|
| 27 |
+
ProtocolError,
|
| 28 |
+
ProxyError,
|
| 29 |
+
ReadError,
|
| 30 |
+
ReadTimeout,
|
| 31 |
+
RemoteProtocolError,
|
| 32 |
+
TimeoutException,
|
| 33 |
+
UnsupportedProtocol,
|
| 34 |
+
WriteError,
|
| 35 |
+
WriteTimeout,
|
| 36 |
+
)
|
| 37 |
+
from ._models import URL, Origin, Request, Response
|
| 38 |
+
from ._ssl import default_ssl_context
|
| 39 |
+
from ._sync import (
|
| 40 |
+
ConnectionInterface,
|
| 41 |
+
ConnectionPool,
|
| 42 |
+
HTTP2Connection,
|
| 43 |
+
HTTP11Connection,
|
| 44 |
+
HTTPConnection,
|
| 45 |
+
HTTPProxy,
|
| 46 |
+
SOCKSProxy,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
# The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed.
|
| 50 |
+
try:
|
| 51 |
+
from ._backends.anyio import AnyIOBackend
|
| 52 |
+
except ImportError: # pragma: nocover
|
| 53 |
+
|
| 54 |
+
class AnyIOBackend: # type: ignore
|
| 55 |
+
def __init__(self, *args, **kwargs): # type: ignore
|
| 56 |
+
msg = (
|
| 57 |
+
"Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed."
|
| 58 |
+
)
|
| 59 |
+
raise RuntimeError(msg)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
# The 'httpcore.TrioBackend' class is conditional on 'trio' being installed.
|
| 63 |
+
try:
|
| 64 |
+
from ._backends.trio import TrioBackend
|
| 65 |
+
except ImportError: # pragma: nocover
|
| 66 |
+
|
| 67 |
+
class TrioBackend: # type: ignore
|
| 68 |
+
def __init__(self, *args, **kwargs): # type: ignore
|
| 69 |
+
msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed."
|
| 70 |
+
raise RuntimeError(msg)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
__all__ = [
|
| 74 |
+
# top-level requests
|
| 75 |
+
"request",
|
| 76 |
+
"stream",
|
| 77 |
+
# models
|
| 78 |
+
"Origin",
|
| 79 |
+
"URL",
|
| 80 |
+
"Request",
|
| 81 |
+
"Response",
|
| 82 |
+
# async
|
| 83 |
+
"AsyncHTTPConnection",
|
| 84 |
+
"AsyncConnectionPool",
|
| 85 |
+
"AsyncHTTPProxy",
|
| 86 |
+
"AsyncHTTP11Connection",
|
| 87 |
+
"AsyncHTTP2Connection",
|
| 88 |
+
"AsyncConnectionInterface",
|
| 89 |
+
"AsyncSOCKSProxy",
|
| 90 |
+
# sync
|
| 91 |
+
"HTTPConnection",
|
| 92 |
+
"ConnectionPool",
|
| 93 |
+
"HTTPProxy",
|
| 94 |
+
"HTTP11Connection",
|
| 95 |
+
"HTTP2Connection",
|
| 96 |
+
"ConnectionInterface",
|
| 97 |
+
"SOCKSProxy",
|
| 98 |
+
# network backends, implementations
|
| 99 |
+
"SyncBackend",
|
| 100 |
+
"AnyIOBackend",
|
| 101 |
+
"TrioBackend",
|
| 102 |
+
# network backends, mock implementations
|
| 103 |
+
"AsyncMockBackend",
|
| 104 |
+
"AsyncMockStream",
|
| 105 |
+
"MockBackend",
|
| 106 |
+
"MockStream",
|
| 107 |
+
# network backends, interface
|
| 108 |
+
"AsyncNetworkStream",
|
| 109 |
+
"AsyncNetworkBackend",
|
| 110 |
+
"NetworkStream",
|
| 111 |
+
"NetworkBackend",
|
| 112 |
+
# util
|
| 113 |
+
"default_ssl_context",
|
| 114 |
+
"SOCKET_OPTION",
|
| 115 |
+
# exceptions
|
| 116 |
+
"ConnectionNotAvailable",
|
| 117 |
+
"ProxyError",
|
| 118 |
+
"ProtocolError",
|
| 119 |
+
"LocalProtocolError",
|
| 120 |
+
"RemoteProtocolError",
|
| 121 |
+
"UnsupportedProtocol",
|
| 122 |
+
"TimeoutException",
|
| 123 |
+
"PoolTimeout",
|
| 124 |
+
"ConnectTimeout",
|
| 125 |
+
"ReadTimeout",
|
| 126 |
+
"WriteTimeout",
|
| 127 |
+
"NetworkError",
|
| 128 |
+
"ConnectError",
|
| 129 |
+
"ReadError",
|
| 130 |
+
"WriteError",
|
| 131 |
+
]
|
| 132 |
+
|
| 133 |
+
__version__ = "0.17.3"
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
__locals = locals()
|
| 137 |
+
for __name in __all__:
|
| 138 |
+
if not __name.startswith("__"):
|
| 139 |
+
setattr(__locals[__name], "__module__", "httpcore") # noqa
|
parrot/lib/python3.10/site-packages/httpcore/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.74 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/__pycache__/_api.cpython-310.pyc
ADDED
|
Binary file (3.26 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/__pycache__/_models.cpython-310.pyc
ADDED
|
Binary file (15.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/__pycache__/_ssl.cpython-310.pyc
ADDED
|
Binary file (425 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/__pycache__/_synchronization.cpython-310.pyc
ADDED
|
Binary file (7.84 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/__pycache__/_trace.cpython-310.pyc
ADDED
|
Binary file (3.53 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (865 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_api.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from contextlib import contextmanager
|
| 2 |
+
from typing import Iterator, Optional, Union
|
| 3 |
+
|
| 4 |
+
from ._models import URL, Extensions, HeaderTypes, Response
|
| 5 |
+
from ._sync.connection_pool import ConnectionPool
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def request(
|
| 9 |
+
method: Union[bytes, str],
|
| 10 |
+
url: Union[URL, bytes, str],
|
| 11 |
+
*,
|
| 12 |
+
headers: HeaderTypes = None,
|
| 13 |
+
content: Union[bytes, Iterator[bytes], None] = None,
|
| 14 |
+
extensions: Optional[Extensions] = None,
|
| 15 |
+
) -> Response:
|
| 16 |
+
"""
|
| 17 |
+
Sends an HTTP request, returning the response.
|
| 18 |
+
|
| 19 |
+
```
|
| 20 |
+
response = httpcore.request("GET", "https://www.example.com/")
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
Arguments:
|
| 24 |
+
method: The HTTP method for the request. Typically one of `"GET"`,
|
| 25 |
+
`"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
|
| 26 |
+
url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
|
| 27 |
+
or as str/bytes.
|
| 28 |
+
headers: The HTTP request headers. Either as a dictionary of str/bytes,
|
| 29 |
+
or as a list of two-tuples of str/bytes.
|
| 30 |
+
content: The content of the request body. Either as bytes,
|
| 31 |
+
or as a bytes iterator.
|
| 32 |
+
extensions: A dictionary of optional extra information included on the request.
|
| 33 |
+
Possible keys include `"timeout"`.
|
| 34 |
+
|
| 35 |
+
Returns:
|
| 36 |
+
An instance of `httpcore.Response`.
|
| 37 |
+
"""
|
| 38 |
+
with ConnectionPool() as pool:
|
| 39 |
+
return pool.request(
|
| 40 |
+
method=method,
|
| 41 |
+
url=url,
|
| 42 |
+
headers=headers,
|
| 43 |
+
content=content,
|
| 44 |
+
extensions=extensions,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@contextmanager
|
| 49 |
+
def stream(
|
| 50 |
+
method: Union[bytes, str],
|
| 51 |
+
url: Union[URL, bytes, str],
|
| 52 |
+
*,
|
| 53 |
+
headers: HeaderTypes = None,
|
| 54 |
+
content: Union[bytes, Iterator[bytes], None] = None,
|
| 55 |
+
extensions: Optional[Extensions] = None,
|
| 56 |
+
) -> Iterator[Response]:
|
| 57 |
+
"""
|
| 58 |
+
Sends an HTTP request, returning the response within a content manager.
|
| 59 |
+
|
| 60 |
+
```
|
| 61 |
+
with httpcore.stream("GET", "https://www.example.com/") as response:
|
| 62 |
+
...
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
When using the `stream()` function, the body of the response will not be
|
| 66 |
+
automatically read. If you want to access the response body you should
|
| 67 |
+
either use `content = response.read()`, or `for chunk in response.iter_content()`.
|
| 68 |
+
|
| 69 |
+
Arguments:
|
| 70 |
+
method: The HTTP method for the request. Typically one of `"GET"`,
|
| 71 |
+
`"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
|
| 72 |
+
url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
|
| 73 |
+
or as str/bytes.
|
| 74 |
+
headers: The HTTP request headers. Either as a dictionary of str/bytes,
|
| 75 |
+
or as a list of two-tuples of str/bytes.
|
| 76 |
+
content: The content of the request body. Either as bytes,
|
| 77 |
+
or as a bytes iterator.
|
| 78 |
+
extensions: A dictionary of optional extra information included on the request.
|
| 79 |
+
Possible keys include `"timeout"`.
|
| 80 |
+
|
| 81 |
+
Returns:
|
| 82 |
+
An instance of `httpcore.Response`.
|
| 83 |
+
"""
|
| 84 |
+
with ConnectionPool() as pool:
|
| 85 |
+
with pool.stream(
|
| 86 |
+
method=method,
|
| 87 |
+
url=url,
|
| 88 |
+
headers=headers,
|
| 89 |
+
content=content,
|
| 90 |
+
extensions=extensions,
|
| 91 |
+
) as response:
|
| 92 |
+
yield response
|
parrot/lib/python3.10/site-packages/httpcore/_async/__init__.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .connection import AsyncHTTPConnection
|
| 2 |
+
from .connection_pool import AsyncConnectionPool
|
| 3 |
+
from .http11 import AsyncHTTP11Connection
|
| 4 |
+
from .http_proxy import AsyncHTTPProxy
|
| 5 |
+
from .interfaces import AsyncConnectionInterface
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
from .http2 import AsyncHTTP2Connection
|
| 9 |
+
except ImportError: # pragma: nocover
|
| 10 |
+
|
| 11 |
+
class AsyncHTTP2Connection: # type: ignore
|
| 12 |
+
def __init__(self, *args, **kwargs) -> None: # type: ignore
|
| 13 |
+
raise RuntimeError(
|
| 14 |
+
"Attempted to use http2 support, but the `h2` package is not "
|
| 15 |
+
"installed. Use 'pip install httpcore[http2]'."
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
try:
|
| 20 |
+
from .socks_proxy import AsyncSOCKSProxy
|
| 21 |
+
except ImportError: # pragma: nocover
|
| 22 |
+
|
| 23 |
+
class AsyncSOCKSProxy: # type: ignore
|
| 24 |
+
def __init__(self, *args, **kwargs) -> None: # type: ignore
|
| 25 |
+
raise RuntimeError(
|
| 26 |
+
"Attempted to use SOCKS support, but the `socksio` package is not "
|
| 27 |
+
"installed. Use 'pip install httpcore[socks]'."
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
__all__ = [
|
| 32 |
+
"AsyncHTTPConnection",
|
| 33 |
+
"AsyncConnectionPool",
|
| 34 |
+
"AsyncHTTPProxy",
|
| 35 |
+
"AsyncHTTP11Connection",
|
| 36 |
+
"AsyncHTTP2Connection",
|
| 37 |
+
"AsyncConnectionInterface",
|
| 38 |
+
"AsyncSOCKSProxy",
|
| 39 |
+
]
|
parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.42 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection.cpython-310.pyc
ADDED
|
Binary file (6.52 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-310.pyc
ADDED
|
Binary file (11.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http11.cpython-310.pyc
ADDED
|
Binary file (9.72 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http2.cpython-310.pyc
ADDED
|
Binary file (16.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/interfaces.cpython-310.pyc
ADDED
|
Binary file (4.41 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-310.pyc
ADDED
|
Binary file (10.1 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_async/connection.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import logging
|
| 3 |
+
import ssl
|
| 4 |
+
from types import TracebackType
|
| 5 |
+
from typing import Iterable, Iterator, Optional, Type
|
| 6 |
+
|
| 7 |
+
from .._backends.auto import AutoBackend
|
| 8 |
+
from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
|
| 9 |
+
from .._exceptions import ConnectError, ConnectionNotAvailable, ConnectTimeout
|
| 10 |
+
from .._models import Origin, Request, Response
|
| 11 |
+
from .._ssl import default_ssl_context
|
| 12 |
+
from .._synchronization import AsyncLock
|
| 13 |
+
from .._trace import Trace
|
| 14 |
+
from .http11 import AsyncHTTP11Connection
|
| 15 |
+
from .interfaces import AsyncConnectionInterface
|
| 16 |
+
|
| 17 |
+
RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc.
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger("httpcore.connection")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def exponential_backoff(factor: float) -> Iterator[float]:
|
| 24 |
+
yield 0
|
| 25 |
+
for n in itertools.count(2):
|
| 26 |
+
yield factor * (2 ** (n - 2))
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class AsyncHTTPConnection(AsyncConnectionInterface):
|
| 30 |
+
def __init__(
|
| 31 |
+
self,
|
| 32 |
+
origin: Origin,
|
| 33 |
+
ssl_context: Optional[ssl.SSLContext] = None,
|
| 34 |
+
keepalive_expiry: Optional[float] = None,
|
| 35 |
+
http1: bool = True,
|
| 36 |
+
http2: bool = False,
|
| 37 |
+
retries: int = 0,
|
| 38 |
+
local_address: Optional[str] = None,
|
| 39 |
+
uds: Optional[str] = None,
|
| 40 |
+
network_backend: Optional[AsyncNetworkBackend] = None,
|
| 41 |
+
socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
|
| 42 |
+
) -> None:
|
| 43 |
+
self._origin = origin
|
| 44 |
+
self._ssl_context = ssl_context
|
| 45 |
+
self._keepalive_expiry = keepalive_expiry
|
| 46 |
+
self._http1 = http1
|
| 47 |
+
self._http2 = http2
|
| 48 |
+
self._retries = retries
|
| 49 |
+
self._local_address = local_address
|
| 50 |
+
self._uds = uds
|
| 51 |
+
|
| 52 |
+
self._network_backend: AsyncNetworkBackend = (
|
| 53 |
+
AutoBackend() if network_backend is None else network_backend
|
| 54 |
+
)
|
| 55 |
+
self._connection: Optional[AsyncConnectionInterface] = None
|
| 56 |
+
self._connect_failed: bool = False
|
| 57 |
+
self._request_lock = AsyncLock()
|
| 58 |
+
self._socket_options = socket_options
|
| 59 |
+
|
| 60 |
+
async def handle_async_request(self, request: Request) -> Response:
|
| 61 |
+
if not self.can_handle_request(request.url.origin):
|
| 62 |
+
raise RuntimeError(
|
| 63 |
+
f"Attempted to send request to {request.url.origin} on connection to {self._origin}"
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
async with self._request_lock:
|
| 67 |
+
if self._connection is None:
|
| 68 |
+
try:
|
| 69 |
+
stream = await self._connect(request)
|
| 70 |
+
|
| 71 |
+
ssl_object = stream.get_extra_info("ssl_object")
|
| 72 |
+
http2_negotiated = (
|
| 73 |
+
ssl_object is not None
|
| 74 |
+
and ssl_object.selected_alpn_protocol() == "h2"
|
| 75 |
+
)
|
| 76 |
+
if http2_negotiated or (self._http2 and not self._http1):
|
| 77 |
+
from .http2 import AsyncHTTP2Connection
|
| 78 |
+
|
| 79 |
+
self._connection = AsyncHTTP2Connection(
|
| 80 |
+
origin=self._origin,
|
| 81 |
+
stream=stream,
|
| 82 |
+
keepalive_expiry=self._keepalive_expiry,
|
| 83 |
+
)
|
| 84 |
+
else:
|
| 85 |
+
self._connection = AsyncHTTP11Connection(
|
| 86 |
+
origin=self._origin,
|
| 87 |
+
stream=stream,
|
| 88 |
+
keepalive_expiry=self._keepalive_expiry,
|
| 89 |
+
)
|
| 90 |
+
except Exception as exc:
|
| 91 |
+
self._connect_failed = True
|
| 92 |
+
raise exc
|
| 93 |
+
elif not self._connection.is_available():
|
| 94 |
+
raise ConnectionNotAvailable()
|
| 95 |
+
|
| 96 |
+
return await self._connection.handle_async_request(request)
|
| 97 |
+
|
| 98 |
+
async def _connect(self, request: Request) -> AsyncNetworkStream:
|
| 99 |
+
timeouts = request.extensions.get("timeout", {})
|
| 100 |
+
sni_hostname = request.extensions.get("sni_hostname", None)
|
| 101 |
+
timeout = timeouts.get("connect", None)
|
| 102 |
+
|
| 103 |
+
retries_left = self._retries
|
| 104 |
+
delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)
|
| 105 |
+
|
| 106 |
+
while True:
|
| 107 |
+
try:
|
| 108 |
+
if self._uds is None:
|
| 109 |
+
kwargs = {
|
| 110 |
+
"host": self._origin.host.decode("ascii"),
|
| 111 |
+
"port": self._origin.port,
|
| 112 |
+
"local_address": self._local_address,
|
| 113 |
+
"timeout": timeout,
|
| 114 |
+
"socket_options": self._socket_options,
|
| 115 |
+
}
|
| 116 |
+
async with Trace("connect_tcp", logger, request, kwargs) as trace:
|
| 117 |
+
stream = await self._network_backend.connect_tcp(**kwargs)
|
| 118 |
+
trace.return_value = stream
|
| 119 |
+
else:
|
| 120 |
+
kwargs = {
|
| 121 |
+
"path": self._uds,
|
| 122 |
+
"timeout": timeout,
|
| 123 |
+
"socket_options": self._socket_options,
|
| 124 |
+
}
|
| 125 |
+
async with Trace(
|
| 126 |
+
"connect_unix_socket", logger, request, kwargs
|
| 127 |
+
) as trace:
|
| 128 |
+
stream = await self._network_backend.connect_unix_socket(
|
| 129 |
+
**kwargs
|
| 130 |
+
)
|
| 131 |
+
trace.return_value = stream
|
| 132 |
+
|
| 133 |
+
if self._origin.scheme == b"https":
|
| 134 |
+
ssl_context = (
|
| 135 |
+
default_ssl_context()
|
| 136 |
+
if self._ssl_context is None
|
| 137 |
+
else self._ssl_context
|
| 138 |
+
)
|
| 139 |
+
alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
|
| 140 |
+
ssl_context.set_alpn_protocols(alpn_protocols)
|
| 141 |
+
|
| 142 |
+
kwargs = {
|
| 143 |
+
"ssl_context": ssl_context,
|
| 144 |
+
"server_hostname": sni_hostname
|
| 145 |
+
or self._origin.host.decode("ascii"),
|
| 146 |
+
"timeout": timeout,
|
| 147 |
+
}
|
| 148 |
+
async with Trace("start_tls", logger, request, kwargs) as trace:
|
| 149 |
+
stream = await stream.start_tls(**kwargs)
|
| 150 |
+
trace.return_value = stream
|
| 151 |
+
return stream
|
| 152 |
+
except (ConnectError, ConnectTimeout):
|
| 153 |
+
if retries_left <= 0:
|
| 154 |
+
raise
|
| 155 |
+
retries_left -= 1
|
| 156 |
+
delay = next(delays)
|
| 157 |
+
async with Trace("retry", logger, request, kwargs) as trace:
|
| 158 |
+
await self._network_backend.sleep(delay)
|
| 159 |
+
|
| 160 |
+
def can_handle_request(self, origin: Origin) -> bool:
|
| 161 |
+
return origin == self._origin
|
| 162 |
+
|
| 163 |
+
async def aclose(self) -> None:
|
| 164 |
+
if self._connection is not None:
|
| 165 |
+
async with Trace("close", logger, None, {}):
|
| 166 |
+
await self._connection.aclose()
|
| 167 |
+
|
| 168 |
+
def is_available(self) -> bool:
|
| 169 |
+
if self._connection is None:
|
| 170 |
+
# If HTTP/2 support is enabled, and the resulting connection could
|
| 171 |
+
# end up as HTTP/2 then we should indicate the connection as being
|
| 172 |
+
# available to service multiple requests.
|
| 173 |
+
return (
|
| 174 |
+
self._http2
|
| 175 |
+
and (self._origin.scheme == b"https" or not self._http1)
|
| 176 |
+
and not self._connect_failed
|
| 177 |
+
)
|
| 178 |
+
return self._connection.is_available()
|
| 179 |
+
|
| 180 |
+
def has_expired(self) -> bool:
|
| 181 |
+
if self._connection is None:
|
| 182 |
+
return self._connect_failed
|
| 183 |
+
return self._connection.has_expired()
|
| 184 |
+
|
| 185 |
+
def is_idle(self) -> bool:
|
| 186 |
+
if self._connection is None:
|
| 187 |
+
return self._connect_failed
|
| 188 |
+
return self._connection.is_idle()
|
| 189 |
+
|
| 190 |
+
def is_closed(self) -> bool:
|
| 191 |
+
if self._connection is None:
|
| 192 |
+
return self._connect_failed
|
| 193 |
+
return self._connection.is_closed()
|
| 194 |
+
|
| 195 |
+
def info(self) -> str:
|
| 196 |
+
if self._connection is None:
|
| 197 |
+
return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"
|
| 198 |
+
return self._connection.info()
|
| 199 |
+
|
| 200 |
+
def __repr__(self) -> str:
|
| 201 |
+
return f"<{self.__class__.__name__} [{self.info()}]>"
|
| 202 |
+
|
| 203 |
+
# These context managers are not used in the standard flow, but are
|
| 204 |
+
# useful for testing or working with connection instances directly.
|
| 205 |
+
|
| 206 |
+
async def __aenter__(self) -> "AsyncHTTPConnection":
|
| 207 |
+
return self
|
| 208 |
+
|
| 209 |
+
async def __aexit__(
|
| 210 |
+
self,
|
| 211 |
+
exc_type: Optional[Type[BaseException]] = None,
|
| 212 |
+
exc_value: Optional[BaseException] = None,
|
| 213 |
+
traceback: Optional[TracebackType] = None,
|
| 214 |
+
) -> None:
|
| 215 |
+
await self.aclose()
|
parrot/lib/python3.10/site-packages/httpcore/_async/connection_pool.py
ADDED
|
@@ -0,0 +1,356 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ssl
|
| 2 |
+
import sys
|
| 3 |
+
from types import TracebackType
|
| 4 |
+
from typing import AsyncIterable, AsyncIterator, Iterable, List, Optional, Type
|
| 5 |
+
|
| 6 |
+
from .._backends.auto import AutoBackend
|
| 7 |
+
from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend
|
| 8 |
+
from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol
|
| 9 |
+
from .._models import Origin, Request, Response
|
| 10 |
+
from .._synchronization import AsyncEvent, AsyncLock, AsyncShieldCancellation
|
| 11 |
+
from .connection import AsyncHTTPConnection
|
| 12 |
+
from .interfaces import AsyncConnectionInterface, AsyncRequestInterface
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class RequestStatus:
|
| 16 |
+
def __init__(self, request: Request):
|
| 17 |
+
self.request = request
|
| 18 |
+
self.connection: Optional[AsyncConnectionInterface] = None
|
| 19 |
+
self._connection_acquired = AsyncEvent()
|
| 20 |
+
|
| 21 |
+
def set_connection(self, connection: AsyncConnectionInterface) -> None:
|
| 22 |
+
assert self.connection is None
|
| 23 |
+
self.connection = connection
|
| 24 |
+
self._connection_acquired.set()
|
| 25 |
+
|
| 26 |
+
def unset_connection(self) -> None:
|
| 27 |
+
assert self.connection is not None
|
| 28 |
+
self.connection = None
|
| 29 |
+
self._connection_acquired = AsyncEvent()
|
| 30 |
+
|
| 31 |
+
async def wait_for_connection(
|
| 32 |
+
self, timeout: Optional[float] = None
|
| 33 |
+
) -> AsyncConnectionInterface:
|
| 34 |
+
if self.connection is None:
|
| 35 |
+
await self._connection_acquired.wait(timeout=timeout)
|
| 36 |
+
assert self.connection is not None
|
| 37 |
+
return self.connection
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class AsyncConnectionPool(AsyncRequestInterface):
|
| 41 |
+
"""
|
| 42 |
+
A connection pool for making HTTP requests.
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
def __init__(
|
| 46 |
+
self,
|
| 47 |
+
ssl_context: Optional[ssl.SSLContext] = None,
|
| 48 |
+
max_connections: Optional[int] = 10,
|
| 49 |
+
max_keepalive_connections: Optional[int] = None,
|
| 50 |
+
keepalive_expiry: Optional[float] = None,
|
| 51 |
+
http1: bool = True,
|
| 52 |
+
http2: bool = False,
|
| 53 |
+
retries: int = 0,
|
| 54 |
+
local_address: Optional[str] = None,
|
| 55 |
+
uds: Optional[str] = None,
|
| 56 |
+
network_backend: Optional[AsyncNetworkBackend] = None,
|
| 57 |
+
socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
|
| 58 |
+
) -> None:
|
| 59 |
+
"""
|
| 60 |
+
A connection pool for making HTTP requests.
|
| 61 |
+
|
| 62 |
+
Parameters:
|
| 63 |
+
ssl_context: An SSL context to use for verifying connections.
|
| 64 |
+
If not specified, the default `httpcore.default_ssl_context()`
|
| 65 |
+
will be used.
|
| 66 |
+
max_connections: The maximum number of concurrent HTTP connections that
|
| 67 |
+
the pool should allow. Any attempt to send a request on a pool that
|
| 68 |
+
would exceed this amount will block until a connection is available.
|
| 69 |
+
max_keepalive_connections: The maximum number of idle HTTP connections
|
| 70 |
+
that will be maintained in the pool.
|
| 71 |
+
keepalive_expiry: The duration in seconds that an idle HTTP connection
|
| 72 |
+
may be maintained for before being expired from the pool.
|
| 73 |
+
http1: A boolean indicating if HTTP/1.1 requests should be supported
|
| 74 |
+
by the connection pool. Defaults to True.
|
| 75 |
+
http2: A boolean indicating if HTTP/2 requests should be supported by
|
| 76 |
+
the connection pool. Defaults to False.
|
| 77 |
+
retries: The maximum number of retries when trying to establish a
|
| 78 |
+
connection.
|
| 79 |
+
local_address: Local address to connect from. Can also be used to connect
|
| 80 |
+
using a particular address family. Using `local_address="0.0.0.0"`
|
| 81 |
+
will connect using an `AF_INET` address (IPv4), while using
|
| 82 |
+
`local_address="::"` will connect using an `AF_INET6` address (IPv6).
|
| 83 |
+
uds: Path to a Unix Domain Socket to use instead of TCP sockets.
|
| 84 |
+
network_backend: A backend instance to use for handling network I/O.
|
| 85 |
+
socket_options: Socket options that have to be included
|
| 86 |
+
in the TCP socket when the connection was established.
|
| 87 |
+
"""
|
| 88 |
+
self._ssl_context = ssl_context
|
| 89 |
+
|
| 90 |
+
self._max_connections = (
|
| 91 |
+
sys.maxsize if max_connections is None else max_connections
|
| 92 |
+
)
|
| 93 |
+
self._max_keepalive_connections = (
|
| 94 |
+
sys.maxsize
|
| 95 |
+
if max_keepalive_connections is None
|
| 96 |
+
else max_keepalive_connections
|
| 97 |
+
)
|
| 98 |
+
self._max_keepalive_connections = min(
|
| 99 |
+
self._max_connections, self._max_keepalive_connections
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
self._keepalive_expiry = keepalive_expiry
|
| 103 |
+
self._http1 = http1
|
| 104 |
+
self._http2 = http2
|
| 105 |
+
self._retries = retries
|
| 106 |
+
self._local_address = local_address
|
| 107 |
+
self._uds = uds
|
| 108 |
+
|
| 109 |
+
self._pool: List[AsyncConnectionInterface] = []
|
| 110 |
+
self._requests: List[RequestStatus] = []
|
| 111 |
+
self._pool_lock = AsyncLock()
|
| 112 |
+
self._network_backend = (
|
| 113 |
+
AutoBackend() if network_backend is None else network_backend
|
| 114 |
+
)
|
| 115 |
+
self._socket_options = socket_options
|
| 116 |
+
|
| 117 |
+
def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
|
| 118 |
+
return AsyncHTTPConnection(
|
| 119 |
+
origin=origin,
|
| 120 |
+
ssl_context=self._ssl_context,
|
| 121 |
+
keepalive_expiry=self._keepalive_expiry,
|
| 122 |
+
http1=self._http1,
|
| 123 |
+
http2=self._http2,
|
| 124 |
+
retries=self._retries,
|
| 125 |
+
local_address=self._local_address,
|
| 126 |
+
uds=self._uds,
|
| 127 |
+
network_backend=self._network_backend,
|
| 128 |
+
socket_options=self._socket_options,
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
@property
|
| 132 |
+
def connections(self) -> List[AsyncConnectionInterface]:
|
| 133 |
+
"""
|
| 134 |
+
Return a list of the connections currently in the pool.
|
| 135 |
+
|
| 136 |
+
For example:
|
| 137 |
+
|
| 138 |
+
```python
|
| 139 |
+
>>> pool.connections
|
| 140 |
+
[
|
| 141 |
+
<AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,
|
| 142 |
+
<AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]> ,
|
| 143 |
+
<AsyncHTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,
|
| 144 |
+
]
|
| 145 |
+
```
|
| 146 |
+
"""
|
| 147 |
+
return list(self._pool)
|
| 148 |
+
|
| 149 |
+
async def _attempt_to_acquire_connection(self, status: RequestStatus) -> bool:
|
| 150 |
+
"""
|
| 151 |
+
Attempt to provide a connection that can handle the given origin.
|
| 152 |
+
"""
|
| 153 |
+
origin = status.request.url.origin
|
| 154 |
+
|
| 155 |
+
# If there are queued requests in front of us, then don't acquire a
|
| 156 |
+
# connection. We handle requests strictly in order.
|
| 157 |
+
waiting = [s for s in self._requests if s.connection is None]
|
| 158 |
+
if waiting and waiting[0] is not status:
|
| 159 |
+
return False
|
| 160 |
+
|
| 161 |
+
# Reuse an existing connection if one is currently available.
|
| 162 |
+
for idx, connection in enumerate(self._pool):
|
| 163 |
+
if connection.can_handle_request(origin) and connection.is_available():
|
| 164 |
+
self._pool.pop(idx)
|
| 165 |
+
self._pool.insert(0, connection)
|
| 166 |
+
status.set_connection(connection)
|
| 167 |
+
return True
|
| 168 |
+
|
| 169 |
+
# If the pool is currently full, attempt to close one idle connection.
|
| 170 |
+
if len(self._pool) >= self._max_connections:
|
| 171 |
+
for idx, connection in reversed(list(enumerate(self._pool))):
|
| 172 |
+
if connection.is_idle():
|
| 173 |
+
await connection.aclose()
|
| 174 |
+
self._pool.pop(idx)
|
| 175 |
+
break
|
| 176 |
+
|
| 177 |
+
# If the pool is still full, then we cannot acquire a connection.
|
| 178 |
+
if len(self._pool) >= self._max_connections:
|
| 179 |
+
return False
|
| 180 |
+
|
| 181 |
+
# Otherwise create a new connection.
|
| 182 |
+
connection = self.create_connection(origin)
|
| 183 |
+
self._pool.insert(0, connection)
|
| 184 |
+
status.set_connection(connection)
|
| 185 |
+
return True
|
| 186 |
+
|
| 187 |
+
async def _close_expired_connections(self) -> None:
|
| 188 |
+
"""
|
| 189 |
+
Clean up the connection pool by closing off any connections that have expired.
|
| 190 |
+
"""
|
| 191 |
+
# Close any connections that have expired their keep-alive time.
|
| 192 |
+
for idx, connection in reversed(list(enumerate(self._pool))):
|
| 193 |
+
if connection.has_expired():
|
| 194 |
+
await connection.aclose()
|
| 195 |
+
self._pool.pop(idx)
|
| 196 |
+
|
| 197 |
+
# If the pool size exceeds the maximum number of allowed keep-alive connections,
|
| 198 |
+
# then close off idle connections as required.
|
| 199 |
+
pool_size = len(self._pool)
|
| 200 |
+
for idx, connection in reversed(list(enumerate(self._pool))):
|
| 201 |
+
if connection.is_idle() and pool_size > self._max_keepalive_connections:
|
| 202 |
+
await connection.aclose()
|
| 203 |
+
self._pool.pop(idx)
|
| 204 |
+
pool_size -= 1
|
| 205 |
+
|
| 206 |
+
async def handle_async_request(self, request: Request) -> Response:
|
| 207 |
+
"""
|
| 208 |
+
Send an HTTP request, and return an HTTP response.
|
| 209 |
+
|
| 210 |
+
This is the core implementation that is called into by `.request()` or `.stream()`.
|
| 211 |
+
"""
|
| 212 |
+
scheme = request.url.scheme.decode()
|
| 213 |
+
if scheme == "":
|
| 214 |
+
raise UnsupportedProtocol(
|
| 215 |
+
"Request URL is missing an 'http://' or 'https://' protocol."
|
| 216 |
+
)
|
| 217 |
+
if scheme not in ("http", "https", "ws", "wss"):
|
| 218 |
+
raise UnsupportedProtocol(
|
| 219 |
+
f"Request URL has an unsupported protocol '{scheme}://'."
|
| 220 |
+
)
|
| 221 |
+
|
| 222 |
+
status = RequestStatus(request)
|
| 223 |
+
|
| 224 |
+
async with self._pool_lock:
|
| 225 |
+
self._requests.append(status)
|
| 226 |
+
await self._close_expired_connections()
|
| 227 |
+
await self._attempt_to_acquire_connection(status)
|
| 228 |
+
|
| 229 |
+
while True:
|
| 230 |
+
timeouts = request.extensions.get("timeout", {})
|
| 231 |
+
timeout = timeouts.get("pool", None)
|
| 232 |
+
try:
|
| 233 |
+
connection = await status.wait_for_connection(timeout=timeout)
|
| 234 |
+
except BaseException as exc:
|
| 235 |
+
# If we timeout here, or if the task is cancelled, then make
|
| 236 |
+
# sure to remove the request from the queue before bubbling
|
| 237 |
+
# up the exception.
|
| 238 |
+
async with self._pool_lock:
|
| 239 |
+
# Ensure only remove when task exists.
|
| 240 |
+
if status in self._requests:
|
| 241 |
+
self._requests.remove(status)
|
| 242 |
+
raise exc
|
| 243 |
+
|
| 244 |
+
try:
|
| 245 |
+
response = await connection.handle_async_request(request)
|
| 246 |
+
except ConnectionNotAvailable:
|
| 247 |
+
# The ConnectionNotAvailable exception is a special case, that
|
| 248 |
+
# indicates we need to retry the request on a new connection.
|
| 249 |
+
#
|
| 250 |
+
# The most common case where this can occur is when multiple
|
| 251 |
+
# requests are queued waiting for a single connection, which
|
| 252 |
+
# might end up as an HTTP/2 connection, but which actually ends
|
| 253 |
+
# up as HTTP/1.1.
|
| 254 |
+
async with self._pool_lock:
|
| 255 |
+
# Maintain our position in the request queue, but reset the
|
| 256 |
+
# status so that the request becomes queued again.
|
| 257 |
+
status.unset_connection()
|
| 258 |
+
await self._attempt_to_acquire_connection(status)
|
| 259 |
+
except BaseException as exc:
|
| 260 |
+
with AsyncShieldCancellation():
|
| 261 |
+
await self.response_closed(status)
|
| 262 |
+
raise exc
|
| 263 |
+
else:
|
| 264 |
+
break
|
| 265 |
+
|
| 266 |
+
# When we return the response, we wrap the stream in a special class
|
| 267 |
+
# that handles notifying the connection pool once the response
|
| 268 |
+
# has been released.
|
| 269 |
+
assert isinstance(response.stream, AsyncIterable)
|
| 270 |
+
return Response(
|
| 271 |
+
status=response.status,
|
| 272 |
+
headers=response.headers,
|
| 273 |
+
content=ConnectionPoolByteStream(response.stream, self, status),
|
| 274 |
+
extensions=response.extensions,
|
| 275 |
+
)
|
| 276 |
+
|
| 277 |
+
async def response_closed(self, status: RequestStatus) -> None:
|
| 278 |
+
"""
|
| 279 |
+
This method acts as a callback once the request/response cycle is complete.
|
| 280 |
+
|
| 281 |
+
It is called into from the `ConnectionPoolByteStream.aclose()` method.
|
| 282 |
+
"""
|
| 283 |
+
assert status.connection is not None
|
| 284 |
+
connection = status.connection
|
| 285 |
+
|
| 286 |
+
async with self._pool_lock:
|
| 287 |
+
# Update the state of the connection pool.
|
| 288 |
+
if status in self._requests:
|
| 289 |
+
self._requests.remove(status)
|
| 290 |
+
|
| 291 |
+
if connection.is_closed() and connection in self._pool:
|
| 292 |
+
self._pool.remove(connection)
|
| 293 |
+
|
| 294 |
+
# Since we've had a response closed, it's possible we'll now be able
|
| 295 |
+
# to service one or more requests that are currently pending.
|
| 296 |
+
for status in self._requests:
|
| 297 |
+
if status.connection is None:
|
| 298 |
+
acquired = await self._attempt_to_acquire_connection(status)
|
| 299 |
+
# If we could not acquire a connection for a queued request
|
| 300 |
+
# then we don't need to check anymore requests that are
|
| 301 |
+
# queued later behind it.
|
| 302 |
+
if not acquired:
|
| 303 |
+
break
|
| 304 |
+
|
| 305 |
+
# Housekeeping.
|
| 306 |
+
await self._close_expired_connections()
|
| 307 |
+
|
| 308 |
+
async def aclose(self) -> None:
|
| 309 |
+
"""
|
| 310 |
+
Close any connections in the pool.
|
| 311 |
+
"""
|
| 312 |
+
async with self._pool_lock:
|
| 313 |
+
for connection in self._pool:
|
| 314 |
+
await connection.aclose()
|
| 315 |
+
self._pool = []
|
| 316 |
+
self._requests = []
|
| 317 |
+
|
| 318 |
+
async def __aenter__(self) -> "AsyncConnectionPool":
|
| 319 |
+
return self
|
| 320 |
+
|
| 321 |
+
async def __aexit__(
|
| 322 |
+
self,
|
| 323 |
+
exc_type: Optional[Type[BaseException]] = None,
|
| 324 |
+
exc_value: Optional[BaseException] = None,
|
| 325 |
+
traceback: Optional[TracebackType] = None,
|
| 326 |
+
) -> None:
|
| 327 |
+
await self.aclose()
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
class ConnectionPoolByteStream:
|
| 331 |
+
"""
|
| 332 |
+
A wrapper around the response byte stream, that additionally handles
|
| 333 |
+
notifying the connection pool when the response has been closed.
|
| 334 |
+
"""
|
| 335 |
+
|
| 336 |
+
def __init__(
|
| 337 |
+
self,
|
| 338 |
+
stream: AsyncIterable[bytes],
|
| 339 |
+
pool: AsyncConnectionPool,
|
| 340 |
+
status: RequestStatus,
|
| 341 |
+
) -> None:
|
| 342 |
+
self._stream = stream
|
| 343 |
+
self._pool = pool
|
| 344 |
+
self._status = status
|
| 345 |
+
|
| 346 |
+
async def __aiter__(self) -> AsyncIterator[bytes]:
|
| 347 |
+
async for part in self._stream:
|
| 348 |
+
yield part
|
| 349 |
+
|
| 350 |
+
async def aclose(self) -> None:
|
| 351 |
+
try:
|
| 352 |
+
if hasattr(self._stream, "aclose"):
|
| 353 |
+
await self._stream.aclose()
|
| 354 |
+
finally:
|
| 355 |
+
with AsyncShieldCancellation():
|
| 356 |
+
await self._pool.response_closed(self._status)
|
parrot/lib/python3.10/site-packages/httpcore/_async/http11.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
import logging
|
| 3 |
+
import time
|
| 4 |
+
from types import TracebackType
|
| 5 |
+
from typing import (
|
| 6 |
+
AsyncIterable,
|
| 7 |
+
AsyncIterator,
|
| 8 |
+
List,
|
| 9 |
+
Optional,
|
| 10 |
+
Tuple,
|
| 11 |
+
Type,
|
| 12 |
+
Union,
|
| 13 |
+
cast,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
import h11
|
| 17 |
+
|
| 18 |
+
from .._backends.base import AsyncNetworkStream
|
| 19 |
+
from .._exceptions import (
|
| 20 |
+
ConnectionNotAvailable,
|
| 21 |
+
LocalProtocolError,
|
| 22 |
+
RemoteProtocolError,
|
| 23 |
+
map_exceptions,
|
| 24 |
+
)
|
| 25 |
+
from .._models import Origin, Request, Response
|
| 26 |
+
from .._synchronization import AsyncLock, AsyncShieldCancellation
|
| 27 |
+
from .._trace import Trace
|
| 28 |
+
from .interfaces import AsyncConnectionInterface
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger("httpcore.http11")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# A subset of `h11.Event` types supported by `_send_event`
|
| 34 |
+
H11SendEvent = Union[
|
| 35 |
+
h11.Request,
|
| 36 |
+
h11.Data,
|
| 37 |
+
h11.EndOfMessage,
|
| 38 |
+
]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class HTTPConnectionState(enum.IntEnum):
|
| 42 |
+
NEW = 0
|
| 43 |
+
ACTIVE = 1
|
| 44 |
+
IDLE = 2
|
| 45 |
+
CLOSED = 3
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class AsyncHTTP11Connection(AsyncConnectionInterface):
|
| 49 |
+
READ_NUM_BYTES = 64 * 1024
|
| 50 |
+
MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024
|
| 51 |
+
|
| 52 |
+
def __init__(
|
| 53 |
+
self,
|
| 54 |
+
origin: Origin,
|
| 55 |
+
stream: AsyncNetworkStream,
|
| 56 |
+
keepalive_expiry: Optional[float] = None,
|
| 57 |
+
) -> None:
|
| 58 |
+
self._origin = origin
|
| 59 |
+
self._network_stream = stream
|
| 60 |
+
self._keepalive_expiry: Optional[float] = keepalive_expiry
|
| 61 |
+
self._expire_at: Optional[float] = None
|
| 62 |
+
self._state = HTTPConnectionState.NEW
|
| 63 |
+
self._state_lock = AsyncLock()
|
| 64 |
+
self._request_count = 0
|
| 65 |
+
self._h11_state = h11.Connection(
|
| 66 |
+
our_role=h11.CLIENT,
|
| 67 |
+
max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE,
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
async def handle_async_request(self, request: Request) -> Response:
|
| 71 |
+
if not self.can_handle_request(request.url.origin):
|
| 72 |
+
raise RuntimeError(
|
| 73 |
+
f"Attempted to send request to {request.url.origin} on connection "
|
| 74 |
+
f"to {self._origin}"
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
async with self._state_lock:
|
| 78 |
+
if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE):
|
| 79 |
+
self._request_count += 1
|
| 80 |
+
self._state = HTTPConnectionState.ACTIVE
|
| 81 |
+
self._expire_at = None
|
| 82 |
+
else:
|
| 83 |
+
raise ConnectionNotAvailable()
|
| 84 |
+
|
| 85 |
+
try:
|
| 86 |
+
kwargs = {"request": request}
|
| 87 |
+
async with Trace("send_request_headers", logger, request, kwargs) as trace:
|
| 88 |
+
await self._send_request_headers(**kwargs)
|
| 89 |
+
async with Trace("send_request_body", logger, request, kwargs) as trace:
|
| 90 |
+
await self._send_request_body(**kwargs)
|
| 91 |
+
async with Trace(
|
| 92 |
+
"receive_response_headers", logger, request, kwargs
|
| 93 |
+
) as trace:
|
| 94 |
+
(
|
| 95 |
+
http_version,
|
| 96 |
+
status,
|
| 97 |
+
reason_phrase,
|
| 98 |
+
headers,
|
| 99 |
+
) = await self._receive_response_headers(**kwargs)
|
| 100 |
+
trace.return_value = (
|
| 101 |
+
http_version,
|
| 102 |
+
status,
|
| 103 |
+
reason_phrase,
|
| 104 |
+
headers,
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
return Response(
|
| 108 |
+
status=status,
|
| 109 |
+
headers=headers,
|
| 110 |
+
content=HTTP11ConnectionByteStream(self, request),
|
| 111 |
+
extensions={
|
| 112 |
+
"http_version": http_version,
|
| 113 |
+
"reason_phrase": reason_phrase,
|
| 114 |
+
"network_stream": self._network_stream,
|
| 115 |
+
},
|
| 116 |
+
)
|
| 117 |
+
except BaseException as exc:
|
| 118 |
+
with AsyncShieldCancellation():
|
| 119 |
+
async with Trace("response_closed", logger, request) as trace:
|
| 120 |
+
await self._response_closed()
|
| 121 |
+
raise exc
|
| 122 |
+
|
| 123 |
+
# Sending the request...
|
| 124 |
+
|
| 125 |
+
async def _send_request_headers(self, request: Request) -> None:
|
| 126 |
+
timeouts = request.extensions.get("timeout", {})
|
| 127 |
+
timeout = timeouts.get("write", None)
|
| 128 |
+
|
| 129 |
+
with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
|
| 130 |
+
event = h11.Request(
|
| 131 |
+
method=request.method,
|
| 132 |
+
target=request.url.target,
|
| 133 |
+
headers=request.headers,
|
| 134 |
+
)
|
| 135 |
+
await self._send_event(event, timeout=timeout)
|
| 136 |
+
|
| 137 |
+
async def _send_request_body(self, request: Request) -> None:
|
| 138 |
+
timeouts = request.extensions.get("timeout", {})
|
| 139 |
+
timeout = timeouts.get("write", None)
|
| 140 |
+
|
| 141 |
+
assert isinstance(request.stream, AsyncIterable)
|
| 142 |
+
async for chunk in request.stream:
|
| 143 |
+
event = h11.Data(data=chunk)
|
| 144 |
+
await self._send_event(event, timeout=timeout)
|
| 145 |
+
|
| 146 |
+
await self._send_event(h11.EndOfMessage(), timeout=timeout)
|
| 147 |
+
|
| 148 |
+
async def _send_event(
|
| 149 |
+
self, event: h11.Event, timeout: Optional[float] = None
|
| 150 |
+
) -> None:
|
| 151 |
+
bytes_to_send = self._h11_state.send(event)
|
| 152 |
+
if bytes_to_send is not None:
|
| 153 |
+
await self._network_stream.write(bytes_to_send, timeout=timeout)
|
| 154 |
+
|
| 155 |
+
# Receiving the response...
|
| 156 |
+
|
| 157 |
+
async def _receive_response_headers(
|
| 158 |
+
self, request: Request
|
| 159 |
+
) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]:
|
| 160 |
+
timeouts = request.extensions.get("timeout", {})
|
| 161 |
+
timeout = timeouts.get("read", None)
|
| 162 |
+
|
| 163 |
+
while True:
|
| 164 |
+
event = await self._receive_event(timeout=timeout)
|
| 165 |
+
if isinstance(event, h11.Response):
|
| 166 |
+
break
|
| 167 |
+
if (
|
| 168 |
+
isinstance(event, h11.InformationalResponse)
|
| 169 |
+
and event.status_code == 101
|
| 170 |
+
):
|
| 171 |
+
break
|
| 172 |
+
|
| 173 |
+
http_version = b"HTTP/" + event.http_version
|
| 174 |
+
|
| 175 |
+
# h11 version 0.11+ supports a `raw_items` interface to get the
|
| 176 |
+
# raw header casing, rather than the enforced lowercase headers.
|
| 177 |
+
headers = event.headers.raw_items()
|
| 178 |
+
|
| 179 |
+
return http_version, event.status_code, event.reason, headers
|
| 180 |
+
|
| 181 |
+
async def _receive_response_body(self, request: Request) -> AsyncIterator[bytes]:
|
| 182 |
+
timeouts = request.extensions.get("timeout", {})
|
| 183 |
+
timeout = timeouts.get("read", None)
|
| 184 |
+
|
| 185 |
+
while True:
|
| 186 |
+
event = await self._receive_event(timeout=timeout)
|
| 187 |
+
if isinstance(event, h11.Data):
|
| 188 |
+
yield bytes(event.data)
|
| 189 |
+
elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
|
| 190 |
+
break
|
| 191 |
+
|
| 192 |
+
async def _receive_event(
|
| 193 |
+
self, timeout: Optional[float] = None
|
| 194 |
+
) -> Union[h11.Event, Type[h11.PAUSED]]:
|
| 195 |
+
while True:
|
| 196 |
+
with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
|
| 197 |
+
event = self._h11_state.next_event()
|
| 198 |
+
|
| 199 |
+
if event is h11.NEED_DATA:
|
| 200 |
+
data = await self._network_stream.read(
|
| 201 |
+
self.READ_NUM_BYTES, timeout=timeout
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
# If we feed this case through h11 we'll raise an exception like:
|
| 205 |
+
#
|
| 206 |
+
# httpcore.RemoteProtocolError: can't handle event type
|
| 207 |
+
# ConnectionClosed when role=SERVER and state=SEND_RESPONSE
|
| 208 |
+
#
|
| 209 |
+
# Which is accurate, but not very informative from an end-user
|
| 210 |
+
# perspective. Instead we handle this case distinctly and treat
|
| 211 |
+
# it as a ConnectError.
|
| 212 |
+
if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:
|
| 213 |
+
msg = "Server disconnected without sending a response."
|
| 214 |
+
raise RemoteProtocolError(msg)
|
| 215 |
+
|
| 216 |
+
self._h11_state.receive_data(data)
|
| 217 |
+
else:
|
| 218 |
+
# mypy fails to narrow the type in the above if statement above
|
| 219 |
+
return cast(Union[h11.Event, Type[h11.PAUSED]], event)
|
| 220 |
+
|
| 221 |
+
async def _response_closed(self) -> None:
|
| 222 |
+
async with self._state_lock:
|
| 223 |
+
if (
|
| 224 |
+
self._h11_state.our_state is h11.DONE
|
| 225 |
+
and self._h11_state.their_state is h11.DONE
|
| 226 |
+
):
|
| 227 |
+
self._state = HTTPConnectionState.IDLE
|
| 228 |
+
self._h11_state.start_next_cycle()
|
| 229 |
+
if self._keepalive_expiry is not None:
|
| 230 |
+
now = time.monotonic()
|
| 231 |
+
self._expire_at = now + self._keepalive_expiry
|
| 232 |
+
else:
|
| 233 |
+
await self.aclose()
|
| 234 |
+
|
| 235 |
+
# Once the connection is no longer required...
|
| 236 |
+
|
| 237 |
+
async def aclose(self) -> None:
|
| 238 |
+
# Note that this method unilaterally closes the connection, and does
|
| 239 |
+
# not have any kind of locking in place around it.
|
| 240 |
+
self._state = HTTPConnectionState.CLOSED
|
| 241 |
+
await self._network_stream.aclose()
|
| 242 |
+
|
| 243 |
+
# The AsyncConnectionInterface methods provide information about the state of
|
| 244 |
+
# the connection, allowing for a connection pooling implementation to
|
| 245 |
+
# determine when to reuse and when to close the connection...
|
| 246 |
+
|
| 247 |
+
def can_handle_request(self, origin: Origin) -> bool:
|
| 248 |
+
return origin == self._origin
|
| 249 |
+
|
| 250 |
+
def is_available(self) -> bool:
|
| 251 |
+
# Note that HTTP/1.1 connections in the "NEW" state are not treated as
|
| 252 |
+
# being "available". The control flow which created the connection will
|
| 253 |
+
# be able to send an outgoing request, but the connection will not be
|
| 254 |
+
# acquired from the connection pool for any other request.
|
| 255 |
+
return self._state == HTTPConnectionState.IDLE
|
| 256 |
+
|
| 257 |
+
def has_expired(self) -> bool:
|
| 258 |
+
now = time.monotonic()
|
| 259 |
+
keepalive_expired = self._expire_at is not None and now > self._expire_at
|
| 260 |
+
|
| 261 |
+
# If the HTTP connection is idle but the socket is readable, then the
|
| 262 |
+
# only valid state is that the socket is about to return b"", indicating
|
| 263 |
+
# a server-initiated disconnect.
|
| 264 |
+
server_disconnected = (
|
| 265 |
+
self._state == HTTPConnectionState.IDLE
|
| 266 |
+
and self._network_stream.get_extra_info("is_readable")
|
| 267 |
+
)
|
| 268 |
+
|
| 269 |
+
return keepalive_expired or server_disconnected
|
| 270 |
+
|
| 271 |
+
def is_idle(self) -> bool:
|
| 272 |
+
return self._state == HTTPConnectionState.IDLE
|
| 273 |
+
|
| 274 |
+
def is_closed(self) -> bool:
|
| 275 |
+
return self._state == HTTPConnectionState.CLOSED
|
| 276 |
+
|
| 277 |
+
def info(self) -> str:
|
| 278 |
+
origin = str(self._origin)
|
| 279 |
+
return (
|
| 280 |
+
f"{origin!r}, HTTP/1.1, {self._state.name}, "
|
| 281 |
+
f"Request Count: {self._request_count}"
|
| 282 |
+
)
|
| 283 |
+
|
| 284 |
+
def __repr__(self) -> str:
|
| 285 |
+
class_name = self.__class__.__name__
|
| 286 |
+
origin = str(self._origin)
|
| 287 |
+
return (
|
| 288 |
+
f"<{class_name} [{origin!r}, {self._state.name}, "
|
| 289 |
+
f"Request Count: {self._request_count}]>"
|
| 290 |
+
)
|
| 291 |
+
|
| 292 |
+
# These context managers are not used in the standard flow, but are
|
| 293 |
+
# useful for testing or working with connection instances directly.
|
| 294 |
+
|
| 295 |
+
async def __aenter__(self) -> "AsyncHTTP11Connection":
|
| 296 |
+
return self
|
| 297 |
+
|
| 298 |
+
async def __aexit__(
|
| 299 |
+
self,
|
| 300 |
+
exc_type: Optional[Type[BaseException]] = None,
|
| 301 |
+
exc_value: Optional[BaseException] = None,
|
| 302 |
+
traceback: Optional[TracebackType] = None,
|
| 303 |
+
) -> None:
|
| 304 |
+
await self.aclose()
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
class HTTP11ConnectionByteStream:
|
| 308 |
+
def __init__(self, connection: AsyncHTTP11Connection, request: Request) -> None:
|
| 309 |
+
self._connection = connection
|
| 310 |
+
self._request = request
|
| 311 |
+
self._closed = False
|
| 312 |
+
|
| 313 |
+
async def __aiter__(self) -> AsyncIterator[bytes]:
|
| 314 |
+
kwargs = {"request": self._request}
|
| 315 |
+
try:
|
| 316 |
+
async with Trace("receive_response_body", logger, self._request, kwargs):
|
| 317 |
+
async for chunk in self._connection._receive_response_body(**kwargs):
|
| 318 |
+
yield chunk
|
| 319 |
+
except BaseException as exc:
|
| 320 |
+
# If we get an exception while streaming the response,
|
| 321 |
+
# we want to close the response (and possibly the connection)
|
| 322 |
+
# before raising that exception.
|
| 323 |
+
with AsyncShieldCancellation():
|
| 324 |
+
await self.aclose()
|
| 325 |
+
raise exc
|
| 326 |
+
|
| 327 |
+
async def aclose(self) -> None:
|
| 328 |
+
if not self._closed:
|
| 329 |
+
self._closed = True
|
| 330 |
+
async with Trace("response_closed", logger, self._request):
|
| 331 |
+
await self._connection._response_closed()
|
parrot/lib/python3.10/site-packages/httpcore/_async/http2.py
ADDED
|
@@ -0,0 +1,589 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
import logging
|
| 3 |
+
import time
|
| 4 |
+
import types
|
| 5 |
+
import typing
|
| 6 |
+
|
| 7 |
+
import h2.config
|
| 8 |
+
import h2.connection
|
| 9 |
+
import h2.events
|
| 10 |
+
import h2.exceptions
|
| 11 |
+
import h2.settings
|
| 12 |
+
|
| 13 |
+
from .._backends.base import AsyncNetworkStream
|
| 14 |
+
from .._exceptions import (
|
| 15 |
+
ConnectionNotAvailable,
|
| 16 |
+
LocalProtocolError,
|
| 17 |
+
RemoteProtocolError,
|
| 18 |
+
)
|
| 19 |
+
from .._models import Origin, Request, Response
|
| 20 |
+
from .._synchronization import AsyncLock, AsyncSemaphore, AsyncShieldCancellation
|
| 21 |
+
from .._trace import Trace
|
| 22 |
+
from .interfaces import AsyncConnectionInterface
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger("httpcore.http2")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def has_body_headers(request: Request) -> bool:
|
| 28 |
+
return any(
|
| 29 |
+
k.lower() == b"content-length" or k.lower() == b"transfer-encoding"
|
| 30 |
+
for k, v in request.headers
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class HTTPConnectionState(enum.IntEnum):
|
| 35 |
+
ACTIVE = 1
|
| 36 |
+
IDLE = 2
|
| 37 |
+
CLOSED = 3
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class AsyncHTTP2Connection(AsyncConnectionInterface):
|
| 41 |
+
READ_NUM_BYTES = 64 * 1024
|
| 42 |
+
CONFIG = h2.config.H2Configuration(validate_inbound_headers=False)
|
| 43 |
+
|
| 44 |
+
def __init__(
|
| 45 |
+
self,
|
| 46 |
+
origin: Origin,
|
| 47 |
+
stream: AsyncNetworkStream,
|
| 48 |
+
keepalive_expiry: typing.Optional[float] = None,
|
| 49 |
+
):
|
| 50 |
+
self._origin = origin
|
| 51 |
+
self._network_stream = stream
|
| 52 |
+
self._keepalive_expiry: typing.Optional[float] = keepalive_expiry
|
| 53 |
+
self._h2_state = h2.connection.H2Connection(config=self.CONFIG)
|
| 54 |
+
self._state = HTTPConnectionState.IDLE
|
| 55 |
+
self._expire_at: typing.Optional[float] = None
|
| 56 |
+
self._request_count = 0
|
| 57 |
+
self._init_lock = AsyncLock()
|
| 58 |
+
self._state_lock = AsyncLock()
|
| 59 |
+
self._read_lock = AsyncLock()
|
| 60 |
+
self._write_lock = AsyncLock()
|
| 61 |
+
self._sent_connection_init = False
|
| 62 |
+
self._used_all_stream_ids = False
|
| 63 |
+
self._connection_error = False
|
| 64 |
+
|
| 65 |
+
# Mapping from stream ID to response stream events.
|
| 66 |
+
self._events: typing.Dict[
|
| 67 |
+
int,
|
| 68 |
+
typing.Union[
|
| 69 |
+
h2.events.ResponseReceived,
|
| 70 |
+
h2.events.DataReceived,
|
| 71 |
+
h2.events.StreamEnded,
|
| 72 |
+
h2.events.StreamReset,
|
| 73 |
+
],
|
| 74 |
+
] = {}
|
| 75 |
+
|
| 76 |
+
# Connection terminated events are stored as state since
|
| 77 |
+
# we need to handle them for all streams.
|
| 78 |
+
self._connection_terminated: typing.Optional[
|
| 79 |
+
h2.events.ConnectionTerminated
|
| 80 |
+
] = None
|
| 81 |
+
|
| 82 |
+
self._read_exception: typing.Optional[Exception] = None
|
| 83 |
+
self._write_exception: typing.Optional[Exception] = None
|
| 84 |
+
|
| 85 |
+
async def handle_async_request(self, request: Request) -> Response:
    """
    Send a request over this HTTP/2 connection and return the response.

    The response body is returned lazily as an `HTTP2ConnectionByteStream`;
    the stream slot is only released once that byte stream is closed.
    Raises `ConnectionNotAvailable` if the connection cannot take a new
    stream at this time.
    """
    if not self.can_handle_request(request.url.origin):
        # This cannot occur in normal operation, since the connection pool
        # will only send requests on connections that handle them.
        # It's in place simply for resilience as a guard against incorrect
        # usage, for anyone working directly with httpcore connections.
        raise RuntimeError(
            f"Attempted to send request to {request.url.origin} on connection "
            f"to {self._origin}"
        )

    async with self._state_lock:
        if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE):
            self._request_count += 1
            self._expire_at = None
            self._state = HTTPConnectionState.ACTIVE
        else:
            raise ConnectionNotAvailable()

    async with self._init_lock:
        if not self._sent_connection_init:
            try:
                kwargs = {"request": request}
                async with Trace("send_connection_init", logger, request, kwargs):
                    await self._send_connection_init(**kwargs)
            except BaseException as exc:
                # Shield so that cleanup completes even if we are being cancelled.
                with AsyncShieldCancellation():
                    await self.aclose()
                raise exc

            self._sent_connection_init = True

            # Initially start with just 1 until the remote server provides
            # its max_concurrent_streams value
            self._max_streams = 1

            local_settings_max_streams = (
                self._h2_state.local_settings.max_concurrent_streams
            )
            self._max_streams_semaphore = AsyncSemaphore(local_settings_max_streams)

            # Pre-acquire all but `self._max_streams` slots, so concurrency is
            # capped at 1 until `_receive_remote_settings_change` releases more.
            for _ in range(local_settings_max_streams - self._max_streams):
                await self._max_streams_semaphore.acquire()

    await self._max_streams_semaphore.acquire()

    try:
        stream_id = self._h2_state.get_next_available_stream_id()
        self._events[stream_id] = []
    except h2.exceptions.NoAvailableStreamIDError:  # pragma: nocover
        self._used_all_stream_ids = True
        self._request_count -= 1
        raise ConnectionNotAvailable()

    try:
        kwargs = {"request": request, "stream_id": stream_id}
        async with Trace("send_request_headers", logger, request, kwargs):
            await self._send_request_headers(request=request, stream_id=stream_id)
        async with Trace("send_request_body", logger, request, kwargs):
            await self._send_request_body(request=request, stream_id=stream_id)
        async with Trace(
            "receive_response_headers", logger, request, kwargs
        ) as trace:
            status, headers = await self._receive_response(
                request=request, stream_id=stream_id
            )
            trace.return_value = (status, headers)

        return Response(
            status=status,
            headers=headers,
            content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id),
            extensions={
                "http_version": b"HTTP/2",
                "network_stream": self._network_stream,
                "stream_id": stream_id,
            },
        )
    except BaseException as exc:  # noqa: PIE786
        with AsyncShieldCancellation():
            kwargs = {"stream_id": stream_id}
            async with Trace("response_closed", logger, request, kwargs):
                await self._response_closed(stream_id=stream_id)

        if isinstance(exc, h2.exceptions.ProtocolError):
            # One case where h2 can raise a protocol error is when a
            # closed frame has been seen by the state machine.
            #
            # This happens when one stream is reading, and encounters
            # a GOAWAY event. Other flows of control may then raise
            # a protocol error at any point they interact with the 'h2_state'.
            #
            # In this case we'll have stored the event, and should raise
            # it as a RemoteProtocolError.
            if self._connection_terminated:  # pragma: nocover
                raise RemoteProtocolError(self._connection_terminated)
            # If h2 raises a protocol error in some other state then we
            # must somehow have made a protocol violation.
            raise LocalProtocolError(exc)  # pragma: nocover

        raise exc
|
| 186 |
+
|
| 187 |
+
async def _send_connection_init(self, request: Request) -> None:
    """
    The HTTP/2 connection requires some initial setup before we can start
    using individual request/response streams on it.

    Sends the connection preface and our SETTINGS frame, and widens the
    connection-level flow-control window.
    """
    # Need to set these manually here instead of manipulating via
    # __setitem__() otherwise the H2Connection will emit SettingsUpdate
    # frames in addition to sending the undesired defaults.
    self._h2_state.local_settings = h2.settings.Settings(
        client=True,
        initial_values={
            # Disable PUSH_PROMISE frames from the server since we don't do anything
            # with them for now. Maybe when we support caching?
            h2.settings.SettingCodes.ENABLE_PUSH: 0,
            # These two are taken from h2 for safe defaults
            h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,
            h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536,
        },
    )

    # Some websites (*cough* Yahoo *cough*) balk at this setting being
    # present in the initial handshake since it's not defined in the original
    # RFC despite the RFC mandating ignoring settings you don't know about.
    del self._h2_state.local_settings[
        h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL
    ]

    self._h2_state.initiate_connection()
    # Bump the connection-level window well past the 64KB default so large
    # responses are not throttled waiting on WINDOW_UPDATE round-trips.
    self._h2_state.increment_flow_control_window(2**24)
    await self._write_outgoing_data(request)
|
| 217 |
+
|
| 218 |
+
# Sending the request...
|
| 219 |
+
|
| 220 |
+
async def _send_request_headers(self, request: Request, stream_id: int) -> None:
    """
    Send the request headers to a given stream ID.
    """
    # If the request carries no body headers, END_STREAM is set on the
    # HEADERS frame and no DATA frames will follow.
    end_stream = not has_body_headers(request)

    # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.
    # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require
    # HTTP/1.1 style headers, and map them appropriately if we end up on
    # an HTTP/2 connection.
    authority = [v for k, v in request.headers if k.lower() == b"host"][0]

    # Pseudo-headers first (required ordering), then the remaining headers
    # lower-cased, with 'host' and 'transfer-encoding' stripped out.
    headers = [
        (b":method", request.method),
        (b":authority", authority),
        (b":scheme", request.url.scheme),
        (b":path", request.url.target),
    ] + [
        (k.lower(), v)
        for k, v in request.headers
        if k.lower()
        not in (
            b"host",
            b"transfer-encoding",
        )
    ]

    self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)
    # Widen this stream's flow-control window up-front, mirroring the
    # connection-level bump in `_send_connection_init`.
    self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id)
    await self._write_outgoing_data(request)
|
| 250 |
+
|
| 251 |
+
async def _send_request_body(self, request: Request, stream_id: int) -> None:
    """
    Stream the request body, if there is one, to the given stream ID.
    """
    if not has_body_headers(request):
        # No Content-Length / Transfer-Encoding header was present, so the
        # HEADERS frame already carried END_STREAM: nothing more to send.
        return

    assert isinstance(request.stream, typing.AsyncIterable)
    async for body_chunk in request.stream:
        await self._send_stream_data(request, stream_id, body_chunk)
    await self._send_end_stream(request, stream_id)
|
| 262 |
+
|
| 263 |
+
async def _send_stream_data(
    self, request: Request, stream_id: int, data: bytes
) -> None:
    """
    Send one chunk of body data, split into DATA frames as permitted by
    flow control and the peer's maximum frame size.
    """
    remaining = data
    while remaining:
        # Blocks until at least one byte of window is available.
        allowed = await self._wait_for_outgoing_flow(request, stream_id)
        frame_payload, remaining = remaining[:allowed], remaining[allowed:]
        self._h2_state.send_data(stream_id, frame_payload)
        await self._write_outgoing_data(request)
|
| 275 |
+
|
| 276 |
+
async def _send_end_stream(self, request: Request, stream_id: int) -> None:
    """
    Send an empty data frame on a given stream ID with the END_STREAM flag set.
    """
    self._h2_state.end_stream(stream_id)
    await self._write_outgoing_data(request)
|
| 282 |
+
|
| 283 |
+
# Receiving the response...
|
| 284 |
+
|
| 285 |
+
async def _receive_response(
    self, request: Request, stream_id: int
) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:
    """
    Wait for the ResponseReceived event on a stream and return a
    (status_code, headers) pair, with pseudo-headers other than
    ':status' stripped from the header list.
    """
    while True:
        event = await self._receive_stream_event(request, stream_id)
        if isinstance(event, h2.events.ResponseReceived):
            break

    status_code = 200
    headers = []
    for name, value in event.headers:
        if name == b":status":
            status_code = int(value.decode("ascii", errors="ignore"))
        elif not name.startswith(b":"):
            headers.append((name, value))

    return (status_code, headers)
|
| 305 |
+
|
| 306 |
+
async def _receive_response_body(
    self, request: Request, stream_id: int
) -> typing.AsyncIterator[bytes]:
    """
    Yield the response body chunks for a stream, acknowledging received
    data as we go so the server's flow-control window is replenished.
    """
    while True:
        event = await self._receive_stream_event(request, stream_id)
        if isinstance(event, h2.events.StreamEnded):
            return
        if isinstance(event, h2.events.DataReceived):
            self._h2_state.acknowledge_received_data(
                event.flow_controlled_length, stream_id
            )
            await self._write_outgoing_data(request)
            yield event.data
|
| 321 |
+
|
| 322 |
+
async def _receive_stream_event(
    self, request: Request, stream_id: int
) -> typing.Union[
    h2.events.ResponseReceived, h2.events.DataReceived, h2.events.StreamEnded
]:
    """
    Return the next available event for a given stream ID.

    Will read more data from the network if required.

    Raises `RemoteProtocolError` if the peer reset the stream.
    """
    while not self._events.get(stream_id):
        await self._receive_events(request, stream_id)
    event = self._events[stream_id].pop(0)
    if isinstance(event, h2.events.StreamReset):
        raise RemoteProtocolError(event)
    return event
|
| 338 |
+
|
| 339 |
+
async def _receive_events(
    self, request: Request, stream_id: typing.Optional[int] = None
) -> None:
    """
    Read some data from the network until we see one or more events
    for a given stream ID.

    With `stream_id=None` (used from `_wait_for_outgoing_flow`) we always
    perform a read, even if events are already buffered.
    """
    async with self._read_lock:
        if self._connection_terminated is not None:
            last_stream_id = self._connection_terminated.last_stream_id
            if stream_id and last_stream_id and stream_id > last_stream_id:
                # This stream was never processed by the server (GOAWAY's
                # last_stream_id is below it), so the request may safely be
                # retried on a fresh connection.
                self._request_count -= 1
                raise ConnectionNotAvailable()
            raise RemoteProtocolError(self._connection_terminated)

        # This conditional is a bit icky. We don't want to block reading if we've
        # actually got an event to return for a given stream. We need to do that
        # check *within* the atomic read lock. Though it also need to be optional,
        # because when we call it from `_wait_for_outgoing_flow` we *do* want to
        # block until we've available flow control, event when we have events
        # pending for the stream ID we're attempting to send on.
        if stream_id is None or not self._events.get(stream_id):
            events = await self._read_incoming_data(request)
            for event in events:
                if isinstance(event, h2.events.RemoteSettingsChanged):
                    async with Trace(
                        "receive_remote_settings", logger, request
                    ) as trace:
                        await self._receive_remote_settings_change(event)
                        trace.return_value = event

                elif isinstance(
                    event,
                    (
                        h2.events.ResponseReceived,
                        h2.events.DataReceived,
                        h2.events.StreamEnded,
                        h2.events.StreamReset,
                    ),
                ):
                    # Only buffer events for streams we still track; events
                    # for already-closed streams are dropped.
                    if event.stream_id in self._events:
                        self._events[event.stream_id].append(event)

                elif isinstance(event, h2.events.ConnectionTerminated):
                    # Stored as connection state since it affects all streams.
                    self._connection_terminated = event

    # Flush anything h2 queued while processing (e.g. WINDOW_UPDATE,
    # SETTINGS ack); done outside the read lock.
    await self._write_outgoing_data(request)
|
| 386 |
+
|
| 387 |
+
async def _receive_remote_settings_change(self, event: h2.events.Event) -> None:
    """
    Adjust the stream-limit semaphore when the server changes
    MAX_CONCURRENT_STREAMS, keeping `self._max_streams` in sync with the
    number of released semaphore slots.
    """
    max_concurrent_streams = event.changed_settings.get(
        h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS
    )
    if max_concurrent_streams:
        # Never exceed our own advertised limit, which bounds the total
        # number of semaphore slots created in `handle_async_request`.
        new_max_streams = min(
            max_concurrent_streams.new_value,
            self._h2_state.local_settings.max_concurrent_streams,
        )
        if new_max_streams and new_max_streams != self._max_streams:
            while new_max_streams > self._max_streams:
                # Raising the limit: free pre-acquired slots one at a time.
                await self._max_streams_semaphore.release()
                self._max_streams += 1
            while new_max_streams < self._max_streams:
                # Lowering the limit: reclaim slots (may wait on active streams).
                await self._max_streams_semaphore.acquire()
                self._max_streams -= 1
|
| 403 |
+
|
| 404 |
+
async def _response_closed(self, stream_id: int) -> None:
    """
    Release a stream slot once its response is fully closed, then update
    the connection state: back to IDLE with a keep-alive deadline, or
    fully closed if terminated / out of stream IDs.
    """
    await self._max_streams_semaphore.release()
    del self._events[stream_id]
    async with self._state_lock:
        if self._connection_terminated and not self._events:
            # A GOAWAY was seen and the last in-flight stream has finished:
            # tear the connection down.
            await self.aclose()

        elif self._state == HTTPConnectionState.ACTIVE and not self._events:
            self._state = HTTPConnectionState.IDLE
            if self._keepalive_expiry is not None:
                now = time.monotonic()
                self._expire_at = now + self._keepalive_expiry
            if self._used_all_stream_ids:  # pragma: nocover
                # No new streams can ever be opened; close rather than idle.
                await self.aclose()
|
| 418 |
+
|
| 419 |
+
async def aclose(self) -> None:
    """Close the h2 state machine, mark the connection CLOSED, and close the socket."""
    # Note that this method unilaterally closes the connection, and does
    # not have any kind of locking in place around it.
    self._h2_state.close_connection()
    self._state = HTTPConnectionState.CLOSED
    await self._network_stream.aclose()
|
| 425 |
+
|
| 426 |
+
# Wrappers around network read/write operations...
|
| 427 |
+
|
| 428 |
+
async def _read_incoming_data(
    self, request: Request
) -> typing.List[h2.events.Event]:
    """
    Read one chunk from the network, feed it through the h2 state machine,
    and return the events it produced.

    Raises any previously-latched read error immediately on re-entry.
    """
    timeouts = request.extensions.get("timeout", {})
    timeout = timeouts.get("read", None)

    if self._read_exception is not None:
        raise self._read_exception  # pragma: nocover

    try:
        data = await self._network_stream.read(self.READ_NUM_BYTES, timeout)
        if data == b"":
            raise RemoteProtocolError("Server disconnected")
    except Exception as exc:
        # If we get a network error we should:
        #
        # 1. Save the exception and just raise it immediately on any future reads.
        #    (For example, this means that a single read timeout or disconnect will
        #    immediately close all pending streams. Without requiring multiple
        #    sequential timeouts.)
        # 2. Mark the connection as errored, so that we don't accept any other
        #    incoming requests.
        self._read_exception = exc
        self._connection_error = True
        raise exc

    events: typing.List[h2.events.Event] = self._h2_state.receive_data(data)

    return events
|
| 457 |
+
|
| 458 |
+
async def _write_outgoing_data(self, request: Request) -> None:
    """
    Flush any pending outgoing bytes from the h2 state machine to the
    network, serialized under the write lock.

    Raises any previously-latched write error immediately on re-entry.
    """
    timeouts = request.extensions.get("timeout", {})
    timeout = timeouts.get("write", None)

    async with self._write_lock:
        # Drain h2's buffer inside the lock so bytes go out in order.
        data_to_send = self._h2_state.data_to_send()

        if self._write_exception is not None:
            raise self._write_exception  # pragma: nocover

        try:
            await self._network_stream.write(data_to_send, timeout)
        except Exception as exc:  # pragma: nocover
            # If we get a network error we should:
            #
            # 1. Save the exception and just raise it immediately on any future write.
            #    (For example, this means that a single write timeout or disconnect will
            #    immediately close all pending streams. Without requiring multiple
            #    sequential timeouts.)
            # 2. Mark the connection as errored, so that we don't accept any other
            #    incoming requests.
            self._write_exception = exc
            self._connection_error = True
            raise exc
|
| 482 |
+
|
| 483 |
+
# Flow control...
|
| 484 |
+
|
| 485 |
+
async def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int:
    """
    Returns the maximum allowable outgoing flow for a given stream.

    If the allowable flow is zero, reads from the network until
    WindowUpdated frames have increased the flow rate.
    https://tools.ietf.org/html/rfc7540#section-6.9
    """
    while True:
        window: int = self._h2_state.local_flow_control_window(stream_id)
        frame_limit: int = self._h2_state.max_outbound_frame_size
        available = min(window, frame_limit)
        if available > 0:
            return available
        # Window exhausted: block on the network until it is replenished.
        await self._receive_events(request)
|
| 502 |
+
|
| 503 |
+
# Interface for connection pooling...
|
| 504 |
+
|
| 505 |
+
def can_handle_request(self, origin: Origin) -> bool:
|
| 506 |
+
return origin == self._origin
|
| 507 |
+
|
| 508 |
+
def is_available(self) -> bool:
    """
    True while the connection can accept another request: it must not be
    closed, errored, out of stream IDs, or closed at the h2 level.
    """
    if self._state == HTTPConnectionState.CLOSED:
        return False
    if self._connection_error or self._used_all_stream_ids:
        return False
    return (
        self._h2_state.state_machine.state != h2.connection.ConnectionState.CLOSED
    )
|
| 518 |
+
|
| 519 |
+
def has_expired(self) -> bool:
    """True once the keep-alive expiry deadline has passed."""
    if self._expire_at is None:
        return False
    return time.monotonic() > self._expire_at
|
| 522 |
+
|
| 523 |
+
def is_idle(self) -> bool:
    """True while the connection is open but has no active streams."""
    return self._state == HTTPConnectionState.IDLE
|
| 525 |
+
|
| 526 |
+
def is_closed(self) -> bool:
    """True once the connection has been fully closed."""
    return self._state == HTTPConnectionState.CLOSED
|
| 528 |
+
|
| 529 |
+
def info(self) -> str:
    """One-line human-readable summary of the connection's current state."""
    return (
        f"{str(self._origin)!r}, HTTP/2, {self._state.name}, "
        f"Request Count: {self._request_count}"
    )
|
| 535 |
+
|
| 536 |
+
def __repr__(self) -> str:
    # Debug representation, e.g.
    # "<AsyncHTTP2Connection ['https://example.com:443', IDLE, Request Count: 6]>"
    class_name = self.__class__.__name__
    origin = str(self._origin)
    return (
        f"<{class_name} [{origin!r}, {self._state.name}, "
        f"Request Count: {self._request_count}]>"
    )
|
| 543 |
+
|
| 544 |
+
# These context managers are not used in the standard flow, but are
|
| 545 |
+
# useful for testing or working with connection instances directly.
|
| 546 |
+
|
| 547 |
+
async def __aenter__(self) -> "AsyncHTTP2Connection":
    # Supports `async with` usage when working with connections directly.
    return self
|
| 549 |
+
|
| 550 |
+
async def __aexit__(
    self,
    exc_type: typing.Optional[typing.Type[BaseException]] = None,
    exc_value: typing.Optional[BaseException] = None,
    traceback: typing.Optional[types.TracebackType] = None,
) -> None:
    # Always close the connection on exiting the `async with` block.
    await self.aclose()
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
class HTTP2ConnectionByteStream:
    """
    Lazy async iterator over a single HTTP/2 response body.

    Closing the stream releases the stream slot back to the owning
    connection; if iteration fails, the stream is closed before the
    exception propagates.
    """

    def __init__(
        self, connection: AsyncHTTP2Connection, request: Request, stream_id: int
    ) -> None:
        self._connection = connection
        self._request = request
        self._stream_id = stream_id
        self._closed = False

    async def __aiter__(self) -> typing.AsyncIterator[bytes]:
        trace_kwargs = {"request": self._request, "stream_id": self._stream_id}
        try:
            async with Trace(
                "receive_response_body", logger, self._request, trace_kwargs
            ):
                body = self._connection._receive_response_body(
                    request=self._request, stream_id=self._stream_id
                )
                async for chunk in body:
                    yield chunk
        except BaseException as exc:
            # If we get an exception while streaming the response,
            # we want to close the response (and possibly the connection)
            # before raising that exception.
            with AsyncShieldCancellation():
                await self.aclose()
            raise exc

    async def aclose(self) -> None:
        if self._closed:
            return
        self._closed = True
        close_kwargs = {"stream_id": self._stream_id}
        async with Trace("response_closed", logger, self._request, close_kwargs):
            await self._connection._response_closed(stream_id=self._stream_id)
|
parrot/lib/python3.10/site-packages/httpcore/_async/http_proxy.py
ADDED
|
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import ssl
|
| 3 |
+
from base64 import b64encode
|
| 4 |
+
from typing import Iterable, List, Mapping, Optional, Sequence, Tuple, Union
|
| 5 |
+
|
| 6 |
+
from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend
|
| 7 |
+
from .._exceptions import ProxyError
|
| 8 |
+
from .._models import (
|
| 9 |
+
URL,
|
| 10 |
+
Origin,
|
| 11 |
+
Request,
|
| 12 |
+
Response,
|
| 13 |
+
enforce_bytes,
|
| 14 |
+
enforce_headers,
|
| 15 |
+
enforce_url,
|
| 16 |
+
)
|
| 17 |
+
from .._ssl import default_ssl_context
|
| 18 |
+
from .._synchronization import AsyncLock
|
| 19 |
+
from .._trace import Trace
|
| 20 |
+
from .connection import AsyncHTTPConnection
|
| 21 |
+
from .connection_pool import AsyncConnectionPool
|
| 22 |
+
from .http11 import AsyncHTTP11Connection
|
| 23 |
+
from .interfaces import AsyncConnectionInterface
|
| 24 |
+
|
| 25 |
+
HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]
|
| 26 |
+
HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger("httpcore.proxy")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def merge_headers(
    default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
    override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
) -> List[Tuple[bytes, bytes]]:
    """
    Combine two header lists, with override_headers taking precedence:
    any key present (compared case-insensitively) in override_headers
    replaces the matching entries from default_headers.
    """
    defaults = [] if default_headers is None else list(default_headers)
    overrides = [] if override_headers is None else list(override_headers)
    overridden_keys = {key.lower() for key, _ in overrides}
    kept = [
        (key, value) for key, value in defaults if key.lower() not in overridden_keys
    ]
    return kept + overrides
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def build_auth_header(username: bytes, password: bytes) -> bytes:
    """Return an HTTP Basic auth credential value, e.g. for Proxy-Authorization."""
    credentials = b":".join((username, password))
    return b"Basic " + b64encode(credentials)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class AsyncHTTPProxy(AsyncConnectionPool):
    """
    A connection pool that sends requests via an HTTP proxy.
    """

    def __init__(
        self,
        proxy_url: Union[URL, bytes, str],
        proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None,
        proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None,
        ssl_context: Optional[ssl.SSLContext] = None,
        max_connections: Optional[int] = 10,
        max_keepalive_connections: Optional[int] = None,
        keepalive_expiry: Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        retries: int = 0,
        local_address: Optional[str] = None,
        uds: Optional[str] = None,
        network_backend: Optional[AsyncNetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        """
        A connection pool for making HTTP requests through an HTTP proxy.

        Parameters:
            proxy_url: The URL of the proxy server,
                e.g. `"http://127.0.0.1:8080/"`.
            proxy_auth: Proxy credentials as a (username, password) two-tuple.
                May be either bytes or ascii-only str.
            proxy_headers: Extra HTTP headers to use for the proxy requests,
                e.g. `{"Proxy-Authorization": "Basic <username>:<password>"}`.
            ssl_context: SSL context used for verifying connections. Defaults
                to `httpcore.default_ssl_context()` when not given.
            max_connections: Maximum number of concurrent HTTP connections the
                pool allows; sending beyond this blocks until one is free.
            max_keepalive_connections: Maximum number of idle HTTP connections
                maintained in the pool.
            keepalive_expiry: Seconds an idle HTTP connection may remain in
                the pool before being expired.
            http1: Whether HTTP/1.1 requests are supported. Defaults to True.
            http2: Whether HTTP/2 requests are supported. Defaults to False.
            retries: Maximum retries when establishing a connection.
            local_address: Local address to connect from; also selects the
                address family (`"0.0.0.0"` for IPv4, `"::"` for IPv6).
            uds: Path to a Unix Domain Socket instead of TCP sockets.
            network_backend: Backend instance used for handling network I/O.
        """
        super().__init__(
            ssl_context=ssl_context,
            max_connections=max_connections,
            max_keepalive_connections=max_keepalive_connections,
            keepalive_expiry=keepalive_expiry,
            http1=http1,
            http2=http2,
            network_backend=network_backend,
            retries=retries,
            local_address=local_address,
            uds=uds,
            socket_options=socket_options,
        )
        self._ssl_context = ssl_context
        self._proxy_url = enforce_url(proxy_url, name="proxy_url")
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        if proxy_auth is not None:
            user = enforce_bytes(proxy_auth[0], name="proxy_auth")
            password = enforce_bytes(proxy_auth[1], name="proxy_auth")
            credential = build_auth_header(user, password)
            self._proxy_headers = [
                (b"Proxy-Authorization", credential)
            ] + self._proxy_headers

    def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
        # Plain `http` origins are forwarded through the proxy; anything
        # else is tunnelled via an HTTP CONNECT request.
        if origin.scheme == b"http":
            return AsyncForwardHTTPConnection(
                proxy_origin=self._proxy_url.origin,
                proxy_headers=self._proxy_headers,
                remote_origin=origin,
                keepalive_expiry=self._keepalive_expiry,
                network_backend=self._network_backend,
            )
        return AsyncTunnelHTTPConnection(
            proxy_origin=self._proxy_url.origin,
            proxy_headers=self._proxy_headers,
            remote_origin=origin,
            ssl_context=self._ssl_context,
            keepalive_expiry=self._keepalive_expiry,
            http1=self._http1,
            http2=self._http2,
            network_backend=self._network_backend,
        )
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class AsyncForwardHTTPConnection(AsyncConnectionInterface):
    """
    Proxies plain-HTTP requests by forwarding them to the proxy server,
    using the original absolute URL as the request target.
    """

    def __init__(
        self,
        proxy_origin: Origin,
        remote_origin: Origin,
        proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None,
        keepalive_expiry: Optional[float] = None,
        network_backend: Optional[AsyncNetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        # The underlying connection is made to the proxy, not to the
        # remote origin.
        self._connection = AsyncHTTPConnection(
            origin=proxy_origin,
            keepalive_expiry=keepalive_expiry,
            network_backend=network_backend,
            socket_options=socket_options,
        )
        self._proxy_origin = proxy_origin
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        self._remote_origin = remote_origin

    async def handle_async_request(self, request: Request) -> Response:
        # Re-address the request to the proxy, placing the full original
        # URL in absolute-form as the request target.
        target_url = URL(
            scheme=self._proxy_origin.scheme,
            host=self._proxy_origin.host,
            port=self._proxy_origin.port,
            target=bytes(request.url),
        )
        proxy_request = Request(
            method=request.method,
            url=target_url,
            headers=merge_headers(self._proxy_headers, request.headers),
            content=request.stream,
            extensions=request.extensions,
        )
        return await self._connection.handle_async_request(proxy_request)

    def can_handle_request(self, origin: Origin) -> bool:
        return origin == self._remote_origin

    async def aclose(self) -> None:
        await self._connection.aclose()

    def info(self) -> str:
        return self._connection.info()

    def is_available(self) -> bool:
        return self._connection.is_available()

    def has_expired(self) -> bool:
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        return self._connection.is_closed()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class AsyncTunnelHTTPConnection(AsyncConnectionInterface):
    """An async connection that tunnels requests through an HTTP proxy.

    On first use, a `CONNECT` request is issued to the proxy; the resulting
    raw network stream is then upgraded to TLS and wrapped in an HTTP/1.1
    or HTTP/2 connection to the remote origin.
    """

    def __init__(
        self,
        proxy_origin: Origin,
        remote_origin: Origin,
        ssl_context: Optional[ssl.SSLContext] = None,
        proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
        keepalive_expiry: Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        network_backend: Optional[AsyncNetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        # Initially a plain connection to the proxy; replaced with an
        # HTTP/1.1 or HTTP/2 connection to the remote origin once the
        # tunnel has been established (see handle_async_request).
        self._connection: AsyncConnectionInterface = AsyncHTTPConnection(
            origin=proxy_origin,
            keepalive_expiry=keepalive_expiry,
            network_backend=network_backend,
            socket_options=socket_options,
        )
        self._proxy_origin = proxy_origin
        self._remote_origin = remote_origin
        self._ssl_context = ssl_context
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        self._keepalive_expiry = keepalive_expiry
        self._http1 = http1
        self._http2 = http2
        # Serialises the one-time tunnel setup across concurrent requests.
        self._connect_lock = AsyncLock()
        self._connected = False

    async def handle_async_request(self, request: Request) -> Response:
        """Establish the tunnel if needed, then dispatch `request` over it.

        Raises `ProxyError` if the proxy refuses the CONNECT request.
        """
        timeouts = request.extensions.get("timeout", {})
        timeout = timeouts.get("connect", None)

        async with self._connect_lock:
            if not self._connected:
                # CONNECT uses authority-form "host:port" as its target.
                target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port)

                connect_url = URL(
                    scheme=self._proxy_origin.scheme,
                    host=self._proxy_origin.host,
                    port=self._proxy_origin.port,
                    target=target,
                )
                connect_headers = merge_headers(
                    [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers
                )
                connect_request = Request(
                    method=b"CONNECT",
                    url=connect_url,
                    headers=connect_headers,
                    extensions=request.extensions,
                )
                connect_response = await self._connection.handle_async_request(
                    connect_request
                )

                # Any non-2xx status means the proxy refused the tunnel.
                if connect_response.status < 200 or connect_response.status > 299:
                    reason_bytes = connect_response.extensions.get("reason_phrase", b"")
                    reason_str = reason_bytes.decode("ascii", errors="ignore")
                    msg = "%d %s" % (connect_response.status, reason_str)
                    await self._connection.aclose()
                    raise ProxyError(msg)

                # The raw stream underlying the successful CONNECT exchange.
                stream = connect_response.extensions["network_stream"]

                # Upgrade the stream to SSL
                ssl_context = (
                    default_ssl_context()
                    if self._ssl_context is None
                    else self._ssl_context
                )
                # Only offer "h2" via ALPN when HTTP/2 is enabled.
                alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
                ssl_context.set_alpn_protocols(alpn_protocols)

                kwargs = {
                    "ssl_context": ssl_context,
                    "server_hostname": self._remote_origin.host.decode("ascii"),
                    "timeout": timeout,
                }
                async with Trace("start_tls", logger, request, kwargs) as trace:
                    stream = await stream.start_tls(**kwargs)
                    trace.return_value = stream

                # Determine if we should be using HTTP/1.1 or HTTP/2
                ssl_object = stream.get_extra_info("ssl_object")
                http2_negotiated = (
                    ssl_object is not None
                    and ssl_object.selected_alpn_protocol() == "h2"
                )

                # Create the HTTP/1.1 or HTTP/2 connection
                if http2_negotiated or (self._http2 and not self._http1):
                    # Imported lazily so h2 is only required when used.
                    from .http2 import AsyncHTTP2Connection

                    self._connection = AsyncHTTP2Connection(
                        origin=self._remote_origin,
                        stream=stream,
                        keepalive_expiry=self._keepalive_expiry,
                    )
                else:
                    self._connection = AsyncHTTP11Connection(
                        origin=self._remote_origin,
                        stream=stream,
                        keepalive_expiry=self._keepalive_expiry,
                    )

                self._connected = True
        return await self._connection.handle_async_request(request)

    def can_handle_request(self, origin: Origin) -> bool:
        # Matches the tunnelled remote origin, not the proxy's origin.
        return origin == self._remote_origin

    async def aclose(self) -> None:
        await self._connection.aclose()

    def info(self) -> str:
        return self._connection.info()

    def is_available(self) -> bool:
        return self._connection.is_available()

    def has_expired(self) -> bool:
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        return self._connection.is_closed()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"
parrot/lib/python3.10/site-packages/httpcore/_async/interfaces.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from contextlib import asynccontextmanager
|
| 2 |
+
from typing import AsyncIterator, Optional, Union
|
| 3 |
+
|
| 4 |
+
from .._models import (
|
| 5 |
+
URL,
|
| 6 |
+
Extensions,
|
| 7 |
+
HeaderTypes,
|
| 8 |
+
Origin,
|
| 9 |
+
Request,
|
| 10 |
+
Response,
|
| 11 |
+
enforce_bytes,
|
| 12 |
+
enforce_headers,
|
| 13 |
+
enforce_url,
|
| 14 |
+
include_request_headers,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class AsyncRequestInterface:
    """The high-level async request API shared by pools and connections."""

    def _build_request(
        self,
        method: Union[bytes, str],
        url: Union[URL, bytes, str],
        headers: HeaderTypes,
        content: Union[bytes, AsyncIterator[bytes], None],
        extensions: Optional[Extensions],
    ) -> Request:
        # Strict type checking on our parameters.
        method = enforce_bytes(method, name="method")
        url = enforce_url(url, name="url")
        headers = enforce_headers(headers, name="headers")

        # Include Host header, and optionally Content-Length or Transfer-Encoding.
        headers = include_request_headers(headers, url=url, content=content)

        return Request(
            method=method,
            url=url,
            headers=headers,
            content=content,
            extensions=extensions,
        )

    async def request(
        self,
        method: Union[bytes, str],
        url: Union[URL, bytes, str],
        *,
        headers: HeaderTypes = None,
        content: Union[bytes, AsyncIterator[bytes], None] = None,
        extensions: Optional[Extensions] = None,
    ) -> Response:
        """Send a request, reading the complete response body before returning."""
        request = self._build_request(method, url, headers, content, extensions)
        response = await self.handle_async_request(request)
        try:
            await response.aread()
        finally:
            await response.aclose()
        return response

    @asynccontextmanager
    async def stream(
        self,
        method: Union[bytes, str],
        url: Union[URL, bytes, str],
        *,
        headers: HeaderTypes = None,
        content: Union[bytes, AsyncIterator[bytes], None] = None,
        extensions: Optional[Extensions] = None,
    ) -> AsyncIterator[Response]:
        """Send a request, yielding the response for incremental streaming reads.

        The response is closed when the context manager exits.
        """
        request = self._build_request(method, url, headers, content, extensions)
        response = await self.handle_async_request(request)
        try:
            yield response
        finally:
            await response.aclose()

    async def handle_async_request(self, request: Request) -> Response:
        raise NotImplementedError()  # pragma: nocover
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class AsyncConnectionInterface(AsyncRequestInterface):
    """Extends the request interface with connection lifecycle and state probes."""

    async def aclose(self) -> None:
        """Close the connection and release any underlying resources."""
        raise NotImplementedError()  # pragma: nocover

    def info(self) -> str:
        """Return a short human-readable description of the connection state."""
        raise NotImplementedError()  # pragma: nocover

    def can_handle_request(self, origin: Origin) -> bool:
        """Return `True` if this connection can service requests to `origin`."""
        raise NotImplementedError()  # pragma: nocover

    def is_available(self) -> bool:
        """
        Return `True` if the connection is currently able to accept an
        outgoing request.

        An HTTP/1.1 connection will only be available if it is currently idle.

        An HTTP/2 connection will be available so long as the stream ID space is
        not yet exhausted, and the connection is not in an error state.

        While the connection is being established we may not yet know if it is going
        to result in an HTTP/1.1 or HTTP/2 connection. The connection should be
        treated as being available, but might ultimately raise `NewConnectionRequired`
        exceptions if multiple requests are attempted over a connection
        that ends up being established as HTTP/1.1.
        """
        raise NotImplementedError()  # pragma: nocover

    def has_expired(self) -> bool:
        """
        Return `True` if the connection is in a state where it should be closed.

        This either means that the connection is idle and it has passed the
        expiry time on its keep-alive, or that server has sent an EOF.
        """
        raise NotImplementedError()  # pragma: nocover

    def is_idle(self) -> bool:
        """
        Return `True` if the connection is currently idle.
        """
        raise NotImplementedError()  # pragma: nocover

    def is_closed(self) -> bool:
        """
        Return `True` if the connection has been closed.

        Used when a response is closed to determine if the connection may be
        returned to the connection pool or not.
        """
        raise NotImplementedError()  # pragma: nocover
parrot/lib/python3.10/site-packages/httpcore/_async/socks_proxy.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import ssl
|
| 3 |
+
import typing
|
| 4 |
+
|
| 5 |
+
from socksio import socks5
|
| 6 |
+
|
| 7 |
+
from .._backends.auto import AutoBackend
|
| 8 |
+
from .._backends.base import AsyncNetworkBackend, AsyncNetworkStream
|
| 9 |
+
from .._exceptions import ConnectionNotAvailable, ProxyError
|
| 10 |
+
from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url
|
| 11 |
+
from .._ssl import default_ssl_context
|
| 12 |
+
from .._synchronization import AsyncLock
|
| 13 |
+
from .._trace import Trace
|
| 14 |
+
from .connection_pool import AsyncConnectionPool
|
| 15 |
+
from .http11 import AsyncHTTP11Connection
|
| 16 |
+
from .interfaces import AsyncConnectionInterface
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger("httpcore.socks")


# Human-readable names for SOCKS5 authentication method bytes
# (RFC 1928, section 3). Used only when building error messages.
AUTH_METHODS = {
    b"\x00": "NO AUTHENTICATION REQUIRED",
    b"\x01": "GSSAPI",
    b"\x02": "USERNAME/PASSWORD",
    b"\xff": "NO ACCEPTABLE METHODS",
}

# Human-readable descriptions of SOCKS5 reply codes
# (RFC 1928, section 6). Used only when building error messages.
REPLY_CODES = {
    b"\x00": "Succeeded",
    b"\x01": "General SOCKS server failure",
    b"\x02": "Connection not allowed by ruleset",
    b"\x03": "Network unreachable",
    b"\x04": "Host unreachable",
    b"\x05": "Connection refused",
    b"\x06": "TTL expired",
    b"\x07": "Command not supported",
    b"\x08": "Address type not supported",
}
| 39 |
+
|
| 40 |
+
|
| 41 |
+
async def _init_socks5_connection(
    stream: AsyncNetworkStream,
    *,
    host: bytes,
    port: int,
    auth: typing.Optional[typing.Tuple[bytes, bytes]] = None,
) -> None:
    """Perform the SOCKS5 handshake over an already-connected stream.

    Negotiates the authentication method, optionally performs
    username/password authentication, then issues a CONNECT for
    ``host:port``.

    Raises:
        ProxyError: If the proxy rejects the auth method, the credentials,
            or the CONNECT command.
    """
    conn = socks5.SOCKS5Connection()

    # Auth method request
    auth_method = (
        socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED
        if auth is None
        else socks5.SOCKS5AuthMethod.USERNAME_PASSWORD
    )
    conn.send(socks5.SOCKS5AuthMethodsRequest([auth_method]))
    outgoing_bytes = conn.data_to_send()
    await stream.write(outgoing_bytes)

    # Auth method response
    incoming_bytes = await stream.read(max_bytes=4096)
    response = conn.receive_data(incoming_bytes)
    assert isinstance(response, socks5.SOCKS5AuthReply)
    if response.method != auth_method:
        requested = AUTH_METHODS.get(auth_method, "UNKNOWN")
        responded = AUTH_METHODS.get(response.method, "UNKNOWN")
        raise ProxyError(
            f"Requested {requested} from proxy server, but got {responded}."
        )

    if response.method == socks5.SOCKS5AuthMethod.USERNAME_PASSWORD:
        # Username/password request
        assert auth is not None
        username, password = auth
        conn.send(socks5.SOCKS5UsernamePasswordRequest(username, password))
        outgoing_bytes = conn.data_to_send()
        await stream.write(outgoing_bytes)

        # Username/password response
        incoming_bytes = await stream.read(max_bytes=4096)
        response = conn.receive_data(incoming_bytes)
        assert isinstance(response, socks5.SOCKS5UsernamePasswordReply)
        if not response.success:
            raise ProxyError("Invalid username/password")

    # Connect request
    conn.send(
        socks5.SOCKS5CommandRequest.from_address(
            socks5.SOCKS5Command.CONNECT, (host, port)
        )
    )
    outgoing_bytes = conn.data_to_send()
    await stream.write(outgoing_bytes)

    # Connect response
    incoming_bytes = await stream.read(max_bytes=4096)
    response = conn.receive_data(incoming_bytes)
    assert isinstance(response, socks5.SOCKS5Reply)
    if response.reply_code != socks5.SOCKS5ReplyCode.SUCCEEDED:
        # Bug fix: fallback text was misspelled "UNKOWN".
        reply_code = REPLY_CODES.get(response.reply_code, "UNKNOWN")
        raise ProxyError(f"Proxy Server could not connect: {reply_code}.")
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class AsyncSOCKSProxy(AsyncConnectionPool):
    """
    A connection pool that sends requests via a SOCKS5 proxy.
    """

    def __init__(
        self,
        proxy_url: typing.Union[URL, bytes, str],
        proxy_auth: typing.Optional[
            typing.Tuple[typing.Union[bytes, str], typing.Union[bytes, str]]
        ] = None,
        ssl_context: typing.Optional[ssl.SSLContext] = None,
        max_connections: typing.Optional[int] = 10,
        max_keepalive_connections: typing.Optional[int] = None,
        keepalive_expiry: typing.Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        retries: int = 0,
        network_backend: typing.Optional[AsyncNetworkBackend] = None,
    ) -> None:
        """
        A connection pool for making HTTP requests via a SOCKS5 proxy.

        Parameters:
            proxy_url: The URL to use when connecting to the proxy server.
                For example `"socks5://127.0.0.1:1080/"`.
            proxy_auth: An optional `(username, password)` pair for SOCKS5
                username/password authentication with the proxy.
            ssl_context: An SSL context to use for verifying connections.
                If not specified, the default `httpcore.default_ssl_context()`
                will be used.
            max_connections: The maximum number of concurrent HTTP connections that
                the pool should allow. Any attempt to send a request on a pool that
                would exceed this amount will block until a connection is available.
            max_keepalive_connections: The maximum number of idle HTTP connections
                that will be maintained in the pool.
            keepalive_expiry: The duration in seconds that an idle HTTP connection
                may be maintained for before being expired from the pool.
            http1: A boolean indicating if HTTP/1.1 requests should be supported
                by the connection pool. Defaults to True.
            http2: A boolean indicating if HTTP/2 requests should be supported by
                the connection pool. Defaults to False.
            retries: The maximum number of retries when trying to establish
                a connection.
            network_backend: A backend instance to use for handling network I/O.
        """
        super().__init__(
            ssl_context=ssl_context,
            max_connections=max_connections,
            max_keepalive_connections=max_keepalive_connections,
            keepalive_expiry=keepalive_expiry,
            http1=http1,
            http2=http2,
            network_backend=network_backend,
            retries=retries,
        )
        self._ssl_context = ssl_context
        self._proxy_url = enforce_url(proxy_url, name="proxy_url")
        if proxy_auth is not None:
            # Normalise both credential parts to bytes.
            username, password = proxy_auth
            username_bytes = enforce_bytes(username, name="proxy_auth")
            password_bytes = enforce_bytes(password, name="proxy_auth")
            self._proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = (
                username_bytes,
                password_bytes,
            )
        else:
            self._proxy_auth = None

    def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
        # Called by the pool whenever a new origin needs a connection.
        return AsyncSocks5Connection(
            proxy_origin=self._proxy_url.origin,
            remote_origin=origin,
            proxy_auth=self._proxy_auth,
            ssl_context=self._ssl_context,
            keepalive_expiry=self._keepalive_expiry,
            http1=self._http1,
            http2=self._http2,
            network_backend=self._network_backend,
        )
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class AsyncSocks5Connection(AsyncConnectionInterface):
    """A single connection to a remote origin, established via a SOCKS5 proxy.

    The TCP connection, SOCKS5 handshake, optional TLS upgrade, and choice
    of HTTP/1.1 vs HTTP/2 are all performed lazily on the first request.
    """

    def __init__(
        self,
        proxy_origin: Origin,
        remote_origin: Origin,
        proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = None,
        ssl_context: typing.Optional[ssl.SSLContext] = None,
        keepalive_expiry: typing.Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        network_backend: typing.Optional[AsyncNetworkBackend] = None,
    ) -> None:
        self._proxy_origin = proxy_origin
        self._remote_origin = remote_origin
        self._proxy_auth = proxy_auth
        self._ssl_context = ssl_context
        self._keepalive_expiry = keepalive_expiry
        self._http1 = http1
        self._http2 = http2

        self._network_backend: AsyncNetworkBackend = (
            AutoBackend() if network_backend is None else network_backend
        )
        # Serialises the one-time connection setup across concurrent requests.
        self._connect_lock = AsyncLock()
        self._connection: typing.Optional[AsyncConnectionInterface] = None
        self._connect_failed = False

    async def handle_async_request(self, request: Request) -> Response:
        """Establish the proxied connection if needed, then dispatch `request`."""
        timeouts = request.extensions.get("timeout", {})
        timeout = timeouts.get("connect", None)

        async with self._connect_lock:
            if self._connection is None:
                try:
                    # Connect to the proxy
                    kwargs = {
                        "host": self._proxy_origin.host.decode("ascii"),
                        "port": self._proxy_origin.port,
                        "timeout": timeout,
                    }
                    # Fix: use `async with` so the async tracing hooks fire,
                    # consistent with the "start_tls" trace below.
                    async with Trace("connect_tcp", logger, request, kwargs) as trace:
                        stream = await self._network_backend.connect_tcp(**kwargs)
                        trace.return_value = stream

                    # Connect to the remote host using socks5
                    kwargs = {
                        "stream": stream,
                        "host": self._remote_origin.host.decode("ascii"),
                        "port": self._remote_origin.port,
                        "auth": self._proxy_auth,
                    }
                    async with Trace(
                        "setup_socks5_connection", logger, request, kwargs
                    ) as trace:
                        await _init_socks5_connection(**kwargs)
                        trace.return_value = stream

                    # Upgrade the stream to SSL
                    if self._remote_origin.scheme == b"https":
                        ssl_context = (
                            default_ssl_context()
                            if self._ssl_context is None
                            else self._ssl_context
                        )
                        # Only offer "h2" via ALPN when HTTP/2 is enabled.
                        alpn_protocols = (
                            ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
                        )
                        ssl_context.set_alpn_protocols(alpn_protocols)

                        kwargs = {
                            "ssl_context": ssl_context,
                            "server_hostname": self._remote_origin.host.decode("ascii"),
                            "timeout": timeout,
                        }
                        async with Trace("start_tls", logger, request, kwargs) as trace:
                            stream = await stream.start_tls(**kwargs)
                            trace.return_value = stream

                    # Determine if we should be using HTTP/1.1 or HTTP/2
                    ssl_object = stream.get_extra_info("ssl_object")
                    http2_negotiated = (
                        ssl_object is not None
                        and ssl_object.selected_alpn_protocol() == "h2"
                    )

                    # Create the HTTP/1.1 or HTTP/2 connection
                    if http2_negotiated or (
                        self._http2 and not self._http1
                    ):  # pragma: nocover
                        # Imported lazily so h2 is only required when used.
                        from .http2 import AsyncHTTP2Connection

                        self._connection = AsyncHTTP2Connection(
                            origin=self._remote_origin,
                            stream=stream,
                            keepalive_expiry=self._keepalive_expiry,
                        )
                    else:
                        self._connection = AsyncHTTP11Connection(
                            origin=self._remote_origin,
                            stream=stream,
                            keepalive_expiry=self._keepalive_expiry,
                        )
                except Exception as exc:
                    # Record the failure so the state probes below reflect it.
                    self._connect_failed = True
                    raise exc
            elif not self._connection.is_available():  # pragma: nocover
                raise ConnectionNotAvailable()

        return await self._connection.handle_async_request(request)

    def can_handle_request(self, origin: Origin) -> bool:
        return origin == self._remote_origin

    async def aclose(self) -> None:
        if self._connection is not None:
            await self._connection.aclose()

    def is_available(self) -> bool:
        if self._connection is None:  # pragma: nocover
            # If HTTP/2 support is enabled, and the resulting connection could
            # end up as HTTP/2 then we should indicate the connection as being
            # available to service multiple requests.
            return (
                self._http2
                and (self._remote_origin.scheme == b"https" or not self._http1)
                and not self._connect_failed
            )
        return self._connection.is_available()

    def has_expired(self) -> bool:
        if self._connection is None:  # pragma: nocover
            return self._connect_failed
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        if self._connection is None:  # pragma: nocover
            return self._connect_failed
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        if self._connection is None:  # pragma: nocover
            return self._connect_failed
        return self._connection.is_closed()

    def info(self) -> str:
        if self._connection is None:  # pragma: nocover
            return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"
        return self._connection.info()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"
parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/auto.cpython-310.pyc
ADDED
|
Binary file (1.78 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/mock.cpython-310.pyc
ADDED
|
Binary file (5.38 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/sync.cpython-310.pyc
ADDED
|
Binary file (4.13 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/trio.cpython-310.pyc
ADDED
|
Binary file (5.13 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/httpcore/_backends/sync.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import socket
|
| 2 |
+
import ssl
|
| 3 |
+
import sys
|
| 4 |
+
import typing
|
| 5 |
+
|
| 6 |
+
from .._exceptions import (
|
| 7 |
+
ConnectError,
|
| 8 |
+
ConnectTimeout,
|
| 9 |
+
ExceptionMapping,
|
| 10 |
+
ReadError,
|
| 11 |
+
ReadTimeout,
|
| 12 |
+
WriteError,
|
| 13 |
+
WriteTimeout,
|
| 14 |
+
map_exceptions,
|
| 15 |
+
)
|
| 16 |
+
from .._utils import is_socket_readable
|
| 17 |
+
from .base import SOCKET_OPTION, NetworkBackend, NetworkStream
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class SyncStream(NetworkStream):
    """A `NetworkStream` implementation backed by a blocking `socket.socket`."""

    def __init__(self, sock: socket.socket) -> None:
        self._sock = sock

    def read(self, max_bytes: int, timeout: typing.Optional[float] = None) -> bytes:
        """Read up to `max_bytes` from the socket, honouring `timeout`."""
        exc_map: ExceptionMapping = {socket.timeout: ReadTimeout, OSError: ReadError}
        with map_exceptions(exc_map):
            self._sock.settimeout(timeout)
            return self._sock.recv(max_bytes)

    def write(self, buffer: bytes, timeout: typing.Optional[float] = None) -> None:
        """Write all of `buffer` to the socket, honouring `timeout`."""
        if not buffer:
            return

        exc_map: ExceptionMapping = {socket.timeout: WriteTimeout, OSError: WriteError}
        with map_exceptions(exc_map):
            remaining = buffer
            while remaining:
                # Re-apply the timeout before each partial send.
                self._sock.settimeout(timeout)
                sent = self._sock.send(remaining)
                remaining = remaining[sent:]

    def close(self) -> None:
        self._sock.close()

    def start_tls(
        self,
        ssl_context: ssl.SSLContext,
        server_hostname: typing.Optional[str] = None,
        timeout: typing.Optional[float] = None,
    ) -> NetworkStream:
        """Wrap the underlying socket in TLS and return a new stream for it."""
        exc_map: ExceptionMapping = {
            socket.timeout: ConnectTimeout,
            OSError: ConnectError,
        }
        with map_exceptions(exc_map):
            try:
                self._sock.settimeout(timeout)
                tls_sock = ssl_context.wrap_socket(
                    self._sock, server_hostname=server_hostname
                )
            except Exception as exc:  # pragma: nocover
                # Release the raw socket if the TLS handshake fails.
                self.close()
                raise exc
        return SyncStream(tls_sock)

    def get_extra_info(self, info: str) -> typing.Any:
        """Expose transport details; returns `None` for unknown keys."""
        if info == "ssl_object" and isinstance(self._sock, ssl.SSLSocket):
            return self._sock._sslobj  # type: ignore
        if info == "client_addr":
            return self._sock.getsockname()
        if info == "server_addr":
            return self._sock.getpeername()
        if info == "socket":
            return self._sock
        if info == "is_readable":
            return is_socket_readable(self._sock)
        return None
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class SyncBackend(NetworkBackend):
    """Blocking network backend creating ``SyncStream`` connections."""

    def connect_tcp(
        self,
        host: str,
        port: int,
        timeout: typing.Optional[float] = None,
        local_address: typing.Optional[str] = None,
        socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
    ) -> NetworkStream:
        """Open a TCP connection and return it wrapped in a ``SyncStream``.

        ``TCP_NODELAY`` is always enabled, in addition to any caller-supplied
        socket options.
        """
        if socket_options is None:
            socket_options = []  # pragma: no cover
        source_address = None if local_address is None else (local_address, 0)
        errors: ExceptionMapping = {
            socket.timeout: ConnectTimeout,
            OSError: ConnectError,
        }

        with map_exceptions(errors):
            sock = socket.create_connection(
                (host, port),
                timeout,
                source_address=source_address,
            )
            for option in socket_options:
                sock.setsockopt(*option)  # pragma: no cover
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        return SyncStream(sock)

    def connect_unix_socket(
        self,
        path: str,
        timeout: typing.Optional[float] = None,
        socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
    ) -> NetworkStream:  # pragma: nocover
        """Open a UNIX domain socket connection wrapped in a ``SyncStream``.

        Raises ``RuntimeError`` on Windows, where ``AF_UNIX`` is unsupported
        for this use.
        """
        if sys.platform == "win32":
            raise RuntimeError(
                "Attempted to connect to a UNIX socket on a Windows system."
            )
        if socket_options is None:
            socket_options = []

        errors: ExceptionMapping = {
            socket.timeout: ConnectTimeout,
            OSError: ConnectError,
        }
        with map_exceptions(errors):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            for option in socket_options:
                sock.setsockopt(*option)
            sock.settimeout(timeout)
            sock.connect(path)
        return SyncStream(sock)
|