ZTWHHH committed on
Commit
9c40a2c
·
verified ·
1 Parent(s): ee89772

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. parrot/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-310.pyc +3 -0
  3. parrot/lib/python3.10/site-packages/numpy/random/tests/data/sfc64_np126.pkl.gz +3 -0
  4. parrot/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py +795 -0
  5. parrot/lib/python3.10/site-packages/scipy/stats/_common.py +5 -0
  6. parrot/lib/python3.10/site-packages/scipy/stats/_constants.py +39 -0
  7. parrot/lib/python3.10/site-packages/scipy/stats/_continuous_distns.py +0 -0
  8. parrot/lib/python3.10/site-packages/scipy/stats/_covariance.py +633 -0
  9. parrot/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py +0 -0
  10. parrot/lib/python3.10/site-packages/scipy/stats/_mgc.py +550 -0
  11. parrot/lib/python3.10/site-packages/scipy/stats/_mstats_extras.py +521 -0
  12. parrot/lib/python3.10/site-packages/scipy/stats/_qmc.py +0 -0
  13. parrot/lib/python3.10/site-packages/scipy/stats/_qmvnt.py +533 -0
  14. parrot/lib/python3.10/site-packages/scipy/stats/_rvs_sampling.py +56 -0
  15. parrot/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py +712 -0
  16. parrot/lib/python3.10/site-packages/scipy/stats/_sobol.pyi +54 -0
  17. parrot/lib/python3.10/site-packages/scipy/stats/_stats_py.py +0 -0
  18. parrot/lib/python3.10/site-packages/scipy/stats/_survival.py +686 -0
  19. parrot/lib/python3.10/site-packages/scipy/stats/contingency.py +468 -0
  20. parrot/lib/python3.10/site-packages/scipy/stats/tests/__init__.py +0 -0
  21. parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multicomp.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/scipy/stats/tests/common_tests.py +354 -0
  26. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_axis_nan_policy.py +1290 -0
  27. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_binned_statistic.py +568 -0
  28. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_censored_data.py +152 -0
  29. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_contingency.py +241 -0
  30. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_basic.py +1046 -0
  31. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_fit_censored.py +683 -0
  32. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_crosstab.py +115 -0
  33. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_basic.py +563 -0
  34. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_distns.py +648 -0
  35. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_distributions.py +0 -0
  36. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_entropy.py +304 -0
  37. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_fast_gen_inversion.py +432 -0
  38. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_fit.py +1038 -0
  39. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_hypotests.py +1857 -0
  40. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_kdeoth.py +608 -0
  41. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_mgc.py +217 -0
  42. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_morestats.py +0 -0
  43. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_mstats_basic.py +2066 -0
  44. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_multicomp.py +404 -0
  45. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_odds_ratio.py +148 -0
  46. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_rank.py +338 -0
  47. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_relative_risk.py +95 -0
  48. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_sampling.py +1447 -0
  49. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_sensitivity_analysis.py +301 -0
  50. parrot/lib/python3.10/site-packages/scipy/stats/tests/test_survival.py +470 -0
.gitattributes CHANGED
@@ -1444,3 +1444,4 @@ parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cp
1444
  vllm/lib/python3.10/site-packages/pyzmq.libs/libzmq-a430b4ce.so.5.2.5 filter=lfs diff=lfs merge=lfs -text
1445
  vllm/lib/python3.10/site-packages/pyzmq.libs/libsodium-1b1f72d5.so.26.1.0 filter=lfs diff=lfs merge=lfs -text
1446
  vllm/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
1444
  vllm/lib/python3.10/site-packages/pyzmq.libs/libzmq-a430b4ce.so.5.2.5 filter=lfs diff=lfs merge=lfs -text
1445
  vllm/lib/python3.10/site-packages/pyzmq.libs/libsodium-1b1f72d5.so.26.1.0 filter=lfs diff=lfs merge=lfs -text
1446
  vllm/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1447
+ parrot/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:277a3dc2935eac528151d2d17cc91998c95d164592ca754affa764751faa3691
3
+ size 100837
parrot/lib/python3.10/site-packages/numpy/random/tests/data/sfc64_np126.pkl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3156b5ca5172ec350f81404afa821e292755978518122377019ec6dec773cdac
3
+ size 290
parrot/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py ADDED
@@ -0,0 +1,795 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import builtins
2
+ from warnings import catch_warnings, simplefilter
3
+ import numpy as np
4
+ from operator import index
5
+ from collections import namedtuple
6
+
7
+ __all__ = ['binned_statistic',
8
+ 'binned_statistic_2d',
9
+ 'binned_statistic_dd']
10
+
11
+
12
+ BinnedStatisticResult = namedtuple('BinnedStatisticResult',
13
+ ('statistic', 'bin_edges', 'binnumber'))
14
+
15
+
16
+ def binned_statistic(x, values, statistic='mean',
17
+ bins=10, range=None):
18
+ """
19
+ Compute a binned statistic for one or more sets of data.
20
+
21
+ This is a generalization of a histogram function. A histogram divides
22
+ the space into bins, and returns the count of the number of points in
23
+ each bin. This function allows the computation of the sum, mean, median,
24
+ or other statistic of the values (or set of values) within each bin.
25
+
26
+ Parameters
27
+ ----------
28
+ x : (N,) array_like
29
+ A sequence of values to be binned.
30
+ values : (N,) array_like or list of (N,) array_like
31
+ The data on which the statistic will be computed. This must be
32
+ the same shape as `x`, or a set of sequences - each the same shape as
33
+ `x`. If `values` is a set of sequences, the statistic will be computed
34
+ on each independently.
35
+ statistic : string or callable, optional
36
+ The statistic to compute (default is 'mean').
37
+ The following statistics are available:
38
+
39
+ * 'mean' : compute the mean of values for points within each bin.
40
+ Empty bins will be represented by NaN.
41
+ * 'std' : compute the standard deviation within each bin. This
42
+ is implicitly calculated with ddof=0.
43
+ * 'median' : compute the median of values for points within each
44
+ bin. Empty bins will be represented by NaN.
45
+ * 'count' : compute the count of points within each bin. This is
46
+ identical to an unweighted histogram. `values` array is not
47
+ referenced.
48
+ * 'sum' : compute the sum of values for points within each bin.
49
+ This is identical to a weighted histogram.
50
+ * 'min' : compute the minimum of values for points within each bin.
51
+ Empty bins will be represented by NaN.
52
+ * 'max' : compute the maximum of values for points within each bin.
53
+ Empty bins will be represented by NaN.
54
+ * function : a user-defined function which takes a 1D array of
55
+ values, and outputs a single numerical statistic. This function
56
+ will be called on the values in each bin. Empty bins will be
57
+ represented by function([]), or NaN if this returns an error.
58
+
59
+ bins : int or sequence of scalars, optional
60
+ If `bins` is an int, it defines the number of equal-width bins in the
61
+ given range (10 by default). If `bins` is a sequence, it defines the
62
+ bin edges, including the rightmost edge, allowing for non-uniform bin
63
+ widths. Values in `x` that are smaller than lowest bin edge are
64
+ assigned to bin number 0, values beyond the highest bin are assigned to
65
+ ``bins[-1]``. If the bin edges are specified, the number of bins will
66
+ be (nx = len(bins)-1).
67
+ range : (float, float) or [(float, float)], optional
68
+ The lower and upper range of the bins. If not provided, range
69
+ is simply ``(x.min(), x.max())``. Values outside the range are
70
+ ignored.
71
+
72
+ Returns
73
+ -------
74
+ statistic : array
75
+ The values of the selected statistic in each bin.
76
+ bin_edges : array of dtype float
77
+ Return the bin edges ``(length(statistic)+1)``.
78
+ binnumber: 1-D ndarray of ints
79
+ Indices of the bins (corresponding to `bin_edges`) in which each value
80
+ of `x` belongs. Same length as `values`. A binnumber of `i` means the
81
+ corresponding value is between (bin_edges[i-1], bin_edges[i]).
82
+
83
+ See Also
84
+ --------
85
+ numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
86
+
87
+ Notes
88
+ -----
89
+ All but the last (righthand-most) bin is half-open. In other words, if
90
+ `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
91
+ but excluding 2) and the second ``[2, 3)``. The last bin, however, is
92
+ ``[3, 4]``, which *includes* 4.
93
+
94
+ .. versionadded:: 0.11.0
95
+
96
+ Examples
97
+ --------
98
+ >>> import numpy as np
99
+ >>> from scipy import stats
100
+ >>> import matplotlib.pyplot as plt
101
+
102
+ First some basic examples:
103
+
104
+ Create two evenly spaced bins in the range of the given sample, and sum the
105
+ corresponding values in each of those bins:
106
+
107
+ >>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
108
+ >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
109
+ BinnedStatisticResult(statistic=array([4. , 4.5]),
110
+ bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))
111
+
112
+ Multiple arrays of values can also be passed. The statistic is calculated
113
+ on each set independently:
114
+
115
+ >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
116
+ >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
117
+ BinnedStatisticResult(statistic=array([[4. , 4.5],
118
+ [8. , 9. ]]), bin_edges=array([1., 4., 7.]),
119
+ binnumber=array([1, 1, 1, 2, 2]))
120
+
121
+ >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
122
+ ... bins=3)
123
+ BinnedStatisticResult(statistic=array([1., 2., 4.]),
124
+ bin_edges=array([1., 2., 3., 4.]),
125
+ binnumber=array([1, 2, 1, 2, 3]))
126
+
127
+ As a second example, we now generate some random data of sailing boat speed
128
+ as a function of wind speed, and then determine how fast our boat is for
129
+ certain wind speeds:
130
+
131
+ >>> rng = np.random.default_rng()
132
+ >>> windspeed = 8 * rng.random(500)
133
+ >>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500)
134
+ >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
135
+ ... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
136
+ >>> plt.figure()
137
+ >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
138
+ >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
139
+ ... label='binned statistic of data')
140
+ >>> plt.legend()
141
+
142
+ Now we can use ``binnumber`` to select all datapoints with a windspeed
143
+ below 1:
144
+
145
+ >>> low_boatspeed = boatspeed[binnumber == 0]
146
+
147
+ As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
148
+ plot of a distribution that shows the mean and distribution around that
149
+ mean per bin, on top of a regular histogram and the probability
150
+ distribution function:
151
+
152
+ >>> x = np.linspace(0, 5, num=500)
153
+ >>> x_pdf = stats.maxwell.pdf(x)
154
+ >>> samples = stats.maxwell.rvs(size=10000)
155
+
156
+ >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
157
+ ... statistic='mean', bins=25)
158
+ >>> bin_width = (bin_edges[1] - bin_edges[0])
159
+ >>> bin_centers = bin_edges[1:] - bin_width/2
160
+
161
+ >>> plt.figure()
162
+ >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
163
+ ... alpha=0.2, label='histogram of data')
164
+ >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
165
+ >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
166
+ ... label='binned statistic of data')
167
+ >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
168
+ >>> plt.legend(fontsize=10)
169
+ >>> plt.show()
170
+
171
+ """
172
+ try:
173
+ N = len(bins)
174
+ except TypeError:
175
+ N = 1
176
+
177
+ if N != 1:
178
+ bins = [np.asarray(bins, float)]
179
+
180
+ if range is not None:
181
+ if len(range) == 2:
182
+ range = [range]
183
+
184
+ medians, edges, binnumbers = binned_statistic_dd(
185
+ [x], values, statistic, bins, range)
186
+
187
+ return BinnedStatisticResult(medians, edges[0], binnumbers)
188
+
189
+
190
+ BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
191
+ ('statistic', 'x_edge', 'y_edge',
192
+ 'binnumber'))
193
+
194
+
195
+ def binned_statistic_2d(x, y, values, statistic='mean',
196
+ bins=10, range=None, expand_binnumbers=False):
197
+ """
198
+ Compute a bidimensional binned statistic for one or more sets of data.
199
+
200
+ This is a generalization of a histogram2d function. A histogram divides
201
+ the space into bins, and returns the count of the number of points in
202
+ each bin. This function allows the computation of the sum, mean, median,
203
+ or other statistic of the values (or set of values) within each bin.
204
+
205
+ Parameters
206
+ ----------
207
+ x : (N,) array_like
208
+ A sequence of values to be binned along the first dimension.
209
+ y : (N,) array_like
210
+ A sequence of values to be binned along the second dimension.
211
+ values : (N,) array_like or list of (N,) array_like
212
+ The data on which the statistic will be computed. This must be
213
+ the same shape as `x`, or a list of sequences - each with the same
214
+ shape as `x`. If `values` is such a list, the statistic will be
215
+ computed on each independently.
216
+ statistic : string or callable, optional
217
+ The statistic to compute (default is 'mean').
218
+ The following statistics are available:
219
+
220
+ * 'mean' : compute the mean of values for points within each bin.
221
+ Empty bins will be represented by NaN.
222
+ * 'std' : compute the standard deviation within each bin. This
223
+ is implicitly calculated with ddof=0.
224
+ * 'median' : compute the median of values for points within each
225
+ bin. Empty bins will be represented by NaN.
226
+ * 'count' : compute the count of points within each bin. This is
227
+ identical to an unweighted histogram. `values` array is not
228
+ referenced.
229
+ * 'sum' : compute the sum of values for points within each bin.
230
+ This is identical to a weighted histogram.
231
+ * 'min' : compute the minimum of values for points within each bin.
232
+ Empty bins will be represented by NaN.
233
+ * 'max' : compute the maximum of values for points within each bin.
234
+ Empty bins will be represented by NaN.
235
+ * function : a user-defined function which takes a 1D array of
236
+ values, and outputs a single numerical statistic. This function
237
+ will be called on the values in each bin. Empty bins will be
238
+ represented by function([]), or NaN if this returns an error.
239
+
240
+ bins : int or [int, int] or array_like or [array, array], optional
241
+ The bin specification:
242
+
243
+ * the number of bins for the two dimensions (nx = ny = bins),
244
+ * the number of bins in each dimension (nx, ny = bins),
245
+ * the bin edges for the two dimensions (x_edge = y_edge = bins),
246
+ * the bin edges in each dimension (x_edge, y_edge = bins).
247
+
248
+ If the bin edges are specified, the number of bins will be,
249
+ (nx = len(x_edge)-1, ny = len(y_edge)-1).
250
+
251
+ range : (2,2) array_like, optional
252
+ The leftmost and rightmost edges of the bins along each dimension
253
+ (if not specified explicitly in the `bins` parameters):
254
+ [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
255
+ considered outliers and not tallied in the histogram.
256
+ expand_binnumbers : bool, optional
257
+ 'False' (default): the returned `binnumber` is a shape (N,) array of
258
+ linearized bin indices.
259
+ 'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
260
+ ndarray, where each row gives the bin numbers in the corresponding
261
+ dimension.
262
+ See the `binnumber` returned value, and the `Examples` section.
263
+
264
+ .. versionadded:: 0.17.0
265
+
266
+ Returns
267
+ -------
268
+ statistic : (nx, ny) ndarray
269
+ The values of the selected statistic in each two-dimensional bin.
270
+ x_edge : (nx + 1) ndarray
271
+ The bin edges along the first dimension.
272
+ y_edge : (ny + 1) ndarray
273
+ The bin edges along the second dimension.
274
+ binnumber : (N,) array of ints or (2,N) ndarray of ints
275
+ This assigns to each element of `sample` an integer that represents the
276
+ bin in which this observation falls. The representation depends on the
277
+ `expand_binnumbers` argument. See `Notes` for details.
278
+
279
+
280
+ See Also
281
+ --------
282
+ numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
283
+
284
+ Notes
285
+ -----
286
+ Binedges:
287
+ All but the last (righthand-most) bin is half-open. In other words, if
288
+ `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
289
+ but excluding 2) and the second ``[2, 3)``. The last bin, however, is
290
+ ``[3, 4]``, which *includes* 4.
291
+
292
+ `binnumber`:
293
+ This returned argument assigns to each element of `sample` an integer that
294
+ represents the bin in which it belongs. The representation depends on the
295
+ `expand_binnumbers` argument. If 'False' (default): The returned
296
+ `binnumber` is a shape (N,) array of linearized indices mapping each
297
+ element of `sample` to its corresponding bin (using row-major ordering).
298
+ Note that the returned linearized bin indices are used for an array with
299
+ extra bins on the outer binedges to capture values outside of the defined
300
+ bin bounds.
301
+ If 'True': The returned `binnumber` is a shape (2,N) ndarray where
302
+ each row indicates bin placements for each dimension respectively. In each
303
+ dimension, a binnumber of `i` means the corresponding value is between
304
+ (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
305
+
306
+ .. versionadded:: 0.11.0
307
+
308
+ Examples
309
+ --------
310
+ >>> from scipy import stats
311
+
312
+ Calculate the counts with explicit bin-edges:
313
+
314
+ >>> x = [0.1, 0.1, 0.1, 0.6]
315
+ >>> y = [2.1, 2.6, 2.1, 2.1]
316
+ >>> binx = [0.0, 0.5, 1.0]
317
+ >>> biny = [2.0, 2.5, 3.0]
318
+ >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny])
319
+ >>> ret.statistic
320
+ array([[2., 1.],
321
+ [1., 0.]])
322
+
323
+ The bin in which each sample is placed is given by the `binnumber`
324
+ returned parameter. By default, these are the linearized bin indices:
325
+
326
+ >>> ret.binnumber
327
+ array([5, 6, 5, 9])
328
+
329
+ The bin indices can also be expanded into separate entries for each
330
+ dimension using the `expand_binnumbers` parameter:
331
+
332
+ >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
333
+ ... expand_binnumbers=True)
334
+ >>> ret.binnumber
335
+ array([[1, 1, 1, 2],
336
+ [1, 2, 1, 1]])
337
+
338
+ Which shows that the first three elements belong in the xbin 1, and the
339
+ fourth into xbin 2; and so on for y.
340
+
341
+ """
342
+
343
+ # This code is based on np.histogram2d
344
+ try:
345
+ N = len(bins)
346
+ except TypeError:
347
+ N = 1
348
+
349
+ if N != 1 and N != 2:
350
+ xedges = yedges = np.asarray(bins, float)
351
+ bins = [xedges, yedges]
352
+
353
+ medians, edges, binnumbers = binned_statistic_dd(
354
+ [x, y], values, statistic, bins, range,
355
+ expand_binnumbers=expand_binnumbers)
356
+
357
+ return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
358
+
359
+
360
+ BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
361
+ ('statistic', 'bin_edges',
362
+ 'binnumber'))
363
+
364
+
365
+ def _bincount(x, weights):
366
+ if np.iscomplexobj(weights):
367
+ a = np.bincount(x, np.real(weights))
368
+ b = np.bincount(x, np.imag(weights))
369
+ z = a + b*1j
370
+
371
+ else:
372
+ z = np.bincount(x, weights)
373
+ return z
374
+
375
+
376
+ def binned_statistic_dd(sample, values, statistic='mean',
377
+ bins=10, range=None, expand_binnumbers=False,
378
+ binned_statistic_result=None):
379
+ """
380
+ Compute a multidimensional binned statistic for a set of data.
381
+
382
+ This is a generalization of a histogramdd function. A histogram divides
383
+ the space into bins, and returns the count of the number of points in
384
+ each bin. This function allows the computation of the sum, mean, median,
385
+ or other statistic of the values within each bin.
386
+
387
+ Parameters
388
+ ----------
389
+ sample : array_like
390
+ Data to histogram passed as a sequence of N arrays of length D, or
391
+ as an (N,D) array.
392
+ values : (N,) array_like or list of (N,) array_like
393
+ The data on which the statistic will be computed. This must be
394
+ the same shape as `sample`, or a list of sequences - each with the
395
+ same shape as `sample`. If `values` is such a list, the statistic
396
+ will be computed on each independently.
397
+ statistic : string or callable, optional
398
+ The statistic to compute (default is 'mean').
399
+ The following statistics are available:
400
+
401
+ * 'mean' : compute the mean of values for points within each bin.
402
+ Empty bins will be represented by NaN.
403
+ * 'median' : compute the median of values for points within each
404
+ bin. Empty bins will be represented by NaN.
405
+ * 'count' : compute the count of points within each bin. This is
406
+ identical to an unweighted histogram. `values` array is not
407
+ referenced.
408
+ * 'sum' : compute the sum of values for points within each bin.
409
+ This is identical to a weighted histogram.
410
+ * 'std' : compute the standard deviation within each bin. This
411
+ is implicitly calculated with ddof=0. If the number of values
412
+ within a given bin is 0 or 1, the computed standard deviation value
413
+ will be 0 for the bin.
414
+ * 'min' : compute the minimum of values for points within each bin.
415
+ Empty bins will be represented by NaN.
416
+ * 'max' : compute the maximum of values for points within each bin.
417
+ Empty bins will be represented by NaN.
418
+ * function : a user-defined function which takes a 1D array of
419
+ values, and outputs a single numerical statistic. This function
420
+ will be called on the values in each bin. Empty bins will be
421
+ represented by function([]), or NaN if this returns an error.
422
+
423
+ bins : sequence or positive int, optional
424
+ The bin specification must be in one of the following forms:
425
+
426
+ * A sequence of arrays describing the bin edges along each dimension.
427
+ * The number of bins for each dimension (nx, ny, ... = bins).
428
+ * The number of bins for all dimensions (nx = ny = ... = bins).
429
+ range : sequence, optional
430
+ A sequence of lower and upper bin edges to be used if the edges are
431
+ not given explicitly in `bins`. Defaults to the minimum and maximum
432
+ values along each dimension.
433
+ expand_binnumbers : bool, optional
434
+ 'False' (default): the returned `binnumber` is a shape (N,) array of
435
+ linearized bin indices.
436
+ 'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
437
+ ndarray, where each row gives the bin numbers in the corresponding
438
+ dimension.
439
+ See the `binnumber` returned value, and the `Examples` section of
440
+ `binned_statistic_2d`.
441
+ binned_statistic_result : BinnedStatisticddResult
442
+ Result of a previous call to the function in order to reuse bin edges
443
+ and bin numbers with new values and/or a different statistic.
444
+ To reuse bin numbers, `expand_binnumbers` must have been set to False
445
+ (the default)
446
+
447
+ .. versionadded:: 0.17.0
448
+
449
+ Returns
450
+ -------
451
+ statistic : ndarray, shape(nx1, nx2, nx3,...)
452
+ The values of the selected statistic in each two-dimensional bin.
453
+ bin_edges : list of ndarrays
454
+ A list of D arrays describing the (nxi + 1) bin edges for each
455
+ dimension.
456
+ binnumber : (N,) array of ints or (D,N) ndarray of ints
457
+ This assigns to each element of `sample` an integer that represents the
458
+ bin in which this observation falls. The representation depends on the
459
+ `expand_binnumbers` argument. See `Notes` for details.
460
+
461
+
462
+ See Also
463
+ --------
464
+ numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
465
+
466
+ Notes
467
+ -----
468
+ Binedges:
469
+ All but the last (righthand-most) bin is half-open in each dimension. In
470
+ other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
471
+ ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
472
+ last bin, however, is ``[3, 4]``, which *includes* 4.
473
+
474
+ `binnumber`:
475
+ This returned argument assigns to each element of `sample` an integer that
476
+ represents the bin in which it belongs. The representation depends on the
477
+ `expand_binnumbers` argument. If 'False' (default): The returned
478
+ `binnumber` is a shape (N,) array of linearized indices mapping each
479
+ element of `sample` to its corresponding bin (using row-major ordering).
480
+ If 'True': The returned `binnumber` is a shape (D,N) ndarray where
481
+ each row indicates bin placements for each dimension respectively. In each
482
+ dimension, a binnumber of `i` means the corresponding value is between
483
+ (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
484
+
485
+ .. versionadded:: 0.11.0
486
+
487
+ Examples
488
+ --------
489
+ >>> import numpy as np
490
+ >>> from scipy import stats
491
+ >>> import matplotlib.pyplot as plt
492
+ >>> from mpl_toolkits.mplot3d import Axes3D
493
+
494
+ Take an array of 600 (x, y) coordinates as an example.
495
+ `binned_statistic_dd` can handle arrays of higher dimension `D`. But a plot
496
+ of dimension `D+1` is required.
497
+
498
+ >>> mu = np.array([0., 1.])
499
+ >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
500
+ >>> multinormal = stats.multivariate_normal(mu, sigma)
501
+ >>> data = multinormal.rvs(size=600, random_state=235412)
502
+ >>> data.shape
503
+ (600, 2)
504
+
505
+ Create bins and count how many arrays fall in each bin:
506
+
507
+ >>> N = 60
508
+ >>> x = np.linspace(-3, 3, N)
509
+ >>> y = np.linspace(-3, 4, N)
510
+ >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
511
+ ... statistic='count')
512
+ >>> bincounts = ret.statistic
513
+
514
+ Set the volume and the location of bars:
515
+
516
+ >>> dx = x[1] - x[0]
517
+ >>> dy = y[1] - y[0]
518
+ >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
519
+ >>> z = 0
520
+
521
+ >>> bincounts = bincounts.ravel()
522
+ >>> x = x.ravel()
523
+ >>> y = y.ravel()
524
+
525
+ >>> fig = plt.figure()
526
+ >>> ax = fig.add_subplot(111, projection='3d')
527
+ >>> with np.errstate(divide='ignore'): # silence random axes3d warning
528
+ ... ax.bar3d(x, y, z, dx, dy, bincounts)
529
+
530
+ Reuse bin numbers and bin edges with new values:
531
+
532
+ >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
533
+ ... binned_statistic_result=ret,
534
+ ... statistic='mean')
535
+ """
536
+ known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
537
+ if not callable(statistic) and statistic not in known_stats:
538
+ raise ValueError(f'invalid statistic {statistic!r}')
539
+
540
+ try:
541
+ bins = index(bins)
542
+ except TypeError:
543
+ # bins is not an integer
544
+ pass
545
+ # If bins was an integer-like object, now it is an actual Python int.
546
+
547
+ # NOTE: for _bin_edges(), see e.g. gh-11365
548
+ if isinstance(bins, int) and not np.isfinite(sample).all():
549
+ raise ValueError(f'{sample!r} contains non-finite values.')
550
+
551
+ # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
552
+ # `Dlen` is the length of elements along each dimension.
553
+ # This code is based on np.histogramdd
554
+ try:
555
+ # `sample` is an ND-array.
556
+ Dlen, Ndim = sample.shape
557
+ except (AttributeError, ValueError):
558
+ # `sample` is a sequence of 1D arrays.
559
+ sample = np.atleast_2d(sample).T
560
+ Dlen, Ndim = sample.shape
561
+
562
+ # Store initial shape of `values` to preserve it in the output
563
+ values = np.asarray(values)
564
+ input_shape = list(values.shape)
565
+ # Make sure that `values` is 2D to iterate over rows
566
+ values = np.atleast_2d(values)
567
+ Vdim, Vlen = values.shape
568
+
569
+ # Make sure `values` match `sample`
570
+ if statistic != 'count' and Vlen != Dlen:
571
+ raise AttributeError('The number of `values` elements must match the '
572
+ 'length of each `sample` dimension.')
573
+
574
+ try:
575
+ M = len(bins)
576
+ if M != Ndim:
577
+ raise AttributeError('The dimension of bins must be equal '
578
+ 'to the dimension of the sample x.')
579
+ except TypeError:
580
+ bins = Ndim * [bins]
581
+
582
+ if binned_statistic_result is None:
583
+ nbin, edges, dedges = _bin_edges(sample, bins, range)
584
+ binnumbers = _bin_numbers(sample, nbin, edges, dedges)
585
+ else:
586
+ edges = binned_statistic_result.bin_edges
587
+ nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
588
+ # +1 for outlier bins
589
+ dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
590
+ binnumbers = binned_statistic_result.binnumber
591
+
592
+ # Avoid overflow with double precision. Complex `values` -> `complex128`.
593
+ result_type = np.result_type(values, np.float64)
594
+ result = np.empty([Vdim, nbin.prod()], dtype=result_type)
595
+
596
+ if statistic in {'mean', np.mean}:
597
+ result.fill(np.nan)
598
+ flatcount = _bincount(binnumbers, None)
599
+ a = flatcount.nonzero()
600
+ for vv in builtins.range(Vdim):
601
+ flatsum = _bincount(binnumbers, values[vv])
602
+ result[vv, a] = flatsum[a] / flatcount[a]
603
+ elif statistic in {'std', np.std}:
604
+ result.fill(np.nan)
605
+ flatcount = _bincount(binnumbers, None)
606
+ a = flatcount.nonzero()
607
+ for vv in builtins.range(Vdim):
608
+ flatsum = _bincount(binnumbers, values[vv])
609
+ delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers]
610
+ std = np.sqrt(
611
+ _bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a]
612
+ )
613
+ result[vv, a] = std
614
+ result = np.real(result)
615
+ elif statistic == 'count':
616
+ result = np.empty([Vdim, nbin.prod()], dtype=np.float64)
617
+ result.fill(0)
618
+ flatcount = _bincount(binnumbers, None)
619
+ a = np.arange(len(flatcount))
620
+ result[:, a] = flatcount[np.newaxis, :]
621
+ elif statistic in {'sum', np.sum}:
622
+ result.fill(0)
623
+ for vv in builtins.range(Vdim):
624
+ flatsum = _bincount(binnumbers, values[vv])
625
+ a = np.arange(len(flatsum))
626
+ result[vv, a] = flatsum
627
+ elif statistic in {'median', np.median}:
628
+ result.fill(np.nan)
629
+ for vv in builtins.range(Vdim):
630
+ i = np.lexsort((values[vv], binnumbers))
631
+ _, j, counts = np.unique(binnumbers[i],
632
+ return_index=True, return_counts=True)
633
+ mid = j + (counts - 1) / 2
634
+ mid_a = values[vv, i][np.floor(mid).astype(int)]
635
+ mid_b = values[vv, i][np.ceil(mid).astype(int)]
636
+ medians = (mid_a + mid_b) / 2
637
+ result[vv, binnumbers[i][j]] = medians
638
+ elif statistic in {'min', np.min}:
639
+ result.fill(np.nan)
640
+ for vv in builtins.range(Vdim):
641
+ i = np.argsort(values[vv])[::-1] # Reversed so the min is last
642
+ result[vv, binnumbers[i]] = values[vv, i]
643
+ elif statistic in {'max', np.max}:
644
+ result.fill(np.nan)
645
+ for vv in builtins.range(Vdim):
646
+ i = np.argsort(values[vv])
647
+ result[vv, binnumbers[i]] = values[vv, i]
648
+ elif callable(statistic):
649
+ with np.errstate(invalid='ignore'), catch_warnings():
650
+ simplefilter("ignore", RuntimeWarning)
651
+ try:
652
+ null = statistic([])
653
+ except Exception:
654
+ null = np.nan
655
+ if np.iscomplexobj(null):
656
+ result = result.astype(np.complex128)
657
+ result.fill(null)
658
+ try:
659
+ _calc_binned_statistic(
660
+ Vdim, binnumbers, result, values, statistic
661
+ )
662
+ except ValueError:
663
+ result = result.astype(np.complex128)
664
+ _calc_binned_statistic(
665
+ Vdim, binnumbers, result, values, statistic
666
+ )
667
+
668
+ # Shape into a proper matrix
669
+ result = result.reshape(np.append(Vdim, nbin))
670
+
671
+ # Remove outliers (indices 0 and -1 for each bin-dimension).
672
+ core = tuple([slice(None)] + Ndim * [slice(1, -1)])
673
+ result = result[core]
674
+
675
+ # Unravel binnumbers into an ndarray, each row the bins for each dimension
676
+ if expand_binnumbers and Ndim > 1:
677
+ binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
678
+
679
+ if np.any(result.shape[1:] != nbin - 2):
680
+ raise RuntimeError('Internal Shape Error')
681
+
682
+ # Reshape to have output (`result`) match input (`values`) shape
683
+ result = result.reshape(input_shape[:-1] + list(nbin-2))
684
+
685
+ return BinnedStatisticddResult(result, edges, binnumbers)
686
+
687
+
688
+ def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func):
689
+ unique_bin_numbers = np.unique(bin_numbers)
690
+ for vv in builtins.range(Vdim):
691
+ bin_map = _create_binned_data(bin_numbers, unique_bin_numbers,
692
+ values, vv)
693
+ for i in unique_bin_numbers:
694
+ stat = stat_func(np.array(bin_map[i]))
695
+ if np.iscomplexobj(stat) and not np.iscomplexobj(result):
696
+ raise ValueError("The statistic function returns complex ")
697
+ result[vv, i] = stat
698
+
699
+
700
+ def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):
701
+ """ Create hashmap of bin ids to values in bins
702
+ key: bin number
703
+ value: list of binned data
704
+ """
705
+ bin_map = dict()
706
+ for i in unique_bin_numbers:
707
+ bin_map[i] = []
708
+ for i in builtins.range(len(bin_numbers)):
709
+ bin_map[bin_numbers[i]].append(values[vv, i])
710
+ return bin_map
711
+
712
+
713
+ def _bin_edges(sample, bins=None, range=None):
714
+ """ Create edge arrays
715
+ """
716
+ Dlen, Ndim = sample.shape
717
+
718
+ nbin = np.empty(Ndim, int) # Number of bins in each dimension
719
+ edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
720
+ dedges = Ndim * [None] # Spacing between edges (will be 2D array)
721
+
722
+ # Select range for each dimension
723
+ # Used only if number of bins is given.
724
+ if range is None:
725
+ smin = np.atleast_1d(np.array(sample.min(axis=0), float))
726
+ smax = np.atleast_1d(np.array(sample.max(axis=0), float))
727
+ else:
728
+ if len(range) != Ndim:
729
+ raise ValueError(
730
+ f"range given for {len(range)} dimensions; {Ndim} required")
731
+ smin = np.empty(Ndim)
732
+ smax = np.empty(Ndim)
733
+ for i in builtins.range(Ndim):
734
+ if range[i][1] < range[i][0]:
735
+ raise ValueError(
736
+ "In {}range, start must be <= stop".format(
737
+ f"dimension {i + 1} of " if Ndim > 1 else ""))
738
+ smin[i], smax[i] = range[i]
739
+
740
+ # Make sure the bins have a finite width.
741
+ for i in builtins.range(len(smin)):
742
+ if smin[i] == smax[i]:
743
+ smin[i] = smin[i] - .5
744
+ smax[i] = smax[i] + .5
745
+
746
+ # Preserve sample floating point precision in bin edges
747
+ edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating)
748
+ else float)
749
+
750
+ # Create edge arrays
751
+ for i in builtins.range(Ndim):
752
+ if np.isscalar(bins[i]):
753
+ nbin[i] = bins[i] + 2 # +2 for outlier bins
754
+ edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1,
755
+ dtype=edges_dtype)
756
+ else:
757
+ edges[i] = np.asarray(bins[i], edges_dtype)
758
+ nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
759
+ dedges[i] = np.diff(edges[i])
760
+
761
+ nbin = np.asarray(nbin)
762
+
763
+ return nbin, edges, dedges
764
+
765
+
766
+ def _bin_numbers(sample, nbin, edges, dedges):
767
+ """Compute the bin number each sample falls into, in each dimension
768
+ """
769
+ Dlen, Ndim = sample.shape
770
+
771
+ sampBin = [
772
+ np.digitize(sample[:, i], edges[i])
773
+ for i in range(Ndim)
774
+ ]
775
+
776
+ # Using `digitize`, values that fall on an edge are put in the right bin.
777
+ # For the rightmost bin, we want values equal to the right
778
+ # edge to be counted in the last bin, and not as an outlier.
779
+ for i in range(Ndim):
780
+ # Find the rounding precision
781
+ dedges_min = dedges[i].min()
782
+ if dedges_min == 0:
783
+ raise ValueError('The smallest edge difference is numerically 0.')
784
+ decimal = int(-np.log10(dedges_min)) + 6
785
+ # Find which points are on the rightmost edge.
786
+ on_edge = np.where((sample[:, i] >= edges[i][-1]) &
787
+ (np.around(sample[:, i], decimal) ==
788
+ np.around(edges[i][-1], decimal)))[0]
789
+ # Shift these points one bin to the left.
790
+ sampBin[i][on_edge] -= 1
791
+
792
+ # Compute the sample indices in the flattened statistic matrix.
793
+ binnumbers = np.ravel_multi_index(sampBin, nbin)
794
+
795
+ return binnumbers
parrot/lib/python3.10/site-packages/scipy/stats/_common.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from collections import namedtuple


# Lightweight (low, high) pair shared by several statistical results.
ConfidenceInterval = namedtuple("ConfidenceInterval", ("low", "high"))
ConfidenceInterval.__doc__ = "Class for confidence intervals."
parrot/lib/python3.10/site-packages/scipy/stats/_constants.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Statistics-related constants.
3
+
4
+ """
5
+ import numpy as np
6
+
7
+
8
+ # The smallest representable positive number such that 1.0 + _EPS != 1.0.
9
+ _EPS = np.finfo(float).eps
10
+
11
+ # The largest [in magnitude] usable floating value.
12
+ _XMAX = np.finfo(float).max
13
+
14
+ # The log of the largest usable floating value; useful for knowing
15
+ # when exp(something) will overflow
16
+ _LOGXMAX = np.log(_XMAX)
17
+
18
+ # The smallest [in magnitude] usable (i.e. not subnormal) double precision
19
+ # floating value.
20
+ _XMIN = np.finfo(float).tiny
21
+
22
+ # The log of the smallest [in magnitude] usable (i.e not subnormal)
23
+ # double precision floating value.
24
+ _LOGXMIN = np.log(_XMIN)
25
+
26
+ # -special.psi(1)
27
+ _EULER = 0.577215664901532860606512090082402431042
28
+
29
+ # special.zeta(3, 1) Apery's constant
30
+ _ZETA3 = 1.202056903159594285399738161511449990765
31
+
32
+ # sqrt(pi)
33
+ _SQRT_PI = 1.772453850905516027298167483341145182798
34
+
35
+ # sqrt(2/pi)
36
+ _SQRT_2_OVER_PI = 0.7978845608028654
37
+
38
+ # log(sqrt(2/pi))
39
+ _LOG_SQRT_2_OVER_PI = -0.22579135264472744
parrot/lib/python3.10/site-packages/scipy/stats/_continuous_distns.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/stats/_covariance.py ADDED
@@ -0,0 +1,633 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import cached_property
2
+
3
+ import numpy as np
4
+ from scipy import linalg
5
+ from scipy.stats import _multivariate
6
+
7
+
8
+ __all__ = ["Covariance"]
9
+
10
+
11
class Covariance:
    """
    Representation of a covariance matrix

    Calculations involving covariance matrices (e.g. data whitening,
    multivariate normal function evaluation) are often performed more
    efficiently using a decomposition of the covariance matrix instead of the
    covariance matrix itself. This class allows the user to construct an
    object representing a covariance matrix using any of several
    decompositions and perform calculations using a common interface.

    .. note::

        The `Covariance` class cannot be instantiated directly. Instead, use
        one of the factory methods (e.g. `Covariance.from_diagonal`).

    Examples
    --------
    The `Covariance` class is used by calling one of its
    factory methods to create a `Covariance` object, then pass that
    representation of the `Covariance` matrix as a shape parameter of a
    multivariate distribution.

    For instance, the multivariate normal distribution can accept an array
    representing a covariance matrix:

    >>> from scipy import stats
    >>> import numpy as np
    >>> d = [1, 2, 3]
    >>> A = np.diag(d)  # a diagonal covariance matrix
    >>> x = [4, -2, 5]  # a point of interest
    >>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=A)
    >>> dist.pdf(x)
    4.9595685102808205e-08

    but the calculations are performed in a very generic way that does not
    take advantage of any special properties of the covariance matrix. Because
    our covariance matrix is diagonal, we can use ``Covariance.from_diagonal``
    to create an object representing the covariance matrix, and
    `multivariate_normal` can use this to compute the probability density
    function more efficiently.

    >>> cov = stats.Covariance.from_diagonal(d)
    >>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=cov)
    >>> dist.pdf(x)
    4.9595685102808205e-08

    """
    def __init__(self):
        # Abstract base: direct construction is forbidden; concrete
        # subclasses (created via the factory methods) override __init__.
        message = ("The `Covariance` class cannot be instantiated directly. "
                   "Please use one of the factory methods "
                   "(e.g. `Covariance.from_diagonal`).")
        raise NotImplementedError(message)

    @staticmethod
    def from_diagonal(diagonal):
        r"""
        Return a representation of a covariance matrix from its diagonal.

        Parameters
        ----------
        diagonal : array_like
            The diagonal elements of a diagonal matrix.

        Notes
        -----
        Let the diagonal elements of a diagonal covariance matrix :math:`D` be
        stored in the vector :math:`d`.

        When all elements of :math:`d` are strictly positive, whitening of a
        data point :math:`x` is performed by computing
        :math:`x \cdot d^{-1/2}`, where the inverse square root can be taken
        element-wise.
        :math:`\log\det{D}` is calculated as :math:`-2 \sum(\log{d})`,
        where the :math:`\log` operation is performed element-wise.

        This `Covariance` class supports singular covariance matrices. When
        computing ``_log_pdet``, non-positive elements of :math:`d` are
        ignored. Whitening is not well defined when the point to be whitened
        does not lie in the span of the columns of the covariance matrix. The
        convention taken here is to treat the inverse square root of
        non-positive elements of :math:`d` as zeros.

        Examples
        --------
        Prepare a symmetric positive definite covariance matrix ``A`` and a
        data point ``x``.

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 5
        >>> A = np.diag(rng.random(n))
        >>> x = rng.random(size=n)

        Extract the diagonal from ``A`` and create the `Covariance` object.

        >>> d = np.diag(A)
        >>> cov = stats.Covariance.from_diagonal(d)

        Compare the functionality of the `Covariance` object against a
        reference implementations.

        >>> res = cov.whiten(x)
        >>> ref = np.diag(d**-0.5) @ x
        >>> np.allclose(res, ref)
        True
        >>> res = cov.log_pdet
        >>> ref = np.linalg.slogdet(A)[-1]
        >>> np.allclose(res, ref)
        True

        """
        return CovViaDiagonal(diagonal)

    @staticmethod
    def from_precision(precision, covariance=None):
        r"""
        Return a representation of a covariance from its precision matrix.

        Parameters
        ----------
        precision : array_like
            The precision matrix; that is, the inverse of a square, symmetric,
            positive definite covariance matrix.
        covariance : array_like, optional
            The square, symmetric, positive definite covariance matrix. If not
            provided, this may need to be calculated (e.g. to evaluate the
            cumulative distribution function of
            `scipy.stats.multivariate_normal`) by inverting `precision`.

        Notes
        -----
        Let the covariance matrix be :math:`A`, its precision matrix be
        :math:`P = A^{-1}`, and :math:`L` be the lower Cholesky factor such
        that :math:`L L^T = P`.
        Whitening of a data point :math:`x` is performed by computing
        :math:`x^T L`. :math:`\log\det{A}` is calculated as
        :math:`-2tr(\log{L})`, where the :math:`\log` operation is performed
        element-wise.

        This `Covariance` class does not support singular covariance matrices
        because the precision matrix does not exist for a singular covariance
        matrix.

        Examples
        --------
        Prepare a symmetric positive definite precision matrix ``P`` and a
        data point ``x``. (If the precision matrix is not already available,
        consider the other factory methods of the `Covariance` class.)

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 5
        >>> P = rng.random(size=(n, n))
        >>> P = P @ P.T  # a precision matrix must be positive definite
        >>> x = rng.random(size=n)

        Create the `Covariance` object.

        >>> cov = stats.Covariance.from_precision(P)

        Compare the functionality of the `Covariance` object against
        reference implementations.

        >>> res = cov.whiten(x)
        >>> ref = x @ np.linalg.cholesky(P)
        >>> np.allclose(res, ref)
        True
        >>> res = cov.log_pdet
        >>> ref = -np.linalg.slogdet(P)[-1]
        >>> np.allclose(res, ref)
        True

        """
        return CovViaPrecision(precision, covariance)

    @staticmethod
    def from_cholesky(cholesky):
        r"""
        Representation of a covariance provided via the (lower) Cholesky factor

        Parameters
        ----------
        cholesky : array_like
            The lower triangular Cholesky factor of the covariance matrix.

        Notes
        -----
        Let the covariance matrix be :math:`A` and :math:`L` be the lower
        Cholesky factor such that :math:`L L^T = A`.
        Whitening of a data point :math:`x` is performed by computing
        :math:`L^{-1} x`. :math:`\log\det{A}` is calculated as
        :math:`2tr(\log{L})`, where the :math:`\log` operation is performed
        element-wise.

        This `Covariance` class does not support singular covariance matrices
        because the Cholesky decomposition does not exist for a singular
        covariance matrix.

        Examples
        --------
        Prepare a symmetric positive definite covariance matrix ``A`` and a
        data point ``x``.

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 5
        >>> A = rng.random(size=(n, n))
        >>> A = A @ A.T  # make the covariance symmetric positive definite
        >>> x = rng.random(size=n)

        Perform the Cholesky decomposition of ``A`` and create the
        `Covariance` object.

        >>> L = np.linalg.cholesky(A)
        >>> cov = stats.Covariance.from_cholesky(L)

        Compare the functionality of the `Covariance` object against
        reference implementation.

        >>> from scipy.linalg import solve_triangular
        >>> res = cov.whiten(x)
        >>> ref = solve_triangular(L, x, lower=True)
        >>> np.allclose(res, ref)
        True
        >>> res = cov.log_pdet
        >>> ref = np.linalg.slogdet(A)[-1]
        >>> np.allclose(res, ref)
        True

        """
        return CovViaCholesky(cholesky)

    @staticmethod
    def from_eigendecomposition(eigendecomposition):
        r"""
        Representation of a covariance provided via eigendecomposition

        Parameters
        ----------
        eigendecomposition : sequence
            A sequence (nominally a tuple) containing the eigenvalue and
            eigenvector arrays as computed by `scipy.linalg.eigh` or
            `numpy.linalg.eigh`.

        Notes
        -----
        Let the covariance matrix be :math:`A`, let :math:`V` be matrix of
        eigenvectors, and let :math:`W` be the diagonal matrix of eigenvalues
        such that `V W V^T = A`.

        When all of the eigenvalues are strictly positive, whitening of a
        data point :math:`x` is performed by computing
        :math:`x^T (V W^{-1/2})`, where the inverse square root can be taken
        element-wise.
        :math:`\log\det{A}` is calculated as :math:`tr(\log{W})`,
        where the :math:`\log` operation is performed element-wise.

        This `Covariance` class supports singular covariance matrices. When
        computing ``_log_pdet``, non-positive eigenvalues are ignored.
        Whitening is not well defined when the point to be whitened
        does not lie in the span of the columns of the covariance matrix. The
        convention taken here is to treat the inverse square root of
        non-positive eigenvalues as zeros.

        Examples
        --------
        Prepare a symmetric positive definite covariance matrix ``A`` and a
        data point ``x``.

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 5
        >>> A = rng.random(size=(n, n))
        >>> A = A @ A.T  # make the covariance symmetric positive definite
        >>> x = rng.random(size=n)

        Perform the eigendecomposition of ``A`` and create the `Covariance`
        object.

        >>> w, v = np.linalg.eigh(A)
        >>> cov = stats.Covariance.from_eigendecomposition((w, v))

        Compare the functionality of the `Covariance` object against
        reference implementations.

        >>> res = cov.whiten(x)
        >>> ref = x @ (v @ np.diag(w**-0.5))
        >>> np.allclose(res, ref)
        True
        >>> res = cov.log_pdet
        >>> ref = np.linalg.slogdet(A)[-1]
        >>> np.allclose(res, ref)
        True

        """
        return CovViaEigendecomposition(eigendecomposition)

    def whiten(self, x):
        """
        Perform a whitening transformation on data.

        "Whitening" ("white" as in "white noise", in which each frequency has
        equal magnitude) transforms a set of random variables into a new set of
        random variables with unit-diagonal covariance. When a whitening
        transform is applied to a sample of points distributed according to
        a multivariate normal distribution with zero mean, the covariance of
        the transformed sample is approximately the identity matrix.

        Parameters
        ----------
        x : array_like
            An array of points. The last dimension must correspond with the
            dimensionality of the space, i.e., the number of columns in the
            covariance matrix.

        Returns
        -------
        x_ : array_like
            The transformed array of points.

        References
        ----------
        .. [1] "Whitening Transformation". Wikipedia.
               https://en.wikipedia.org/wiki/Whitening_transformation
        .. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
               coloring linear transformation". Transactions of VSB 18.2
               (2018): 31-35. :doi:`10.31490/tces-2018-0013`

        Examples
        --------
        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 3
        >>> A = rng.random(size=(n, n))
        >>> cov_array = A @ A.T  # make matrix symmetric positive definite
        >>> precision = np.linalg.inv(cov_array)
        >>> cov_object = stats.Covariance.from_precision(precision)
        >>> x = rng.multivariate_normal(np.zeros(n), cov_array, size=(10000))
        >>> x_ = cov_object.whiten(x)
        >>> np.cov(x_, rowvar=False)  # near-identity covariance
        array([[0.97862122, 0.00893147, 0.02430451],
               [0.00893147, 0.96719062, 0.02201312],
               [0.02430451, 0.02201312, 0.99206881]])

        """
        # Dispatch to the decomposition-specific implementation.
        return self._whiten(np.asarray(x))

    def colorize(self, x):
        """
        Perform a colorizing transformation on data.

        "Colorizing" ("color" as in "colored noise", in which different
        frequencies may have different magnitudes) transforms a set of
        uncorrelated random variables into a new set of random variables with
        the desired covariance. When a coloring transform is applied to a
        sample of points distributed according to a multivariate normal
        distribution with identity covariance and zero mean, the covariance of
        the transformed sample is approximately the covariance matrix used
        in the coloring transform.

        Parameters
        ----------
        x : array_like
            An array of points. The last dimension must correspond with the
            dimensionality of the space, i.e., the number of columns in the
            covariance matrix.

        Returns
        -------
        x_ : array_like
            The transformed array of points.

        References
        ----------
        .. [1] "Whitening Transformation". Wikipedia.
               https://en.wikipedia.org/wiki/Whitening_transformation
        .. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
               coloring linear transformation". Transactions of VSB 18.2
               (2018): 31-35. :doi:`10.31490/tces-2018-0013`

        Examples
        --------
        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng(1638083107694713882823079058616272161)
        >>> n = 3
        >>> A = rng.random(size=(n, n))
        >>> cov_array = A @ A.T  # make matrix symmetric positive definite
        >>> cholesky = np.linalg.cholesky(cov_array)
        >>> cov_object = stats.Covariance.from_cholesky(cholesky)
        >>> x = rng.multivariate_normal(np.zeros(n), np.eye(n), size=(10000))
        >>> x_ = cov_object.colorize(x)
        >>> cov_data = np.cov(x_, rowvar=False)
        >>> np.allclose(cov_data, cov_array, rtol=3e-2)
        True
        """
        # Dispatch to the decomposition-specific implementation.
        return self._colorize(np.asarray(x))

    @property
    def log_pdet(self):
        """
        Log of the pseudo-determinant of the covariance matrix
        """
        # `[()]` unwraps a 0-d array to a scalar; batched results pass
        # through unchanged.
        return np.array(self._log_pdet, dtype=float)[()]

    @property
    def rank(self):
        """
        Rank of the covariance matrix
        """
        # `[()]` unwraps a 0-d array to a scalar; batched results pass
        # through unchanged.
        return np.array(self._rank, dtype=int)[()]

    @property
    def covariance(self):
        """
        Explicit representation of the covariance matrix
        """
        return self._covariance

    @property
    def shape(self):
        """
        Shape of the covariance array
        """
        return self._shape

    def _validate_matrix(self, A, name):
        # Require a square 2-D array of real (integer or floating) numbers.
        A = np.atleast_2d(A)
        m, n = A.shape[-2:]
        if m != n or A.ndim != 2 or not (np.issubdtype(A.dtype, np.integer) or
                                         np.issubdtype(A.dtype, np.floating)):
            message = (f"The input `{name}` must be a square, "
                       "two-dimensional array of real numbers.")
            raise ValueError(message)
        return A

    def _validate_vector(self, A, name):
        # Require a 1-D array of real (integer or floating) numbers.
        A = np.atleast_1d(A)
        if A.ndim != 1 or not (np.issubdtype(A.dtype, np.integer) or
                               np.issubdtype(A.dtype, np.floating)):
            message = (f"The input `{name}` must be a one-dimensional array "
                       "of real numbers.")
            raise ValueError(message)
        return A
461
+
462
+
463
class CovViaPrecision(Covariance):
    """Covariance represented through its precision (inverse) matrix."""

    def __init__(self, precision, covariance=None):
        precision = self._validate_matrix(precision, 'precision')
        if covariance is not None:
            covariance = self._validate_matrix(covariance, 'covariance')
            message = "`precision.shape` must equal `covariance.shape`."
            if precision.shape != covariance.shape:
                raise ValueError(message)

        # Lower Cholesky factor L of the precision: L @ L.T == precision.
        self._chol_P = np.linalg.cholesky(precision)
        # log det(cov) = -log det(precision) = -2 * sum(log(diag(L)))
        self._log_pdet = -2 * np.log(np.diag(self._chol_P)).sum(axis=-1)
        self._rank = precision.shape[-1]  # invertible, hence full rank
        self._precision = precision
        self._cov_matrix = covariance
        self._shape = precision.shape
        self._allow_singular = False

    def _whiten(self, x):
        # x @ L yields unit-diagonal covariance for x ~ N(0, cov).
        return x @ self._chol_P

    @cached_property
    def _covariance(self):
        if self._cov_matrix is not None:
            return self._cov_matrix
        # Not supplied by the user: invert the precision via its
        # Cholesky factor.
        dim = self._shape[-1]
        return linalg.cho_solve((self._chol_P, True), np.eye(dim))

    def _colorize(self, x):
        return linalg.solve_triangular(self._chol_P.T, x.T, lower=False).T
492
+
493
+
494
+ def _dot_diag(x, d):
495
+ # If d were a full diagonal matrix, x @ d would always do what we want.
496
+ # Special treatment is needed for n-dimensional `d` in which each row
497
+ # includes only the diagonal elements of a covariance matrix.
498
+ return x * d if x.ndim < 2 else x * np.expand_dims(d, -2)
499
+
500
+
501
class CovViaDiagonal(Covariance):
    """Covariance represented through the diagonal of a diagonal matrix.

    Supports singular matrices: non-positive diagonal entries contribute
    zero to the whitening transform and are skipped in the pseudo-log-det.
    """

    def __init__(self, diagonal):
        diagonal = self._validate_vector(diagonal, 'diagonal')

        nonpositive = diagonal <= 0
        safe_diagonal = np.array(diagonal, dtype=np.float64)
        safe_diagonal[nonpositive] = 1  # ones don't affect the determinant
        self._log_pdet = np.sum(np.log(safe_diagonal), axis=-1)

        # Pseudo-inverse convention: the inverse square root of a
        # non-positive entry is taken to be zero.
        pseudo_reciprocals = 1 / np.sqrt(safe_diagonal)
        pseudo_reciprocals[nonpositive] = 0

        self._sqrt_diagonal = np.sqrt(diagonal)
        self._LP = pseudo_reciprocals
        self._rank = safe_diagonal.shape[-1] - nonpositive.sum(axis=-1)
        self._covariance = np.apply_along_axis(np.diag, -1, diagonal)
        self._i_zero = nonpositive
        self._shape = self._covariance.shape
        self._allow_singular = True

    def _whiten(self, x):
        return _dot_diag(x, self._LP)

    def _colorize(self, x):
        return _dot_diag(x, self._sqrt_diagonal)

    def _support_mask(self, x):
        """
        Check whether x lies in the support of the distribution.
        """
        # A point is outside the support iff it has a nonzero component
        # along a direction with a non-positive diagonal entry.
        return ~np.any(_dot_diag(x, self._i_zero), axis=-1)
534
+
535
+
536
class CovViaCholesky(Covariance):
    """Covariance represented through its lower Cholesky factor."""

    def __init__(self, cholesky):
        factor = self._validate_matrix(cholesky, 'cholesky')

        self._factor = factor
        # log det(A) = 2 * sum(log(diag(L))) when A = L @ L.T
        self._log_pdet = 2 * np.log(np.diag(factor)).sum(axis=-1)
        self._rank = factor.shape[-1]  # Cholesky exists only at full rank
        self._shape = factor.shape
        self._allow_singular = False

    @cached_property
    def _covariance(self):
        # Reassemble A = L @ L.T on demand.
        return self._factor @ self._factor.T

    def _whiten(self, x):
        return linalg.solve_triangular(self._factor, x.T, lower=True).T

    def _colorize(self, x):
        return x @ self._factor.T
557
+
558
+
559
class CovViaEigendecomposition(Covariance):
    # Covariance represented by an (eigenvalues, eigenvectors) pair as
    # returned by `scipy.linalg.eigh`/`numpy.linalg.eigh`.  Supports
    # singular matrices: non-positive eigenvalues are treated per the
    # pseudo-inverse convention.

    def __init__(self, eigendecomposition):
        eigenvalues, eigenvectors = eigendecomposition
        eigenvalues = self._validate_vector(eigenvalues, 'eigenvalues')
        eigenvectors = self._validate_matrix(eigenvectors, 'eigenvectors')
        message = ("The shapes of `eigenvalues` and `eigenvectors` "
                   "must be compatible.")
        try:
            # Broadcast (possibly batched) eigenvalues against the
            # eigenvector array, then drop the temporary axis.
            eigenvalues = np.expand_dims(eigenvalues, -2)
            eigenvectors, eigenvalues = np.broadcast_arrays(eigenvectors,
                                                            eigenvalues)
            eigenvalues = eigenvalues[..., 0, :]
        except ValueError:
            raise ValueError(message)

        i_zero = eigenvalues <= 0
        positive_eigenvalues = np.array(eigenvalues, dtype=np.float64)

        positive_eigenvalues[i_zero] = 1  # ones don't affect determinant
        self._log_pdet = np.sum(np.log(positive_eigenvalues), axis=-1)

        # Pseudo-inverse convention: inverse square roots of non-positive
        # eigenvalues are taken to be zero.
        psuedo_reciprocals = 1 / np.sqrt(positive_eigenvalues)
        psuedo_reciprocals[i_zero] = 0

        self._LP = eigenvectors * psuedo_reciprocals
        self._LA = eigenvectors * np.sqrt(eigenvalues)
        self._rank = positive_eigenvalues.shape[-1] - i_zero.sum(axis=-1)
        self._w = eigenvalues
        self._v = eigenvectors
        self._shape = eigenvectors.shape
        self._null_basis = eigenvectors * i_zero
        # This is only used for `_support_mask`, not to decide whether
        # the covariance is singular or not.
        self._eps = _multivariate._eigvalsh_to_eps(eigenvalues) * 10**3
        self._allow_singular = True

    def _whiten(self, x):
        return x @ self._LP

    def _colorize(self, x):
        return x @ self._LA.T

    @cached_property
    def _covariance(self):
        # Reconstruct V @ diag(w) @ V.T without materializing diag(w).
        return (self._v * self._w) @ self._v.T

    def _support_mask(self, x):
        """
        Check whether x lies in the support of the distribution.
        """
        # A point is in the support when its projection onto the null
        # space of the covariance is (numerically) zero.
        residual = np.linalg.norm(x @ self._null_basis, axis=-1)
        in_support = residual < self._eps
        return in_support
613
+
614
+
615
class CovViaPSD(Covariance):
    """
    Representation of a covariance provided via an instance of _PSD
    """

    def __init__(self, psd):
        # Adopt the decomposition already computed by the `_PSD` helper.
        self._LP = psd.U
        self._log_pdet = psd.log_pdet
        self._rank = psd.rank
        self._covariance = psd._M
        self._shape = psd._M.shape
        self._psd = psd
        self._allow_singular = False  # by default

    def _whiten(self, x):
        return x @ self._LP

    def _support_mask(self, x):
        # Delegate the support check to the wrapped `_PSD` instance.
        return self._psd._support_mask(x)
parrot/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/stats/_mgc.py ADDED
@@ -0,0 +1,550 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ import numpy as np
3
+
4
+ from scipy._lib._util import check_random_state, MapWrapper, rng_integers, _contains_nan
5
+ from scipy._lib._bunch import _make_tuple_bunch
6
+ from scipy.spatial.distance import cdist
7
+ from scipy.ndimage import _measurements
8
+
9
+ from ._stats import _local_correlations # type: ignore[import-not-found]
10
+ from . import distributions
11
+
12
+ __all__ = ['multiscale_graphcorr']
13
+
14
+ # FROM MGCPY: https://github.com/neurodata/mgcpy
15
+
16
+
17
+ class _ParallelP:
18
+ """Helper function to calculate parallel p-value."""
19
+
20
+ def __init__(self, x, y, random_states):
21
+ self.x = x
22
+ self.y = y
23
+ self.random_states = random_states
24
+
25
+ def __call__(self, index):
26
+ order = self.random_states[index].permutation(self.y.shape[0])
27
+ permy = self.y[order][:, order]
28
+
29
+ # calculate permuted stats, store in null distribution
30
+ perm_stat = _mgc_stat(self.x, permy)[0]
31
+
32
+ return perm_stat
33
+
34
+
35
+ def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
36
+ r"""Helper function that calculates the p-value. See below for uses.
37
+
38
+ Parameters
39
+ ----------
40
+ x, y : ndarray
41
+ `x` and `y` have shapes `(n, p)` and `(n, q)`.
42
+ stat : float
43
+ The sample test statistic.
44
+ reps : int, optional
45
+ The number of replications used to estimate the null when using the
46
+ permutation test. The default is 1000 replications.
47
+ workers : int or map-like callable, optional
48
+ If `workers` is an int the population is subdivided into `workers`
49
+ sections and evaluated in parallel (uses
50
+ `multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
51
+ available to the Process. Alternatively supply a map-like callable,
52
+ such as `multiprocessing.Pool.map` for evaluating the population in
53
+ parallel. This evaluation is carried out as `workers(func, iterable)`.
54
+ Requires that `func` be pickleable.
55
+ random_state : {None, int, `numpy.random.Generator`,
56
+ `numpy.random.RandomState`}, optional
57
+
58
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
59
+ singleton is used.
60
+ If `seed` is an int, a new ``RandomState`` instance is used,
61
+ seeded with `seed`.
62
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
63
+ that instance is used.
64
+
65
+ Returns
66
+ -------
67
+ pvalue : float
68
+ The sample test p-value.
69
+ null_dist : list
70
+ The approximated null distribution.
71
+
72
+ """
73
+ # generate seeds for each rep (change to new parallel random number
74
+ # capabilities in numpy >= 1.17+)
75
+ random_state = check_random_state(random_state)
76
+ random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32,
77
+ size=4, dtype=np.uint32)) for _ in range(reps)]
78
+
79
+ # parallelizes with specified workers over number of reps and set seeds
80
+ parallelp = _ParallelP(x=x, y=y, random_states=random_states)
81
+ with MapWrapper(workers) as mapwrapper:
82
+ null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
83
+
84
+ # calculate p-value and significant permutation map through list
85
+ pvalue = (1 + (null_dist >= stat).sum()) / (1 + reps)
86
+
87
+ return pvalue, null_dist
88
+
89
+
90
+ def _euclidean_dist(x):
91
+ return cdist(x, x)
92
+
93
+
94
+ MGCResult = _make_tuple_bunch('MGCResult',
95
+ ['statistic', 'pvalue', 'mgc_dict'], [])
96
+
97
+
98
+ def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
99
+ workers=1, is_twosamp=False, random_state=None):
100
+ r"""Computes the Multiscale Graph Correlation (MGC) test statistic.
101
+
102
+ Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
103
+ one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
104
+ the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
105
+ called the "scale". A priori, however, it is not know which scales will be
106
+ most informative. So, MGC computes all distance pairs, and then efficiently
107
+ computes the distance correlations for all scales. The local correlations
108
+ illustrate which scales are relatively informative about the relationship.
109
+ The key, therefore, to successfully discover and decipher relationships
110
+ between disparate data modalities is to adaptively determine which scales
111
+ are the most informative, and the geometric implication for the most
112
+ informative scales. Doing so not only provides an estimate of whether the
113
+ modalities are related, but also provides insight into how the
114
+ determination was made. This is especially important in high-dimensional
115
+ data, where simple visualizations do not reveal relationships to the
116
+ unaided human eye. Characterizations of this implementation in particular
117
+ have been derived from and benchmarked within in [2]_.
118
+
119
+ Parameters
120
+ ----------
121
+ x, y : ndarray
122
+ If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
123
+ the number of samples and `p` and `q` are the number of dimensions,
124
+ then the MGC independence test will be run. Alternatively, ``x`` and
125
+ ``y`` can have shapes ``(n, n)`` if they are distance or similarity
126
+ matrices, and ``compute_distance`` must be sent to ``None``. If ``x``
127
+ and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
128
+ two-sample MGC test will be run.
129
+ compute_distance : callable, optional
130
+ A function that computes the distance or similarity among the samples
131
+ within each data matrix. Set to ``None`` if ``x`` and ``y`` are
132
+ already distance matrices. The default uses the euclidean norm metric.
133
+ If you are calling a custom function, either create the distance
134
+ matrix before-hand or create a function of the form
135
+ ``compute_distance(x)`` where `x` is the data matrix for which
136
+ pairwise distances are calculated.
137
+ reps : int, optional
138
+ The number of replications used to estimate the null when using the
139
+ permutation test. The default is ``1000``.
140
+ workers : int or map-like callable, optional
141
+ If ``workers`` is an int the population is subdivided into ``workers``
142
+ sections and evaluated in parallel (uses ``multiprocessing.Pool
143
+ <multiprocessing>``). Supply ``-1`` to use all cores available to the
144
+ Process. Alternatively supply a map-like callable, such as
145
+ ``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
146
+ This evaluation is carried out as ``workers(func, iterable)``.
147
+ Requires that `func` be pickleable. The default is ``1``.
148
+ is_twosamp : bool, optional
149
+ If `True`, a two sample test will be run. If ``x`` and ``y`` have
150
+ shapes ``(n, p)`` and ``(m, p)``, this optional will be overridden and
151
+ set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
152
+ ``(n, p)`` and a two sample test is desired. The default is ``False``.
153
+ Note that this will not run if inputs are distance matrices.
154
+ random_state : {None, int, `numpy.random.Generator`,
155
+ `numpy.random.RandomState`}, optional
156
+
157
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
158
+ singleton is used.
159
+ If `seed` is an int, a new ``RandomState`` instance is used,
160
+ seeded with `seed`.
161
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
162
+ that instance is used.
163
+
164
+ Returns
165
+ -------
166
+ res : MGCResult
167
+ An object containing attributes:
168
+
169
+ statistic : float
170
+ The sample MGC test statistic within `[-1, 1]`.
171
+ pvalue : float
172
+ The p-value obtained via permutation.
173
+ mgc_dict : dict
174
+ Contains additional useful results:
175
+
176
+ - mgc_map : ndarray
177
+ A 2D representation of the latent geometry of the
178
+ relationship.
179
+ - opt_scale : (int, int)
180
+ The estimated optimal scale as a `(x, y)` pair.
181
+ - null_dist : list
182
+ The null distribution derived from the permuted matrices.
183
+
184
+ See Also
185
+ --------
186
+ pearsonr : Pearson correlation coefficient and p-value for testing
187
+ non-correlation.
188
+ kendalltau : Calculates Kendall's tau.
189
+ spearmanr : Calculates a Spearman rank-order correlation coefficient.
190
+
191
+ Notes
192
+ -----
193
+ A description of the process of MGC and applications on neuroscience data
194
+ can be found in [1]_. It is performed using the following steps:
195
+
196
+ #. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
197
+ modified to be mean zero columnwise. This results in two
198
+ :math:`n \times n` distance matrices :math:`A` and :math:`B` (the
199
+ centering and unbiased modification) [3]_.
200
+
201
+ #. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
202
+
203
+ * The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
204
+ are calculated for each property. Here, :math:`G_k (i, j)` indicates
205
+ the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
206
+ and :math:`H_l (i, j)` indicates the :math:`l` smallested values of
207
+ the :math:`i`-th row of :math:`B`
208
+
209
+ * Let :math:`\circ` denotes the entry-wise matrix product, then local
210
+ correlations are summed and normalized using the following statistic:
211
+
212
+ .. math::
213
+
214
+ c^{kl} = \frac{\sum_{ij} A G_k B H_l}
215
+ {\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
216
+
217
+ #. The MGC test statistic is the smoothed optimal local correlation of
218
+ :math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)`
219
+ (which essentially set all isolated large correlations) as 0 and
220
+ connected large correlations the same as before, see [3]_.) MGC is,
221
+
222
+ .. math::
223
+
224
+ MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
225
+ \right)
226
+
227
+ The test statistic returns a value between :math:`(-1, 1)` since it is
228
+ normalized.
229
+
230
+ The p-value returned is calculated using a permutation test. This process
231
+ is completed by first randomly permuting :math:`y` to estimate the null
232
+ distribution and then calculating the probability of observing a test
233
+ statistic, under the null, at least as extreme as the observed test
234
+ statistic.
235
+
236
+ MGC requires at least 5 samples to run with reliable results. It can also
237
+ handle high-dimensional data sets.
238
+ In addition, by manipulating the input data matrices, the two-sample
239
+ testing problem can be reduced to the independence testing problem [4]_.
240
+ Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n`
241
+ :math:`p \times m`, data matrix :math:`X` and :math:`Y` can be created as
242
+ follows:
243
+
244
+ .. math::
245
+
246
+ X = [U | V] \in \mathcal{R}^{p \times (n + m)}
247
+ Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
248
+
249
+ Then, the MGC statistic can be calculated as normal. This methodology can
250
+ be extended to similar tests such as distance correlation [4]_.
251
+
252
+ .. versionadded:: 1.4.0
253
+
254
+ References
255
+ ----------
256
+ .. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
257
+ Maggioni, M., & Shen, C. (2019). Discovering and deciphering
258
+ relationships across disparate data modalities. ELife.
259
+ .. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
260
+ Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
261
+ mgcpy: A Comprehensive High Dimensional Independence Testing Python
262
+ Package. :arXiv:`1907.02088`
263
+ .. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
264
+ correlation to multiscale graph correlation. Journal of the American
265
+ Statistical Association.
266
+ .. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
267
+ Distance and Kernel Methods for Hypothesis Testing.
268
+ :arXiv:`1806.05514`
269
+
270
+ Examples
271
+ --------
272
+ >>> import numpy as np
273
+ >>> from scipy.stats import multiscale_graphcorr
274
+ >>> x = np.arange(100)
275
+ >>> y = x
276
+ >>> res = multiscale_graphcorr(x, y)
277
+ >>> res.statistic, res.pvalue
278
+ (1.0, 0.001)
279
+
280
+ To run an unpaired two-sample test,
281
+
282
+ >>> x = np.arange(100)
283
+ >>> y = np.arange(79)
284
+ >>> res = multiscale_graphcorr(x, y)
285
+ >>> res.statistic, res.pvalue # doctest: +SKIP
286
+ (0.033258146255703246, 0.023)
287
+
288
+ or, if shape of the inputs are the same,
289
+
290
+ >>> x = np.arange(100)
291
+ >>> y = x
292
+ >>> res = multiscale_graphcorr(x, y, is_twosamp=True)
293
+ >>> res.statistic, res.pvalue # doctest: +SKIP
294
+ (-0.008021809890200488, 1.0)
295
+
296
+ """
297
+ if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
298
+ raise ValueError("x and y must be ndarrays")
299
+
300
+ # convert arrays of type (n,) to (n, 1)
301
+ if x.ndim == 1:
302
+ x = x[:, np.newaxis]
303
+ elif x.ndim != 2:
304
+ raise ValueError(f"Expected a 2-D array `x`, found shape {x.shape}")
305
+ if y.ndim == 1:
306
+ y = y[:, np.newaxis]
307
+ elif y.ndim != 2:
308
+ raise ValueError(f"Expected a 2-D array `y`, found shape {y.shape}")
309
+
310
+ nx, px = x.shape
311
+ ny, py = y.shape
312
+
313
+ # check for NaNs
314
+ _contains_nan(x, nan_policy='raise')
315
+ _contains_nan(y, nan_policy='raise')
316
+
317
+ # check for positive or negative infinity and raise error
318
+ if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
319
+ raise ValueError("Inputs contain infinities")
320
+
321
+ if nx != ny:
322
+ if px == py:
323
+ # reshape x and y for two sample testing
324
+ is_twosamp = True
325
+ else:
326
+ raise ValueError("Shape mismatch, x and y must have shape [n, p] "
327
+ "and [n, q] or have shape [n, p] and [m, p].")
328
+
329
+ if nx < 5 or ny < 5:
330
+ raise ValueError("MGC requires at least 5 samples to give reasonable "
331
+ "results.")
332
+
333
+ # convert x and y to float
334
+ x = x.astype(np.float64)
335
+ y = y.astype(np.float64)
336
+
337
+ # check if compute_distance_matrix if a callable()
338
+ if not callable(compute_distance) and compute_distance is not None:
339
+ raise ValueError("Compute_distance must be a function.")
340
+
341
+ # check if number of reps exists, integer, or > 0 (if under 1000 raises
342
+ # warning)
343
+ if not isinstance(reps, int) or reps < 0:
344
+ raise ValueError("Number of reps must be an integer greater than 0.")
345
+ elif reps < 1000:
346
+ msg = ("The number of replications is low (under 1000), and p-value "
347
+ "calculations may be unreliable. Use the p-value result, with "
348
+ "caution!")
349
+ warnings.warn(msg, RuntimeWarning, stacklevel=2)
350
+
351
+ if is_twosamp:
352
+ if compute_distance is None:
353
+ raise ValueError("Cannot run if inputs are distance matrices")
354
+ x, y = _two_sample_transform(x, y)
355
+
356
+ if compute_distance is not None:
357
+ # compute distance matrices for x and y
358
+ x = compute_distance(x)
359
+ y = compute_distance(y)
360
+
361
+ # calculate MGC stat
362
+ stat, stat_dict = _mgc_stat(x, y)
363
+ stat_mgc_map = stat_dict["stat_mgc_map"]
364
+ opt_scale = stat_dict["opt_scale"]
365
+
366
+ # calculate permutation MGC p-value
367
+ pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
368
+ random_state=random_state)
369
+
370
+ # save all stats (other than stat/p-value) in dictionary
371
+ mgc_dict = {"mgc_map": stat_mgc_map,
372
+ "opt_scale": opt_scale,
373
+ "null_dist": null_dist}
374
+
375
+ # create result object with alias for backward compatibility
376
+ res = MGCResult(stat, pvalue, mgc_dict)
377
+ res.stat = stat
378
+ return res
379
+
380
+
381
+ def _mgc_stat(distx, disty):
382
+ r"""Helper function that calculates the MGC stat. See above for use.
383
+
384
+ Parameters
385
+ ----------
386
+ distx, disty : ndarray
387
+ `distx` and `disty` have shapes `(n, p)` and `(n, q)` or
388
+ `(n, n)` and `(n, n)`
389
+ if distance matrices.
390
+
391
+ Returns
392
+ -------
393
+ stat : float
394
+ The sample MGC test statistic within `[-1, 1]`.
395
+ stat_dict : dict
396
+ Contains additional useful additional returns containing the following
397
+ keys:
398
+
399
+ - stat_mgc_map : ndarray
400
+ MGC-map of the statistics.
401
+ - opt_scale : (float, float)
402
+ The estimated optimal scale as a `(x, y)` pair.
403
+
404
+ """
405
+ # calculate MGC map and optimal scale
406
+ stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
407
+
408
+ n, m = stat_mgc_map.shape
409
+ if m == 1 or n == 1:
410
+ # the global scale at is the statistic calculated at maximial nearest
411
+ # neighbors. There is not enough local scale to search over, so
412
+ # default to global scale
413
+ stat = stat_mgc_map[m - 1][n - 1]
414
+ opt_scale = m * n
415
+ else:
416
+ samp_size = len(distx) - 1
417
+
418
+ # threshold to find connected region of significant local correlations
419
+ sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
420
+
421
+ # maximum within the significant region
422
+ stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
423
+
424
+ stat_dict = {"stat_mgc_map": stat_mgc_map,
425
+ "opt_scale": opt_scale}
426
+
427
+ return stat, stat_dict
428
+
429
+
430
+ def _threshold_mgc_map(stat_mgc_map, samp_size):
431
+ r"""
432
+ Finds a connected region of significance in the MGC-map by thresholding.
433
+
434
+ Parameters
435
+ ----------
436
+ stat_mgc_map : ndarray
437
+ All local correlations within `[-1,1]`.
438
+ samp_size : int
439
+ The sample size of original data.
440
+
441
+ Returns
442
+ -------
443
+ sig_connect : ndarray
444
+ A binary matrix with 1's indicating the significant region.
445
+
446
+ """
447
+ m, n = stat_mgc_map.shape
448
+
449
+ # 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
450
+ # with varying levels of performance. Threshold is based on a beta
451
+ # approximation.
452
+ per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
453
+ threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
454
+ threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
455
+
456
+ # the global scale at is the statistic calculated at maximial nearest
457
+ # neighbors. Threshold is the maximum on the global and local scales
458
+ threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
459
+
460
+ # find the largest connected component of significant correlations
461
+ sig_connect = stat_mgc_map > threshold
462
+ if np.sum(sig_connect) > 0:
463
+ sig_connect, _ = _measurements.label(sig_connect)
464
+ _, label_counts = np.unique(sig_connect, return_counts=True)
465
+
466
+ # skip the first element in label_counts, as it is count(zeros)
467
+ max_label = np.argmax(label_counts[1:]) + 1
468
+ sig_connect = sig_connect == max_label
469
+ else:
470
+ sig_connect = np.array([[False]])
471
+
472
+ return sig_connect
473
+
474
+
475
+ def _smooth_mgc_map(sig_connect, stat_mgc_map):
476
+ """Finds the smoothed maximal within the significant region R.
477
+
478
+ If area of R is too small it returns the last local correlation. Otherwise,
479
+ returns the maximum within significant_connected_region.
480
+
481
+ Parameters
482
+ ----------
483
+ sig_connect : ndarray
484
+ A binary matrix with 1's indicating the significant region.
485
+ stat_mgc_map : ndarray
486
+ All local correlations within `[-1, 1]`.
487
+
488
+ Returns
489
+ -------
490
+ stat : float
491
+ The sample MGC statistic within `[-1, 1]`.
492
+ opt_scale: (float, float)
493
+ The estimated optimal scale as an `(x, y)` pair.
494
+
495
+ """
496
+ m, n = stat_mgc_map.shape
497
+
498
+ # the global scale at is the statistic calculated at maximial nearest
499
+ # neighbors. By default, statistic and optimal scale are global.
500
+ stat = stat_mgc_map[m - 1][n - 1]
501
+ opt_scale = [m, n]
502
+
503
+ if np.linalg.norm(sig_connect) != 0:
504
+ # proceed only when the connected region's area is sufficiently large
505
+ # 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
506
+ # with varying levels of performance
507
+ if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
508
+ max_corr = max(stat_mgc_map[sig_connect])
509
+
510
+ # find all scales within significant_connected_region that maximize
511
+ # the local correlation
512
+ max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
513
+
514
+ if max_corr >= stat:
515
+ stat = max_corr
516
+
517
+ k, l = max_corr_index
518
+ one_d_indices = k * n + l # 2D to 1D indexing
519
+ k = np.max(one_d_indices) // n
520
+ l = np.max(one_d_indices) % n
521
+ opt_scale = [k+1, l+1] # adding 1s to match R indexing
522
+
523
+ return stat, opt_scale
524
+
525
+
526
+ def _two_sample_transform(u, v):
527
+ """Helper function that concatenates x and y for two sample MGC stat.
528
+
529
+ See above for use.
530
+
531
+ Parameters
532
+ ----------
533
+ u, v : ndarray
534
+ `u` and `v` have shapes `(n, p)` and `(m, p)`.
535
+
536
+ Returns
537
+ -------
538
+ x : ndarray
539
+ Concatenate `u` and `v` along the `axis = 0`. `x` thus has shape
540
+ `(2n, p)`.
541
+ y : ndarray
542
+ Label matrix for `x` where 0 refers to samples that comes from `u` and
543
+ 1 refers to samples that come from `v`. `y` thus has shape `(2n, 1)`.
544
+
545
+ """
546
+ nx = u.shape[0]
547
+ ny = v.shape[0]
548
+ x = np.concatenate([u, v], axis=0)
549
+ y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
550
+ return x, y
parrot/lib/python3.10/site-packages/scipy/stats/_mstats_extras.py ADDED
@@ -0,0 +1,521 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Additional statistics functions with support for masked arrays.
3
+
4
+ """
5
+
6
+ # Original author (2007): Pierre GF Gerard-Marchant
7
+
8
+
9
+ __all__ = ['compare_medians_ms',
10
+ 'hdquantiles', 'hdmedian', 'hdquantiles_sd',
11
+ 'idealfourths',
12
+ 'median_cihs','mjci','mquantiles_cimj',
13
+ 'rsh',
14
+ 'trimmed_mean_ci',]
15
+
16
+
17
+ import numpy as np
18
+ from numpy import float64, ndarray
19
+
20
+ import numpy.ma as ma
21
+ from numpy.ma import MaskedArray
22
+
23
+ from . import _mstats_basic as mstats
24
+
25
+ from scipy.stats.distributions import norm, beta, t, binom
26
+
27
+
28
+ def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,):
29
+ """
30
+ Computes quantile estimates with the Harrell-Davis method.
31
+
32
+ The quantile estimates are calculated as a weighted linear combination
33
+ of order statistics.
34
+
35
+ Parameters
36
+ ----------
37
+ data : array_like
38
+ Data array.
39
+ prob : sequence, optional
40
+ Sequence of probabilities at which to compute the quantiles.
41
+ axis : int or None, optional
42
+ Axis along which to compute the quantiles. If None, use a flattened
43
+ array.
44
+ var : bool, optional
45
+ Whether to return the variance of the estimate.
46
+
47
+ Returns
48
+ -------
49
+ hdquantiles : MaskedArray
50
+ A (p,) array of quantiles (if `var` is False), or a (2,p) array of
51
+ quantiles and variances (if `var` is True), where ``p`` is the
52
+ number of quantiles.
53
+
54
+ See Also
55
+ --------
56
+ hdquantiles_sd
57
+
58
+ Examples
59
+ --------
60
+ >>> import numpy as np
61
+ >>> from scipy.stats.mstats import hdquantiles
62
+ >>>
63
+ >>> # Sample data
64
+ >>> data = np.array([1.2, 2.5, 3.7, 4.0, 5.1, 6.3, 7.0, 8.2, 9.4])
65
+ >>>
66
+ >>> # Probabilities at which to compute quantiles
67
+ >>> probabilities = [0.25, 0.5, 0.75]
68
+ >>>
69
+ >>> # Compute Harrell-Davis quantile estimates
70
+ >>> quantile_estimates = hdquantiles(data, prob=probabilities)
71
+ >>>
72
+ >>> # Display the quantile estimates
73
+ >>> for i, quantile in enumerate(probabilities):
74
+ ... print(f"{int(quantile * 100)}th percentile: {quantile_estimates[i]}")
75
+ 25th percentile: 3.1505820231763066 # may vary
76
+ 50th percentile: 5.194344084883956
77
+ 75th percentile: 7.430626414674935
78
+
79
+ """
80
+ def _hd_1D(data,prob,var):
81
+ "Computes the HD quantiles for a 1D array. Returns nan for invalid data."
82
+ xsorted = np.squeeze(np.sort(data.compressed().view(ndarray)))
83
+ # Don't use length here, in case we have a numpy scalar
84
+ n = xsorted.size
85
+
86
+ hd = np.empty((2,len(prob)), float64)
87
+ if n < 2:
88
+ hd.flat = np.nan
89
+ if var:
90
+ return hd
91
+ return hd[0]
92
+
93
+ v = np.arange(n+1) / float(n)
94
+ betacdf = beta.cdf
95
+ for (i,p) in enumerate(prob):
96
+ _w = betacdf(v, (n+1)*p, (n+1)*(1-p))
97
+ w = _w[1:] - _w[:-1]
98
+ hd_mean = np.dot(w, xsorted)
99
+ hd[0,i] = hd_mean
100
+ #
101
+ hd[1,i] = np.dot(w, (xsorted-hd_mean)**2)
102
+ #
103
+ hd[0, prob == 0] = xsorted[0]
104
+ hd[0, prob == 1] = xsorted[-1]
105
+ if var:
106
+ hd[1, prob == 0] = hd[1, prob == 1] = np.nan
107
+ return hd
108
+ return hd[0]
109
+ # Initialization & checks
110
+ data = ma.array(data, copy=False, dtype=float64)
111
+ p = np.atleast_1d(np.asarray(prob))
112
+ # Computes quantiles along axis (or globally)
113
+ if (axis is None) or (data.ndim == 1):
114
+ result = _hd_1D(data, p, var)
115
+ else:
116
+ if data.ndim > 2:
117
+ raise ValueError("Array 'data' must be at most two dimensional, "
118
+ "but got data.ndim = %d" % data.ndim)
119
+ result = ma.apply_along_axis(_hd_1D, axis, data, p, var)
120
+
121
+ return ma.fix_invalid(result, copy=False)
122
+
123
+
124
+ def hdmedian(data, axis=-1, var=False):
125
+ """
126
+ Returns the Harrell-Davis estimate of the median along the given axis.
127
+
128
+ Parameters
129
+ ----------
130
+ data : ndarray
131
+ Data array.
132
+ axis : int, optional
133
+ Axis along which to compute the quantiles. If None, use a flattened
134
+ array.
135
+ var : bool, optional
136
+ Whether to return the variance of the estimate.
137
+
138
+ Returns
139
+ -------
140
+ hdmedian : MaskedArray
141
+ The median values. If ``var=True``, the variance is returned inside
142
+ the masked array. E.g. for a 1-D array the shape change from (1,) to
143
+ (2,).
144
+
145
+ """
146
+ result = hdquantiles(data,[0.5], axis=axis, var=var)
147
+ return result.squeeze()
148
+
149
+
150
+ def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None):
151
+ """
152
+ The standard error of the Harrell-Davis quantile estimates by jackknife.
153
+
154
+ Parameters
155
+ ----------
156
+ data : array_like
157
+ Data array.
158
+ prob : sequence, optional
159
+ Sequence of quantiles to compute.
160
+ axis : int, optional
161
+ Axis along which to compute the quantiles. If None, use a flattened
162
+ array.
163
+
164
+ Returns
165
+ -------
166
+ hdquantiles_sd : MaskedArray
167
+ Standard error of the Harrell-Davis quantile estimates.
168
+
169
+ See Also
170
+ --------
171
+ hdquantiles
172
+
173
+ """
174
+ def _hdsd_1D(data, prob):
175
+ "Computes the std error for 1D arrays."
176
+ xsorted = np.sort(data.compressed())
177
+ n = len(xsorted)
178
+
179
+ hdsd = np.empty(len(prob), float64)
180
+ if n < 2:
181
+ hdsd.flat = np.nan
182
+
183
+ vv = np.arange(n) / float(n-1)
184
+ betacdf = beta.cdf
185
+
186
+ for (i,p) in enumerate(prob):
187
+ _w = betacdf(vv, n*p, n*(1-p))
188
+ w = _w[1:] - _w[:-1]
189
+ # cumulative sum of weights and data points if
190
+ # ith point is left out for jackknife
191
+ mx_ = np.zeros_like(xsorted)
192
+ mx_[1:] = np.cumsum(w * xsorted[:-1])
193
+ # similar but from the right
194
+ mx_[:-1] += np.cumsum(w[::-1] * xsorted[:0:-1])[::-1]
195
+ hdsd[i] = np.sqrt(mx_.var() * (n - 1))
196
+ return hdsd
197
+
198
+ # Initialization & checks
199
+ data = ma.array(data, copy=False, dtype=float64)
200
+ p = np.atleast_1d(np.asarray(prob))
201
+ # Computes quantiles along axis (or globally)
202
+ if (axis is None):
203
+ result = _hdsd_1D(data, p)
204
+ else:
205
+ if data.ndim > 2:
206
+ raise ValueError("Array 'data' must be at most two dimensional, "
207
+ "but got data.ndim = %d" % data.ndim)
208
+ result = ma.apply_along_axis(_hdsd_1D, axis, data, p)
209
+
210
+ return ma.fix_invalid(result, copy=False).ravel()
211
+
212
+
213
+ def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True),
214
+ alpha=0.05, axis=None):
215
+ """
216
+ Selected confidence interval of the trimmed mean along the given axis.
217
+
218
+ Parameters
219
+ ----------
220
+ data : array_like
221
+ Input data.
222
+ limits : {None, tuple}, optional
223
+ None or a two item tuple.
224
+ Tuple of the percentages to cut on each side of the array, with respect
225
+ to the number of unmasked data, as floats between 0. and 1. If ``n``
226
+ is the number of unmasked data before trimming, then
227
+ (``n * limits[0]``)th smallest data and (``n * limits[1]``)th
228
+ largest data are masked. The total number of unmasked data after
229
+ trimming is ``n * (1. - sum(limits))``.
230
+ The value of one limit can be set to None to indicate an open interval.
231
+
232
+ Defaults to (0.2, 0.2).
233
+ inclusive : (2,) tuple of boolean, optional
234
+ If relative==False, tuple indicating whether values exactly equal to
235
+ the absolute limits are allowed.
236
+ If relative==True, tuple indicating whether the number of data being
237
+ masked on each side should be rounded (True) or truncated (False).
238
+
239
+ Defaults to (True, True).
240
+ alpha : float, optional
241
+ Confidence level of the intervals.
242
+
243
+ Defaults to 0.05.
244
+ axis : int, optional
245
+ Axis along which to cut. If None, uses a flattened version of `data`.
246
+
247
+ Defaults to None.
248
+
249
+ Returns
250
+ -------
251
+ trimmed_mean_ci : (2,) ndarray
252
+ The lower and upper confidence intervals of the trimmed data.
253
+
254
+ """
255
+ data = ma.array(data, copy=False)
256
+ trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis)
257
+ tmean = trimmed.mean(axis)
258
+ tstde = mstats.trimmed_stde(data,limits=limits,inclusive=inclusive,axis=axis)
259
+ df = trimmed.count(axis) - 1
260
+ tppf = t.ppf(1-alpha/2.,df)
261
+ return np.array((tmean - tppf*tstde, tmean+tppf*tstde))
262
+
263
+
264
def mjci(data, prob=[0.25,0.5,0.75], axis=None):
    """
    Returns the Maritz-Jarrett estimators of the standard error of selected
    experimental quantiles of the data.

    Parameters
    ----------
    data : ndarray
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    axis : int or None, optional
        Axis along which to compute the quantiles.  If None, use a
        flattened array.

    """
    def _mjci_1D(data, p):
        # Work on the sorted, unmasked values only.
        x = np.sort(data.compressed())
        n = x.size
        # Integer rank closest to each requested quantile.
        ranks = (np.array(p) * n + 0.5).astype(int)

        mj = np.empty(len(ranks), float64)
        grid_hi = np.arange(1, n + 1, dtype=float64) / n
        grid_lo = grid_hi - 1. / n
        for i, m in enumerate(ranks):
            # Beta-distribution weights attached to each order statistic.
            weights = (beta.cdf(grid_hi, m - 1, n - m)
                       - beta.cdf(grid_lo, m - 1, n - m))
            moment1 = np.dot(weights, x)
            moment2 = np.dot(weights, x**2)
            mj[i] = np.sqrt(moment2 - moment1**2)
        return mj

    data = ma.array(data, copy=False)
    if data.ndim > 2:
        raise ValueError("Array 'data' must be at most two dimensional, "
                         "but got data.ndim = %d" % data.ndim)

    p = np.atleast_1d(np.asarray(prob))
    # Compute along the axis, or globally on the flattened data.
    if axis is None:
        return _mjci_1D(data, p)
    return ma.apply_along_axis(_mjci_1D, axis, data, p)
307
+
308
+
309
def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None):
    """
    Computes the alpha confidence interval for the selected quantiles of the
    data, with Maritz-Jarrett estimators.

    Parameters
    ----------
    data : ndarray
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    alpha : float, optional
        Confidence level of the intervals.
    axis : int or None, optional
        Axis along which to compute the quantiles.
        If None, use a flattened array.

    Returns
    -------
    ci_lower : ndarray
        The lower boundaries of the confidence interval.  Of the same length
        as `prob`.
    ci_upper : ndarray
        The upper boundaries of the confidence interval.  Of the same length
        as `prob`.

    """
    # Work with the smaller tail probability so the interval is two-sided.
    alpha = min(alpha, 1 - alpha)
    z = norm.ppf(1 - alpha/2.)
    quantiles = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)
    stderr = mjci(data, prob, axis=axis)
    halfwidth = z * stderr
    return (quantiles - halfwidth, quantiles + halfwidth)
341
+
342
+
343
def median_cihs(data, alpha=0.05, axis=None):
    """
    Computes the alpha-level confidence interval for the median of the data.

    Uses the Hettmansperger-Sheather method.

    Parameters
    ----------
    data : array_like
        Input data.  Masked values are discarded.  The input should be 1D
        only, or `axis` should be set to None.
    alpha : float, optional
        Confidence level of the intervals.
    axis : int or None, optional
        Axis along which to compute the quantiles.  If None, use a flattened
        array.

    Returns
    -------
    median_cihs
        Alpha level confidence interval.

    """
    def _cihs_1D(data, alpha):
        x = np.sort(data.compressed())
        n = len(x)
        alpha = min(alpha, 1 - alpha)
        # Order statistic depth from the binomial distribution.
        k = int(binom._ppf(alpha/2., n, 0.5))
        g_k = binom.cdf(n - k, n, 0.5) - binom.cdf(k - 1, n, 0.5)
        if g_k < 1 - alpha:
            # Step one order statistic outward to reach the nominal coverage.
            k -= 1
            g_k = binom.cdf(n - k, n, 0.5) - binom.cdf(k - 1, n, 0.5)
        g_kk = binom.cdf(n - k - 1, n, 0.5) - binom.cdf(k, n, 0.5)
        # Interpolate between adjacent order statistics.
        frac = (g_k - 1 + alpha) / (g_k - g_kk)
        lambd = (n - k) * frac / float(k + (n - 2*k)*frac)
        return (lambd*x[k] + (1 - lambd)*x[k - 1],
                lambd*x[n - k - 1] + (1 - lambd)*x[n - k])

    data = ma.array(data, copy=False)
    # Compute along the axis, or globally on the flattened data.
    if axis is None:
        return _cihs_1D(data, alpha)
    if data.ndim > 2:
        raise ValueError("Array 'data' must be at most two dimensional, "
                         "but got data.ndim = %d" % data.ndim)
    return ma.apply_along_axis(_cihs_1D, axis, data, alpha)
392
+
393
+
394
+ def compare_medians_ms(group_1, group_2, axis=None):
395
+ """
396
+ Compares the medians from two independent groups along the given axis.
397
+
398
+ The comparison is performed using the McKean-Schrader estimate of the
399
+ standard error of the medians.
400
+
401
+ Parameters
402
+ ----------
403
+ group_1 : array_like
404
+ First dataset. Has to be of size >=7.
405
+ group_2 : array_like
406
+ Second dataset. Has to be of size >=7.
407
+ axis : int, optional
408
+ Axis along which the medians are estimated. If None, the arrays are
409
+ flattened. If `axis` is not None, then `group_1` and `group_2`
410
+ should have the same shape.
411
+
412
+ Returns
413
+ -------
414
+ compare_medians_ms : {float, ndarray}
415
+ If `axis` is None, then returns a float, otherwise returns a 1-D
416
+ ndarray of floats with a length equal to the length of `group_1`
417
+ along `axis`.
418
+
419
+ Examples
420
+ --------
421
+
422
+ >>> from scipy import stats
423
+ >>> a = [1, 2, 3, 4, 5, 6, 7]
424
+ >>> b = [8, 9, 10, 11, 12, 13, 14]
425
+ >>> stats.mstats.compare_medians_ms(a, b, axis=None)
426
+ 1.0693225866553746e-05
427
+
428
+ The function is vectorized to compute along a given axis.
429
+
430
+ >>> import numpy as np
431
+ >>> rng = np.random.default_rng()
432
+ >>> x = rng.random(size=(3, 7))
433
+ >>> y = rng.random(size=(3, 8))
434
+ >>> stats.mstats.compare_medians_ms(x, y, axis=1)
435
+ array([0.36908985, 0.36092538, 0.2765313 ])
436
+
437
+ References
438
+ ----------
439
+ .. [1] McKean, Joseph W., and Ronald M. Schrader. "A comparison of methods
440
+ for studentizing the sample median." Communications in
441
+ Statistics-Simulation and Computation 13.6 (1984): 751-773.
442
+
443
+ """
444
+ (med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
445
+ (std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
446
+ mstats.stde_median(group_2, axis=axis))
447
+ W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
448
+ return 1 - norm.cdf(W)
449
+
450
+
451
def idealfourths(data, axis=None):
    """
    Returns an estimate of the lower and upper quartiles.

    Uses the ideal fourths algorithm.

    Parameters
    ----------
    data : array_like
        Input array.
    axis : int, optional
        Axis along which the quartiles are estimated.  If None, the arrays
        are flattened.

    Returns
    -------
    idealfourths : {list of floats, masked array}
        Returns the two internal values that divide `data` into four parts
        using the ideal fourths algorithm either along the flattened array
        (if `axis` is None) or along `axis` of `data`.

    """
    def _idf(row):
        x = row.compressed()
        n = len(x)
        if n < 3:
            # Not enough data for the interpolation below.
            return [np.nan, np.nan]
        # Fractional depth of the ideal fourth.
        j, h = divmod(n/4. + 5/12., 1)
        j = int(j)
        lower = (1 - h)*x[j - 1] + h*x[j]
        k = n - j
        upper = (1 - h)*x[k] + h*x[k - 1]
        return [lower, upper]

    ordered = ma.sort(data, axis=axis).view(MaskedArray)
    if axis is None:
        return _idf(ordered)
    return ma.apply_along_axis(_idf, axis, ordered)
489
+
490
+
491
def rsh(data, points=None):
    """
    Evaluates Rosenblatt's shifted histogram estimators for each data point.

    Rosenblatt's estimator is a centered finite-difference approximation to
    the derivative of the empirical cumulative distribution function.

    Parameters
    ----------
    data : sequence
        Input data, should be 1-D.  Masked values are ignored.
    points : sequence or None, optional
        Sequence of points where to evaluate Rosenblatt shifted histogram.
        If None, use the data.

    """
    data = ma.array(data, copy=False)
    if points is None:
        points = data
    else:
        points = np.atleast_1d(np.asarray(points))

    if data.ndim != 1:
        raise AttributeError("The input array should be 1D only !")

    n = data.count()
    quartiles = idealfourths(data, axis=None)
    # Bandwidth proportional to the interquartile range and n**(-1/5).
    h = 1.2 * (quartiles[-1] - quartiles[0]) / n**(1./5)
    # Counts of data at or below points+h, and strictly below points-h.
    counts_hi = (data[:, None] <= points[None, :] + h).sum(0)
    counts_lo = (data[:, None] < points[None, :] - h).sum(0)
    return (counts_hi - counts_lo) / (2.*n*h)
parrot/lib/python3.10/site-packages/scipy/stats/_qmc.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/stats/_qmvnt.py ADDED
@@ -0,0 +1,533 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Integration of multivariate normal and t distributions.
2
+
3
+ # Adapted from the MATLAB original implementations by Dr. Alan Genz.
4
+
5
+ # http://www.math.wsu.edu/faculty/genz/software/software.html
6
+
7
+ # Copyright (C) 2013, Alan Genz, All rights reserved.
8
+ # Python implementation is copyright (C) 2022, Robert Kern, All rights
9
+ # reserved.
10
+
11
+ # Redistribution and use in source and binary forms, with or without
12
+ # modification, are permitted provided the following conditions are met:
13
+ # 1. Redistributions of source code must retain the above copyright
14
+ # notice, this list of conditions and the following disclaimer.
15
+ # 2. Redistributions in binary form must reproduce the above copyright
16
+ # notice, this list of conditions and the following disclaimer in
17
+ # the documentation and/or other materials provided with the
18
+ # distribution.
19
+ # 3. The contributor name(s) may not be used to endorse or promote
20
+ # products derived from this software without specific prior
21
+ # written permission.
22
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23
+ # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24
+ # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25
+ # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26
+ # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27
+ # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28
+ # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
29
+ # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
31
+ # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF USE
32
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
+
34
+
35
+ import numpy as np
36
+
37
+ from scipy.fft import fft, ifft
38
+ from scipy.special import gammaincinv, ndtr, ndtri
39
+ from scipy.stats._qmc import primes_from_2_to
40
+
41
+
42
+ phi = ndtr
43
+ phinv = ndtri
44
+
45
+
46
+ def _factorize_int(n):
47
+ """Return a sorted list of the unique prime factors of a positive integer.
48
+ """
49
+ # NOTE: There are lots faster ways to do this, but this isn't terrible.
50
+ factors = set()
51
+ for p in primes_from_2_to(int(np.sqrt(n)) + 1):
52
+ while not (n % p):
53
+ factors.add(p)
54
+ n //= p
55
+ if n == 1:
56
+ break
57
+ if n != 1:
58
+ factors.add(n)
59
+ return sorted(factors)
60
+
61
+
62
def _primitive_root(p):
    """Compute a primitive root of the prime number `p`.

    Used in the CBC lattice construction.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Primitive_root_modulo_n
    """
    # r is a primitive root of the prime p iff r**((p-1)/q) != 1 (mod p) for
    # every prime factor q of p - 1.  Scan candidates r = 2, 3, ... until one
    # passes the test for every factor.
    pm = p - 1
    prime_factors = _factorize_int(pm)
    n_factors = len(prime_factors)
    candidate = 2
    idx = 0
    while idx < n_factors:
        exponent = pm // prime_factors[idx]
        # pow() doesn't like numpy scalar types.
        residue = pow(int(candidate), int(exponent), int(p))
        if residue == 1:
            # Failed: move to the next candidate and restart the factor scan.
            candidate += 1
            idx = 0
        else:
            idx += 1
    return candidate
87
+
88
+
89
def _cbc_lattice(n_dim, n_qmc_samples):
    """Compute a QMC lattice generator using a Fast CBC construction.

    Parameters
    ----------
    n_dim : int > 0
        The number of dimensions for the lattice.
    n_qmc_samples : int > 0
        The desired number of QMC samples. This will be rounded down to the
        nearest prime to enable the CBC construction.

    Returns
    -------
    q : float array : shape=(n_dim,)
        The lattice generator vector. All values are in the open interval
        `(0, 1)`.
    actual_n_qmc_samples : int
        The prime number of QMC samples that must be used with this lattice,
        no more, no less.

    References
    ----------
    .. [1] Nuyens, D. and Cools, R. "Fast Component-by-Component Construction,
        a Reprise for Different Kernels", In H. Niederreiter and D. Talay,
        editors, Monte-Carlo and Quasi-Monte Carlo Methods 2004,
        Springer-Verlag, 2006, 371-385.
    """
    # Round down to the nearest prime number.
    primes = primes_from_2_to(n_qmc_samples + 1)
    n_qmc_samples = primes[-1]

    # Per-dimension weights of the error criterion (first dimension gets
    # weight 1, later ones decay geometrically).
    bt = np.ones(n_dim)
    gm = np.hstack([1.0, 0.8 ** np.arange(n_dim - 1)])
    q = 1
    w = 0
    z = np.arange(1, n_dim + 1)
    # Only half the residues are needed thanks to the lattice's symmetry.
    m = (n_qmc_samples - 1) // 2
    g = _primitive_root(n_qmc_samples)
    # Slightly faster way to compute perm[j] = pow(g, j, n_qmc_samples)
    # Shame that we don't have modulo pow() implemented as a ufunc.
    perm = np.ones(m, dtype=int)
    for j in range(m - 1):
        perm[j + 1] = (g * perm[j]) % n_qmc_samples
    perm = np.minimum(n_qmc_samples - perm, perm)
    pn = perm / n_qmc_samples
    # Kernel values; pn**2 - pn + 1/6 is the Bernoulli polynomial B_2(pn).
    c = pn * pn - pn + 1.0 / 6
    fc = fft(c)
    for s in range(1, n_dim):
        # Choose the generator component for dimension s: the minimizer of
        # the error criterion is found through a circular convolution,
        # evaluated with FFTs (the "fast" part of Fast CBC).
        reordered = np.hstack([
            c[:w+1][::-1],
            c[w+1:m][::-1],
        ])
        q = q * (bt[s-1] + gm[s-1] * reordered)
        w = ifft(fc * fft(q)).real.argmin()
        z[s] = perm[w]
    q = z / n_qmc_samples
    return q, n_qmc_samples
146
+
147
+
148
+ # Note: this function is not currently used or tested by any SciPy code. It is
149
+ # included in this file to facilitate the development of a parameter for users
150
+ # to set the desired CDF accuracy, but must be reviewed and tested before use.
151
def _qauto(func, covar, low, high, rng, error=1e-3, limit=10_000, **kwds):
    """Automatically rerun the integration to get the required error bound.

    Parameters
    ----------
    func : callable
        Either :func:`_qmvn` or :func:`_qmvt`.
    covar, low, high : array
        As specified in :func:`_qmvn` and :func:`_qmvt`.
    rng : Generator, optional
        default_rng(), yada, yada
    error : float > 0
        The desired error bound.
    limit : int > 0:
        The rough limit of the number of integration points to consider. The
        integration will stop looping once this limit has been *exceeded*.
    **kwds :
        Other keyword arguments to pass to `func`. When using :func:`_qmvt`, be
        sure to include ``nu=`` as one of these.

    Returns
    -------
    prob : float
        The estimated probability mass within the bounds.
    est_error : float
        3 times the standard error of the batch estimates.
    n_samples : int
        The number of integration points actually used.
    """
    n = len(covar)
    n_samples = 0
    if n == 1:
        # Univariate case: just a difference of normal CDF values.
        prob = phi(high) - phi(low)
        # More or less
        est_error = 1e-15
    else:
        mi = min(limit, n * 1000)
        prob = 0.0
        est_error = 1.0
        ei = 0.0
        while est_error > error and n_samples < limit:
            # Grow the per-call sample count geometrically (~sqrt(2) each
            # pass) until the error target or the sample limit is reached.
            mi = round(np.sqrt(2) * mi)
            pi, ei, ni = func(mi, covar, low, high, rng=rng, **kwds)
            n_samples += ni
            # Combine the new estimate with the running one, weighting by
            # their relative error estimates.
            wt = 1.0 / (1 + (ei / est_error)**2)
            prob += wt * (pi - prob)
            est_error = np.sqrt(wt) * ei
    return prob, est_error, n_samples
199
+
200
+
201
+ # Note: this function is not currently used or tested by any SciPy code. It is
202
+ # included in this file to facilitate the resolution of gh-8367, gh-16142, and
203
+ # possibly gh-14286, but must be reviewed and tested before use.
204
def _qmvn(m, covar, low, high, rng, lattice='cbc', n_batches=10):
    """Multivariate normal integration over box bounds.

    Parameters
    ----------
    m : int > n_batches
        The number of points to sample. This number will be divided into
        `n_batches` batches that apply random offsets of the sampling lattice
        for each batch in order to estimate the error.
    covar : (n, n) float array
        Possibly singular, positive semidefinite symmetric covariance matrix.
    low, high : (n,) float array
        The low and high integration bounds.
    rng : Generator, optional
        default_rng(), yada, yada
    lattice : 'cbc' or callable
        The type of lattice rule to use to construct the integration points.
    n_batches : int > 0, optional
        The number of QMC batches to apply.

    Returns
    -------
    prob : float
        The estimated probability mass within the bounds.
    est_error : float
        3 times the standard error of the batch estimates.
    """
    cho, lo, hi = _permuted_cholesky(covar, low, high)
    n = cho.shape[0]
    # The first variable is handled analytically; its CDF interval seeds the
    # sequential conditioning of the remaining n-1 variables.
    ct = cho[0, 0]
    c = phi(lo[0] / ct)
    d = phi(hi[0] / ct)
    ci = c
    dci = d - ci
    prob = 0.0
    error_var = 0.0
    # One lattice of n-1 dimensions; each batch applies a fresh random shift.
    q, n_qmc_samples = _cbc_lattice(n - 1, max(m // n_batches, 1))
    y = np.zeros((n - 1, n_qmc_samples))
    i_samples = np.arange(n_qmc_samples) + 1
    for j in range(n_batches):
        c = np.full(n_qmc_samples, ci)
        dc = np.full(n_qmc_samples, dci)
        pv = dc.copy()
        for i in range(1, n):
            # Pseudorandomly-shifted lattice coordinate.
            z = q[i - 1] * i_samples + rng.random()
            # Fast remainder(z, 1.0)
            z -= z.astype(int)
            # Tent periodization transform.
            x = abs(2 * z - 1)
            # Map the uniform variate into the current conditional slab
            # [c, d) and back to a normal variate.
            y[i - 1, :] = phinv(c + x * dc)
            s = cho[i, :i] @ y[:i, :]
            ct = cho[i, i]
            c = phi((lo[i] - s) / ct)
            d = phi((hi[i] - s) / ct)
            dc = d - c
            # Running product of the conditional probabilities.
            pv = pv * dc
        # Accumulate the mean and error variances with online formulations.
        d = (pv.mean() - prob) / (j + 1)
        prob += d
        error_var = (j - 1) * error_var / (j + 1) + d * d
    # Error bounds are 3 times the standard error of the estimates.
    est_error = 3 * np.sqrt(error_var)
    n_samples = n_qmc_samples * n_batches
    return prob, est_error, n_samples
269
+
270
+
271
+ # Note: this function is not currently used or tested by any SciPy code. It is
272
+ # included in this file to facilitate the resolution of gh-8367, gh-16142, and
273
+ # possibly gh-14286, but must be reviewed and tested before use.
274
def _mvn_qmc_integrand(covar, low, high, use_tent=False):
    """Transform the multivariate normal integration into a QMC integrand over
    a unit hypercube.

    The dimensionality of the resulting hypercube integration domain is one
    less than the dimensionality of the original integrand. Note that this
    transformation subsumes the integration bounds in order to account for
    infinite bounds. The QMC integration one does with the returned integrand
    should be on the unit hypercube.

    Parameters
    ----------
    covar : (n, n) float array
        Possibly singular, positive semidefinite symmetric covariance matrix.
    low, high : (n,) float array
        The low and high integration bounds.
    use_tent : bool, optional
        If True, then use tent periodization. Only helpful for lattice rules.

    Returns
    -------
    integrand : Callable[[NDArray], NDArray]
        The QMC-integrable integrand. It takes an
        ``(n_qmc_samples, ndim_integrand)`` array of QMC samples in the unit
        hypercube and returns the ``(n_qmc_samples,)`` evaluations of at these
        QMC points.
    ndim_integrand : int
        The dimensionality of the integrand. Equal to ``n-1``.
    """
    cho, lo, hi = _permuted_cholesky(covar, low, high)
    n = cho.shape[0]
    ndim_integrand = n - 1
    # The first variable is integrated analytically; its CDF interval
    # (ci, ci + dci) is captured by the closure below.
    ct = cho[0, 0]
    c = phi(lo[0] / ct)
    d = phi(hi[0] / ct)
    ci = c
    dci = d - ci

    def integrand(*zs):
        # zs: one array of unit-hypercube coordinates per integrand
        # dimension, all of the same length.
        ndim_qmc = len(zs)
        n_qmc_samples = len(np.atleast_1d(zs[0]))
        assert ndim_qmc == ndim_integrand
        y = np.zeros((ndim_qmc, n_qmc_samples))
        c = np.full(n_qmc_samples, ci)
        dc = np.full(n_qmc_samples, dci)
        pv = dc.copy()
        for i in range(1, n):
            if use_tent:
                # Tent periodization transform.
                x = abs(2 * zs[i-1] - 1)
            else:
                x = zs[i-1]
            # Map the uniform variate into the current conditional slab
            # [c, d), then back to a normal variate.
            y[i - 1, :] = phinv(c + x * dc)
            s = cho[i, :i] @ y[:i, :]
            ct = cho[i, i]
            c = phi((lo[i] - s) / ct)
            d = phi((hi[i] - s) / ct)
            dc = d - c
            # Running product of the conditional probabilities.
            pv = pv * dc
        return pv

    return integrand, ndim_integrand
+
337
+
338
def _qmvt(m, nu, covar, low, high, rng, lattice='cbc', n_batches=10):
    """Multivariate t integration over box bounds.

    Parameters
    ----------
    m : int > n_batches
        The number of points to sample. This number will be divided into
        `n_batches` batches that apply random offsets of the sampling lattice
        for each batch in order to estimate the error.
    nu : float >= 0
        The shape parameter of the multivariate t distribution.
    covar : (n, n) float array
        Possibly singular, positive semidefinite symmetric covariance matrix.
    low, high : (n,) float array
        The low and high integration bounds.
    rng : Generator, optional
        default_rng(), yada, yada
    lattice : 'cbc' or callable
        The type of lattice rule to use to construct the integration points.
    n_batches : int > 0, optional
        The number of QMC batches to apply.

    Returns
    -------
    prob : float
        The estimated probability mass within the bounds.
    est_error : float
        3 times the standard error of the batch estimates.
    n_samples : int
        The number of samples actually used.
    """
    sn = max(1.0, np.sqrt(nu))
    low = np.asarray(low, dtype=np.float64)
    high = np.asarray(high, dtype=np.float64)
    cho, lo, hi = _permuted_cholesky(covar, low / sn, high / sn)
    n = cho.shape[0]
    prob = 0.0
    error_var = 0.0
    # One lattice of n dimensions; each batch applies a fresh random shift.
    q, n_qmc_samples = _cbc_lattice(n, max(m // n_batches, 1))
    i_samples = np.arange(n_qmc_samples) + 1
    for j in range(n_batches):
        pv = np.ones(n_qmc_samples)
        s = np.zeros((n, n_qmc_samples))
        for i in range(n):
            # Pseudorandomly-shifted lattice coordinate.
            z = q[i] * i_samples + rng.random()
            # Fast remainder(z, 1.0)
            z -= z.astype(int)
            # Tent periodization transform.
            x = abs(2 * z - 1)
            # FIXME: Lift the i==0 case out of the loop to make the logic
            # easier to follow.
            if i == 0:
                # We'll use one of the QR variates to pull out the
                # t-distribution scaling.
                if nu > 0:
                    r = np.sqrt(2 * gammaincinv(nu / 2, x))
                else:
                    r = np.ones_like(x)
            else:
                # c and dc hold the conditional CDF interval from the
                # *previous* loop iteration, hence the noqa below.
                y = phinv(c + x * dc)  # noqa: F821
                with np.errstate(invalid='ignore'):
                    s[i:, :] += cho[i:, i - 1][:, np.newaxis] * y
            si = s[i, :]

            # Clip the standardized bounds to avoid evaluating phi() far in
            # the tails; beyond +/-9 the CDF is numerically 0 or 1.
            c = np.ones(n_qmc_samples)
            d = np.ones(n_qmc_samples)
            with np.errstate(invalid='ignore'):
                lois = lo[i] * r - si
                hiis = hi[i] * r - si
            c[lois < -9] = 0.0
            d[hiis < -9] = 0.0
            lo_mask = abs(lois) < 9
            hi_mask = abs(hiis) < 9
            c[lo_mask] = phi(lois[lo_mask])
            d[hi_mask] = phi(hiis[hi_mask])

            dc = d - c
            # Running product of the conditional probabilities.
            pv *= dc

        # Accumulate the mean and error variances with online formulations.
        d = (pv.mean() - prob) / (j + 1)
        prob += d
        error_var = (j - 1) * error_var / (j + 1) + d * d
    # Error bounds are 3 times the standard error of the estimates.
    est_error = 3 * np.sqrt(error_var)
    n_samples = n_qmc_samples * n_batches
    return prob, est_error, n_samples
426
+
427
+
428
def _permuted_cholesky(covar, low, high, tol=1e-10):
    """Compute a scaled, permuted Cholesky factor, with integration bounds.

    The scaling and permuting of the dimensions accomplishes part of the
    transformation of the original integration problem into a more numerically
    tractable form. The lower-triangular Cholesky factor will then be used in
    the subsequent integration. The integration bounds will be scaled and
    permuted as well.

    Parameters
    ----------
    covar : (n, n) float array
        Possibly singular, positive semidefinite symmetric covariance matrix.
    low, high : (n,) float array
        The low and high integration bounds.
    tol : float, optional
        The singularity tolerance.

    Returns
    -------
    cho : (n, n) float array
        Lower Cholesky factor, scaled and permuted.
    new_low, new_high : (n,) float array
        The scaled and permuted low and high integration bounds.
    """
    # Make copies for outputting.
    cho = np.array(covar, dtype=np.float64)
    new_lo = np.array(low, dtype=np.float64)
    new_hi = np.array(high, dtype=np.float64)
    n = cho.shape[0]
    if cho.shape != (n, n):
        raise ValueError("expected a square symmetric array")
    if new_lo.shape != (n,) or new_hi.shape != (n,):
        raise ValueError(
            "expected integration boundaries the same dimensions "
            "as the covariance matrix"
        )
    # Scale by the sqrt of the diagonal.
    dc = np.sqrt(np.maximum(np.diag(cho), 0.0))
    # But don't divide by 0.
    dc[dc == 0.0] = 1.0
    new_lo /= dc
    new_hi /= dc
    cho /= dc
    cho /= dc[:, np.newaxis]

    y = np.zeros(n)
    sqtp = np.sqrt(2 * np.pi)
    for k in range(n):
        # Relax the singularity tolerance as errors accumulate per step.
        epk = (k + 1) * tol
        im = k
        ck = 0.0
        dem = 1.0
        s = 0.0
        lo_m = 0.0
        hi_m = 0.0
        # Pivot search: pick the remaining variable whose conditional
        # probability mass (de) is smallest.
        for i in range(k, n):
            if cho[i, i] > tol:
                ci = np.sqrt(cho[i, i])
                if i > 0:
                    s = cho[i, :k] @ y[:k]
                lo_i = (new_lo[i] - s) / ci
                hi_i = (new_hi[i] - s) / ci
                de = phi(hi_i) - phi(lo_i)
                if de <= dem:
                    ck = ci
                    dem = de
                    lo_m = lo_i
                    hi_m = hi_i
                    im = i
        if im > k:
            # Swap im and k
            cho[im, im] = cho[k, k]
            _swap_slices(cho, np.s_[im, :k], np.s_[k, :k])
            _swap_slices(cho, np.s_[im + 1:, im], np.s_[im + 1:, k])
            _swap_slices(cho, np.s_[k + 1:im, k], np.s_[im, k + 1:im])
            _swap_slices(new_lo, k, im)
            _swap_slices(new_hi, k, im)
        if ck > epk:
            # Standard Cholesky elimination step for column k.
            cho[k, k] = ck
            cho[k, k + 1:] = 0.0
            for i in range(k + 1, n):
                cho[i, k] /= ck
                cho[i, k + 1:i + 1] -= cho[i, k] * cho[k + 1:i + 1, k]
            if abs(dem) > tol:
                # Expected value of the variable truncated to [lo_m, hi_m].
                y[k] = ((np.exp(-lo_m * lo_m / 2) - np.exp(-hi_m * hi_m / 2)) /
                        (sqtp * dem))
            else:
                # Degenerate interval: fall back to the midpoint, or to the
                # finite endpoint when the interval is one-sided in a tail.
                y[k] = (lo_m + hi_m) / 2
                if lo_m < -10:
                    y[k] = hi_m
                elif hi_m > 10:
                    y[k] = lo_m
            cho[k, :k + 1] /= ck
            new_lo[k] /= ck
            new_hi[k] /= ck
        else:
            # (Numerically) singular pivot: zero out the column.
            cho[k:, k] = 0.0
            y[k] = (new_lo[k] + new_hi[k]) / 2
    return cho, new_lo, new_hi
+ return cho, new_lo, new_hi
528
+
529
+
530
+ def _swap_slices(x, slc1, slc2):
531
+ t = x[slc1].copy()
532
+ x[slc1] = x[slc2].copy()
533
+ x[slc2] = t
parrot/lib/python3.10/site-packages/scipy/stats/_rvs_sampling.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from scipy.stats.sampling import RatioUniforms
3
+
4
+ def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
5
+ """
6
+ Generate random samples from a probability density function using the
7
+ ratio-of-uniforms method.
8
+
9
+ .. deprecated:: 1.12.0
10
+ `rvs_ratio_uniforms` is deprecated in favour of
11
+ `scipy.stats.sampling.RatioUniforms` from version 1.12.0 and will
12
+ be removed in SciPy 1.15.0
13
+
14
+ Parameters
15
+ ----------
16
+ pdf : callable
17
+ A function with signature `pdf(x)` that is proportional to the
18
+ probability density function of the distribution.
19
+ umax : float
20
+ The upper bound of the bounding rectangle in the u-direction.
21
+ vmin : float
22
+ The lower bound of the bounding rectangle in the v-direction.
23
+ vmax : float
24
+ The upper bound of the bounding rectangle in the v-direction.
25
+ size : int or tuple of ints, optional
26
+ Defining number of random variates (default is 1).
27
+ c : float, optional.
28
+ Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
29
+ random_state : {None, int, `numpy.random.Generator`,
30
+ `numpy.random.RandomState`}, optional
31
+
32
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
33
+ singleton is used.
34
+ If `seed` is an int, a new ``RandomState`` instance is used,
35
+ seeded with `seed`.
36
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
37
+ that instance is used.
38
+
39
+ Returns
40
+ -------
41
+ rvs : ndarray
42
+ The random variates distributed according to the probability
43
+ distribution defined by the pdf.
44
+
45
+ Notes
46
+ -----
47
+ Please refer to `scipy.stats.sampling.RatioUniforms` for the documentation.
48
+ """
49
+ warnings.warn("Please use `RatioUniforms` from the "
50
+ "`scipy.stats.sampling` namespace. The "
51
+ "`scipy.stats.rvs_ratio_uniforms` namespace is deprecated "
52
+ "and will be removed in SciPy 1.15.0",
53
+ category=DeprecationWarning, stacklevel=2)
54
+ gen = RatioUniforms(pdf, umax=umax, vmin=vmin, vmax=vmax,
55
+ c=c, random_state=random_state)
56
+ return gen.rvs(size)
parrot/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py ADDED
@@ -0,0 +1,712 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ from dataclasses import dataclass
5
+ from typing import (
6
+ Callable, Literal, Protocol, TYPE_CHECKING
7
+ )
8
+
9
+ import numpy as np
10
+
11
+ from scipy.stats._common import ConfidenceInterval
12
+ from scipy.stats._qmc import check_random_state
13
+ from scipy.stats._resampling import BootstrapResult
14
+ from scipy.stats import qmc, bootstrap
15
+
16
+
17
+ if TYPE_CHECKING:
18
+ import numpy.typing as npt
19
+ from scipy._lib._util import DecimalNumber, IntNumber, SeedType
20
+
21
+
22
+ __all__ = [
23
+ 'sobol_indices'
24
+ ]
25
+
26
+
27
def f_ishigami(x: npt.ArrayLike) -> np.ndarray:
    r"""Ishigami function.

    .. math::

        Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1

    with :math:`\mathbf{x} \in [-\pi, \pi]^3`.

    Parameters
    ----------
    x : array_like ([x1, x2, x3], n)

    Returns
    -------
    f : array_like (n,)
        Function evaluation.

    References
    ----------
    .. [1] Ishigami, T. and T. Homma. "An importance quantification technique
       in uncertainty analysis for computer models." IEEE,
       :doi:`10.1109/ISUMA.1990.151285`, 1990.
    """
    arr = np.atleast_2d(x)
    x1, x2, x3 = arr[0], arr[1], arr[2]
    # Evaluate the three additive terms separately, then combine them.
    term_1 = np.sin(x1)
    term_2 = 7 * np.sin(x2) ** 2
    term_3 = 0.1 * x3 ** 4 * np.sin(x1)
    return term_1 + term_2 + term_3
58
+
59
+
60
def sample_A_B(
    n: IntNumber,
    dists: list[PPFDist],
    random_state: SeedType = None
) -> np.ndarray:
    """Sample two matrices A and B.

    A single Sobol' sequence with ``2*d`` columns provides two
    uncorrelated matrices at once, which is more efficient than two
    separate Sobol' draws. See sec. 5 from [1]_.

    Output shape is (d, n).

    References
    ----------
    .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    """
    n_dim = len(dists)
    # One Sobol' draw with 2*d columns, split into two (d, n) matrices.
    sample = qmc.Sobol(d=2 * n_dim, seed=random_state, bits=64).random(n)
    A_B = sample.T.reshape(2, n_dim, -1)
    try:
        # Push the uniform samples through each marginal's inverse CDF.
        for i, dist in enumerate(dists):
            A_B[:, i] = dist.ppf(A_B[:, i])
    except AttributeError as exc:
        raise ValueError(
            "Each distribution in `dists` must have method `ppf`."
        ) from exc
    return A_B
91
+
92
+
93
def sample_AB(A: np.ndarray, B: np.ndarray) -> np.ndarray:
    """AB matrix.

    AB: rows of B into A. Shape (d, d, n).

    Page ``i`` of the output is a copy of ``A`` whose ``i``-th row has been
    replaced by the ``i``-th row of ``B``; the ``d`` pages are stacked along
    the first axis.
    """
    num_params = A.shape[0]
    # Copy A into `d` pages, then overwrite one row per page with B's row.
    pages = np.repeat(A[np.newaxis, :, :], num_params, axis=0)
    diag = np.arange(num_params)
    pages[diag, diag] = B[diag]
    return pages
108
+
109
+
110
def saltelli_2010(
    f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray
) -> tuple[np.ndarray, np.ndarray]:
    r"""Saltelli2010 formulation.

    .. math::

        S_i = \frac{1}{N} \sum_{j=1}^N
        f(\mathbf{B})_j (f(\mathbf{AB}^{(i)})_j - f(\mathbf{A})_j)

    .. math::

        S_{T_i} = \frac{1}{N} \sum_{j=1}^N
        (f(\mathbf{A})_j - f(\mathbf{AB}^{(i)})_j)^2

    Parameters
    ----------
    f_A, f_B : array_like (s, n)
        Function values at A and B, respectively
    f_AB : array_like (d, s, n)
        Function values at each of the AB pages

    Returns
    -------
    s, st : array_like (s, d)
        First order and total order Sobol' indices.

    References
    ----------
    .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    """
    # A and B are independent samples, so the empirical variance is
    # estimated from them only; the AB pages are correlated with both
    # and cannot be used for this.
    variance = np.var([f_A, f_B], axis=(0, -1))

    # Dividing by the variance turns both estimators into variance
    # ratios (eq. 2 of [1]_).
    first = np.mean(f_B * (f_AB - f_A), axis=-1) / variance  # Table 2 (b)
    total = 0.5 * np.mean((f_A - f_AB) ** 2, axis=-1) / variance  # Table 2 (f)

    return first.T, total.T
155
+
156
+
157
@dataclass
class BootstrapSobolResult:
    """Bootstrap results for first- and total-order Sobol' indices.

    Both attributes are `BootstrapResult` instances carrying the
    confidence interval, bootstrap distribution and standard error of
    the corresponding set of indices.
    """
    # Bootstrap result of the first-order indices.
    first_order: BootstrapResult
    # Bootstrap result of the total-order indices.
    total_order: BootstrapResult
161
+
162
+
163
@dataclass
class SobolResult:
    """Result object returned by `sobol_indices`.

    Public attributes hold the point estimates; underscore attributes
    retain the data needed by `bootstrap` (function evaluations, the
    estimator, and optionally the sample matrices when `func` was a
    callable).
    """
    # Point estimates, shape (s, d) (squeezed to (d,) for single output).
    first_order: np.ndarray
    total_order: np.ndarray
    # Estimator with signature (f_A, f_B, f_AB) -> (first, total).
    _indices_method: Callable
    # Function evaluations: f_A, f_B have shape (s, n); f_AB (d, s, n).
    _f_A: np.ndarray
    _f_B: np.ndarray
    _f_AB: np.ndarray
    # Sample matrices; only populated when `func` was a callable.
    _A: np.ndarray | None = None
    _B: np.ndarray | None = None
    _AB: np.ndarray | None = None
    # Cached result so repeated `bootstrap` calls can reuse resamples.
    _bootstrap_result: BootstrapResult | None = None

    def bootstrap(
        self,
        confidence_level: DecimalNumber = 0.95,
        n_resamples: IntNumber = 999
    ) -> BootstrapSobolResult:
        """Bootstrap Sobol' indices to provide confidence intervals.

        Parameters
        ----------
        confidence_level : float, default: ``0.95``
            The confidence level of the confidence intervals.
        n_resamples : int, default: ``999``
            The number of resamples performed to form the bootstrap
            distribution of the indices.

        Returns
        -------
        res : BootstrapSobolResult
            Bootstrap result containing the confidence intervals and the
            bootstrap distribution of the indices.

            An object with attributes:

            first_order : BootstrapResult
                Bootstrap result of the first order indices.
            total_order : BootstrapResult
                Bootstrap result of the total order indices.
            See `BootstrapResult` for more details.

        """
        # Resample *sample indices* (columns), not values, so that the
        # pairing between f_A, f_B and every f_AB page is preserved.
        def statistic(idx):
            f_A_ = self._f_A[:, idx]
            f_B_ = self._f_B[:, idx]
            f_AB_ = self._f_AB[..., idx]
            return self._indices_method(f_A_, f_B_, f_AB_)

        n = self._f_A.shape[1]

        # Passing the cached `bootstrap_result` lets scipy reuse previous
        # resamples when this method is called again.
        res = bootstrap(
            [np.arange(n)], statistic=statistic, method="BCa",
            n_resamples=n_resamples,
            confidence_level=confidence_level,
            bootstrap_result=self._bootstrap_result
        )
        self._bootstrap_result = res

        # `statistic` returns (first, total) stacked along axis 0 of the
        # bootstrap output; split it back into two BootstrapResult objects.
        first_order = BootstrapResult(
            confidence_interval=ConfidenceInterval(
                res.confidence_interval.low[0], res.confidence_interval.high[0]
            ),
            bootstrap_distribution=res.bootstrap_distribution[0],
            standard_error=res.standard_error[0],
        )
        total_order = BootstrapResult(
            confidence_interval=ConfidenceInterval(
                res.confidence_interval.low[1], res.confidence_interval.high[1]
            ),
            bootstrap_distribution=res.bootstrap_distribution[1],
            standard_error=res.standard_error[1],
        )

        return BootstrapSobolResult(
            first_order=first_order, total_order=total_order
        )
240
+
241
+
242
class PPFDist(Protocol):
    """Structural type for distributions accepted by `sobol_indices`.

    Any object exposing a ``ppf`` (percent point function, i.e. inverse
    CDF) attribute satisfies this protocol; no inheritance is required.
    """
    @property
    def ppf(self) -> Callable[..., float]:
        ...
246
+
247
+
248
def sobol_indices(
    *,
    func: Callable[[np.ndarray], npt.ArrayLike] |
          dict[Literal['f_A', 'f_B', 'f_AB'], np.ndarray],
    n: IntNumber,
    dists: list[PPFDist] | None = None,
    method: Callable | Literal['saltelli_2010'] = 'saltelli_2010',
    random_state: SeedType = None
) -> SobolResult:
    r"""Global sensitivity indices of Sobol'.

    Parameters
    ----------
    func : callable or dict(str, array_like)
        If `func` is a callable, function to compute the Sobol' indices from.
        Its signature must be::

            func(x: ArrayLike) -> ArrayLike

        with ``x`` of shape ``(d, n)`` and output of shape ``(s, n)`` where:

        - ``d`` is the input dimensionality of `func`
          (number of input variables),
        - ``s`` is the output dimensionality of `func`
          (number of output variables), and
        - ``n`` is the number of samples (see `n` below).

        Function evaluation values must be finite.

        If `func` is a dictionary, contains the function evaluations from
        three different arrays. Keys must be: ``f_A``, ``f_B`` and ``f_AB``.
        ``f_A`` and ``f_B`` should have a shape ``(s, n)`` and ``f_AB``
        should have a shape ``(d, s, n)``.
        This is an advanced feature and misuse can lead to wrong analysis.
    n : int
        Number of samples used to generate the matrices ``A`` and ``B``.
        Must be a power of 2. The total number of points at which `func` is
        evaluated will be ``n*(d+2)``.
    dists : list(distributions), optional
        List of each parameter's distribution. The distribution of parameters
        depends on the application and should be carefully chosen.
        Parameters are assumed to be independently distributed, meaning there
        is no constraint nor relationship between their values.

        Distributions must be an instance of a class with a ``ppf``
        method.

        Must be specified if `func` is a callable, and ignored otherwise.
    method : Callable or str, default: 'saltelli_2010'
        Method used to compute the first and total Sobol' indices.

        If a callable, its signature must be::

            func(f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray)
            -> Tuple[np.ndarray, np.ndarray]

        with ``f_A, f_B`` of shape ``(s, n)`` and ``f_AB`` of shape
        ``(d, s, n)``.
        These arrays contain the function evaluations from three different
        sets of samples.
        The output is a tuple of the first and total indices with
        shape ``(s, d)``.
        This is an advanced feature and misuse can lead to wrong analysis.
    random_state : {None, int, `numpy.random.Generator`}, optional
        If `random_state` is an int or None, a new `numpy.random.Generator`
        is created using ``np.random.default_rng(random_state)``.
        If `random_state` is already a ``Generator`` instance, then the
        provided instance is used.

    Returns
    -------
    res : SobolResult
        An object with attributes:

        first_order : ndarray of shape (s, d)
            First order Sobol' indices.
        total_order : ndarray of shape (s, d)
            Total order Sobol' indices.

        And method:

        bootstrap(confidence_level: float, n_resamples: int)
        -> BootstrapSobolResult

            A method providing confidence intervals on the indices.
            See `scipy.stats.bootstrap` for more details.

            The bootstrapping is done on both first and total order indices,
            and they are available in `BootstrapSobolResult` as attributes
            ``first_order`` and ``total_order``.

    Notes
    -----
    The Sobol' method [1]_, [2]_ is a variance-based Sensitivity Analysis
    which obtains the contribution of each parameter to the variance of the
    quantities of interest (QoIs; i.e., the outputs of `func`).
    Respective contributions can be used to rank the parameters and
    also gauge the complexity of the model by computing the
    model's effective (or mean) dimension.

    .. note::

        Parameters are assumed to be independently distributed. Each
        parameter can still follow any distribution. In fact, the
        distribution is very important and should match the real
        distribution of the parameters.

    It uses a functional decomposition of the variance of the function to
    explore

    .. math::

        \mathbb{V}(Y) = \sum_{i}^{d} \mathbb{V}_i (Y) + \sum_{i<j}^{d}
        \mathbb{V}_{ij}(Y) + ... + \mathbb{V}_{1,2,...,d}(Y),

    introducing conditional variances:

    .. math::

        \mathbb{V}_i(Y) = \mathbb{\mathbb{V}}[\mathbb{E}(Y|x_i)]
        \qquad
        \mathbb{V}_{ij}(Y) = \mathbb{\mathbb{V}}[\mathbb{E}(Y|x_i x_j)]
        - \mathbb{V}_i(Y) - \mathbb{V}_j(Y),

    Sobol' indices are expressed as

    .. math::

        S_i = \frac{\mathbb{V}_i(Y)}{\mathbb{V}[Y]}
        \qquad
        S_{ij} =\frac{\mathbb{V}_{ij}(Y)}{\mathbb{V}[Y]}.

    :math:`S_{i}` corresponds to the first-order term which apprises the
    contribution of the i-th parameter, while :math:`S_{ij}` corresponds to
    the second-order term which informs about the contribution of
    interactions between the i-th and the j-th parameters. These equations
    can be generalized to compute higher order terms; however, they are
    expensive to compute and their interpretation is complex.
    This is why only first order indices are provided.

    Total order indices represent the global contribution of the parameters
    to the variance of the QoI and are defined as:

    .. math::

        S_{T_i} = S_i + \sum_j S_{ij} + \sum_{j,k} S_{ijk} + ...
        = 1 - \frac{\mathbb{V}[\mathbb{E}(Y|x_{\sim i})]}{\mathbb{V}[Y]}.

    First order indices sum to at most 1, while total order indices sum to
    at least 1. If there are no interactions, then first and total order
    indices are equal, and both first and total order indices sum to 1.

    .. warning::

        Negative Sobol' values are due to numerical errors. Increasing the
        number of points `n` should help.

        The number of sample required to have a good analysis increases with
        the dimensionality of the problem. e.g. for a 3 dimension problem,
        consider at minima ``n >= 2**12``. The more complex the model is,
        the more samples will be needed.

        Even for a purely additive model, the indices may not sum to 1 due
        to numerical noise.

    References
    ----------
    .. [1] Sobol, I. M.. "Sensitivity analysis for nonlinear mathematical
       models." Mathematical Modeling and Computational Experiment,
       1:407-414, 1993.
    .. [2] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
       mathematical models and their Monte Carlo estimates." Mathematics
       and Computers in Simulation, 55(1-3):271-280,
       :doi:`10.1016/S0378-4754(00)00270-6`, 2001.
    .. [3] Saltelli, A. "Making best use of model evaluations to
       compute sensitivity indices." Computer Physics Communications,
       145(2):280-297, :doi:`10.1016/S0010-4655(02)00280-1`, 2002.
    .. [4] Saltelli, A., M. Ratto, T. Andres, F. Campolongo, J. Cariboni,
       D. Gatelli, M. Saisana, and S. Tarantola. "Global Sensitivity
       Analysis. The Primer." 2007.
    .. [5] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    .. [6] Ishigami, T. and T. Homma. "An importance quantification
       technique in uncertainty analysis for computer models." IEEE,
       :doi:`10.1109/ISUMA.1990.151285`, 1990.

    Examples
    --------
    The following is an example with the Ishigami function [6]_

    .. math::

        Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1,

    with :math:`\mathbf{x} \in [-\pi, \pi]^3`. This function exhibits strong
    non-linearity and non-monotonicity.

    Remember, Sobol' indices assume that samples are independently
    distributed. In this case we use a uniform distribution on each
    marginals.

    >>> import numpy as np
    >>> from scipy.stats import sobol_indices, uniform
    >>> rng = np.random.default_rng()
    >>> def f_ishigami(x):
    ...     f_eval = (
    ...         np.sin(x[0])
    ...         + 7 * np.sin(x[1])**2
    ...         + 0.1 * (x[2]**4) * np.sin(x[0])
    ...     )
    ...     return f_eval
    >>> indices = sobol_indices(
    ...     func=f_ishigami, n=1024,
    ...     dists=[
    ...         uniform(loc=-np.pi, scale=2*np.pi),
    ...         uniform(loc=-np.pi, scale=2*np.pi),
    ...         uniform(loc=-np.pi, scale=2*np.pi)
    ...     ],
    ...     random_state=rng
    ... )
    >>> indices.first_order
    array([0.31637954, 0.43781162, 0.00318825])
    >>> indices.total_order
    array([0.56122127, 0.44287857, 0.24229595])

    Confidence interval can be obtained using bootstrapping.

    >>> boot = indices.bootstrap()

    Then, this information can be easily visualized.

    >>> import matplotlib.pyplot as plt
    >>> fig, axs = plt.subplots(1, 2, figsize=(9, 4))
    >>> _ = axs[0].errorbar(
    ...     [1, 2, 3], indices.first_order, fmt='o',
    ...     yerr=[
    ...         indices.first_order - boot.first_order.confidence_interval.low,
    ...         boot.first_order.confidence_interval.high - indices.first_order
    ...     ],
    ... )
    >>> axs[0].set_ylabel("First order Sobol' indices")
    >>> axs[0].set_xlabel('Input parameters')
    >>> axs[0].set_xticks([1, 2, 3])
    >>> _ = axs[1].errorbar(
    ...     [1, 2, 3], indices.total_order, fmt='o',
    ...     yerr=[
    ...         indices.total_order - boot.total_order.confidence_interval.low,
    ...         boot.total_order.confidence_interval.high - indices.total_order
    ...     ],
    ... )
    >>> axs[1].set_ylabel("Total order Sobol' indices")
    >>> axs[1].set_xlabel('Input parameters')
    >>> axs[1].set_xticks([1, 2, 3])
    >>> plt.tight_layout()
    >>> plt.show()

    .. note::

        By default, `scipy.stats.uniform` has support ``[0, 1]``.
        Using the parameters ``loc`` and ``scale``, one obtains the uniform
        distribution on ``[loc, loc + scale]``.

    This result is particularly interesting because the first order index
    :math:`S_{x_3} = 0` whereas its total order is
    :math:`S_{T_{x_3}} = 0.244`.
    This means that higher order interactions with :math:`x_3` are
    responsible for the difference. Almost 25% of the observed variance
    on the QoI is due to the correlations between :math:`x_3` and
    :math:`x_1`, although :math:`x_3` by itself has no impact on the QoI.

    The following gives a visual explanation of Sobol' indices on this
    function. Let's generate 1024 samples in :math:`[-\pi, \pi]^3` and
    calculate the value of the output.

    >>> from scipy.stats import qmc
    >>> n_dim = 3
    >>> p_labels = ['$x_1$', '$x_2$', '$x_3$']
    >>> sample = qmc.Sobol(d=n_dim, seed=rng).random(1024)
    >>> sample = qmc.scale(
    ...     sample=sample,
    ...     l_bounds=[-np.pi, -np.pi, -np.pi],
    ...     u_bounds=[np.pi, np.pi, np.pi]
    ... )
    >>> output = f_ishigami(sample.T)

    Now we can do scatter plots of the output with respect to each
    parameter. This gives a visual way to understand how each parameter
    impacts the output of the function.

    >>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4))
    >>> for i in range(n_dim):
    ...     xi = sample[:, i]
    ...     ax[i].scatter(xi, output, marker='+')
    ...     ax[i].set_xlabel(p_labels[i])
    >>> ax[0].set_ylabel('Y')
    >>> plt.tight_layout()
    >>> plt.show()

    Now Sobol' goes a step further:
    by conditioning the output value by given values of the parameter
    (black lines), the conditional output mean is computed. It corresponds
    to the term :math:`\mathbb{E}(Y|x_i)`. Taking the variance of this term
    gives the numerator of the Sobol' indices.

    >>> mini = np.min(output)
    >>> maxi = np.max(output)
    >>> n_bins = 10
    >>> bins = np.linspace(-np.pi, np.pi, num=n_bins, endpoint=False)
    >>> dx = bins[1] - bins[0]
    >>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4))
    >>> for i in range(n_dim):
    ...     xi = sample[:, i]
    ...     ax[i].scatter(xi, output, marker='+')
    ...     ax[i].set_xlabel(p_labels[i])
    ...     for bin_ in bins:
    ...         idx = np.where((bin_ <= xi) & (xi <= bin_ + dx))
    ...         xi_ = xi[idx]
    ...         y_ = output[idx]
    ...         ave_y_ = np.mean(y_)
    ...         ax[i].plot([bin_ + dx/2] * 2, [mini, maxi], c='k')
    ...         ax[i].scatter(bin_ + dx/2, ave_y_, c='r')
    >>> ax[0].set_ylabel('Y')
    >>> plt.tight_layout()
    >>> plt.show()

    Looking at :math:`x_3`, the variance
    of the mean is zero leading to :math:`S_{x_3} = 0`. But we can further
    observe that the variance of the output is not constant along the
    parameter values of :math:`x_3`. This heteroscedasticity is explained
    by higher order interactions. Moreover, an heteroscedasticity is also
    noticeable on :math:`x_1` leading to an interaction between :math:`x_3`
    and :math:`x_1`.
    On :math:`x_2`, the variance seems to be constant and thus null
    interaction with this parameter can be supposed.

    This case is fairly simple to analyse visually---although it is only a
    qualitative analysis. Nevertheless, when the number of input parameters
    increases such analysis becomes unrealistic as it would be difficult to
    conclude on high-order terms. Hence the benefit of using Sobol' indices.

    """
    random_state = check_random_state(random_state)

    # `n & (n - 1) == 0` is the standard power-of-two bit trick; the
    # `n != n_` part rejects non-integer inputs such as 4.5.
    n_ = int(n)
    if not (n_ & (n_ - 1) == 0) or n != n_:
        raise ValueError(
            "The balance properties of Sobol' points require 'n' "
            "to be a power of 2."
        )
    n = n_

    if not callable(method):
        # String dispatch table for the built-in estimators.
        indices_methods: dict[str, Callable] = {
            "saltelli_2010": saltelli_2010,
        }
        try:
            method = method.lower()  # type: ignore[assignment]
            indices_method_ = indices_methods[method]
        except KeyError as exc:
            message = (
                f"{method!r} is not a valid 'method'. It must be one of"
                f" {set(indices_methods)!r} or a callable."
            )
            raise ValueError(message) from exc
    else:
        # User-provided estimator: enforce the exact parameter names so it
        # can be called with keyword arguments below.
        indices_method_ = method
        sig = inspect.signature(indices_method_)

        if set(sig.parameters) != {'f_A', 'f_B', 'f_AB'}:
            message = (
                "If 'method' is a callable, it must have the following"
                f" signature: {inspect.signature(saltelli_2010)}"
            )
            raise ValueError(message)

    def indices_method(f_A, f_B, f_AB):
        """Wrap indices method to ensure proper output dimension.

        1D when single output, 2D otherwise.
        """
        return np.squeeze(indices_method_(f_A=f_A, f_B=f_B, f_AB=f_AB))

    if callable(func):
        if dists is None:
            raise ValueError(
                "'dists' must be defined when 'func' is a callable."
            )

        def wrapped_func(x):
            # Guarantee a 2D (s, n) output even for scalar-output models.
            return np.atleast_2d(func(x))

        A, B = sample_A_B(n=n, dists=dists, random_state=random_state)
        AB = sample_AB(A=A, B=B)

        f_A = wrapped_func(A)

        if f_A.shape[1] != n:
            raise ValueError(
                "'func' output should have a shape ``(s, -1)`` with ``s`` "
                "the number of output."
            )

        def funcAB(AB):
            # Flatten the d pages into one (d, n*d) batch so `func` is
            # called once, then restore the (d, s, n) page layout.
            d, d, n = AB.shape
            AB = np.moveaxis(AB, 0, -1).reshape(d, n*d)
            f_AB = wrapped_func(AB)
            return np.moveaxis(f_AB.reshape((-1, n, d)), -1, 0)

        f_B = wrapped_func(B)
        f_AB = funcAB(AB)
    else:
        message = (
            "When 'func' is a dictionary, it must contain the following "
            "keys: 'f_A', 'f_B' and 'f_AB'."
            "'f_A' and 'f_B' should have a shape ``(s, n)`` and 'f_AB' "
            "should have a shape ``(d, s, n)``."
        )
        try:
            f_A, f_B, f_AB = np.atleast_2d(
                func['f_A'], func['f_B'], func['f_AB']
            )
        except KeyError as exc:
            raise ValueError(message) from exc

        # NOTE(review): `f_AB.shape == f_A.shape` rejects an f_AB that was
        # promoted from 2D (it should be 3D, (d, s, n)); 3D vs 2D shape
        # tuples can never compare equal.
        if f_A.shape[1] != n or f_A.shape != f_B.shape or \
                f_AB.shape == f_A.shape or f_AB.shape[-1] % n != 0:
            raise ValueError(message)

    # Normalization by mean
    # Sobol', I. and Levitan, Y. L. (1999). On the use of variance reducing
    # multipliers in monte carlo computations of a global sensitivity index.
    # Computer Physics Communications, 117(1) :52-61.
    mean = np.mean([f_A, f_B], axis=(0, -1)).reshape(-1, 1)
    f_A -= mean
    f_B -= mean
    f_AB -= mean

    # Compute indices
    # Filter warnings for constant output as var = 0
    with np.errstate(divide='ignore', invalid='ignore'):
        first_order, total_order = indices_method(f_A=f_A, f_B=f_B, f_AB=f_AB)

    # null variance means null indices
    first_order[~np.isfinite(first_order)] = 0
    total_order[~np.isfinite(total_order)] = 0

    res = dict(
        first_order=first_order,
        total_order=total_order,
        _indices_method=indices_method,
        _f_A=f_A,
        _f_B=f_B,
        _f_AB=f_AB
    )

    # When we drew the samples ourselves, keep them for inspection.
    if callable(func):
        res.update(
            dict(
                _A=A,
                _B=B,
                _AB=AB,
            )
        )

    return SobolResult(**res)
parrot/lib/python3.10/site-packages/scipy/stats/_sobol.pyi ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Type stubs for the compiled (Cython) extension `scipy.stats._sobol`.
# Bodies are `...` by stub convention; the implementations live in the
# extension module. All functions below return None and appear to operate
# in place on their ndarray arguments — TODO confirm against the .pyx.
import numpy as np
from scipy._lib._util import IntNumber
from typing import Literal

def _initialize_v(
    v : np.ndarray,
    dim : IntNumber,
    bits: IntNumber
) -> None: ...

def _cscramble (
    dim : IntNumber,
    bits: IntNumber,
    ltm : np.ndarray,
    sv: np.ndarray
) -> None: ...

def _fill_p_cumulative(
    p: np.ndarray,
    p_cumulative: np.ndarray
) -> None: ...

def _draw(
    n : IntNumber,
    num_gen: IntNumber,
    dim: IntNumber,
    scale: float,
    sv: np.ndarray,
    quasi: np.ndarray,
    sample: np.ndarray
) -> None: ...

def _fast_forward(
    n: IntNumber,
    num_gen: IntNumber,
    dim: IntNumber,
    sv: np.ndarray,
    quasi: np.ndarray
) -> None: ...

def _categorize(
    draws: np.ndarray,
    p_cumulative: np.ndarray,
    result: np.ndarray
) -> None: ...

# Module-level constants exported by the extension.
_MAXDIM: Literal[21201]
_MAXDEG: Literal[18]

def _test_find_index(
    p_cumulative: np.ndarray,
    size: int,
    value: float
) -> int: ...
parrot/lib/python3.10/site-packages/scipy/stats/_stats_py.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/stats/_survival.py ADDED
@@ -0,0 +1,686 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+ from typing import TYPE_CHECKING
5
+ import warnings
6
+
7
+ import numpy as np
8
+ from scipy import special, interpolate, stats
9
+ from scipy.stats._censored_data import CensoredData
10
+ from scipy.stats._common import ConfidenceInterval
11
+
12
+ if TYPE_CHECKING:
13
+ from typing import Literal
14
+ import numpy.typing as npt
15
+
16
+
17
+ __all__ = ['ecdf', 'logrank']
18
+
19
+
20
+ @dataclass
21
+ class EmpiricalDistributionFunction:
22
+ """An empirical distribution function produced by `scipy.stats.ecdf`
23
+
24
+ Attributes
25
+ ----------
26
+ quantiles : ndarray
27
+ The unique values of the sample from which the
28
+ `EmpiricalDistributionFunction` was estimated.
29
+ probabilities : ndarray
30
+ The point estimates of the cumulative distribution function (CDF) or
31
+ its complement, the survival function (SF), corresponding with
32
+ `quantiles`.
33
+ """
34
+ quantiles: np.ndarray
35
+ probabilities: np.ndarray
36
+ # Exclude these from __str__
37
+ _n: np.ndarray = field(repr=False) # number "at risk"
38
+ _d: np.ndarray = field(repr=False) # number of "deaths"
39
+ _sf: np.ndarray = field(repr=False) # survival function for var estimate
40
+ _kind: str = field(repr=False) # type of function: "cdf" or "sf"
41
+
42
    def __init__(self, q, p, n, d, kind):
        """Build the estimator and its step-function interpolant.

        Parameters
        ----------
        q : ndarray
            Unique sample values (the quantiles), assumed sorted.
        p : ndarray
            CDF/SF point estimates corresponding with `q`.
        n : ndarray
            Number "at risk" at each quantile.
        d : ndarray
            Number of "deaths" at each quantile.
        kind : str
            Either ``'cdf'`` or ``'sf'``.
        """
        self.probabilities = p
        self.quantiles = q
        self._n = n
        self._d = d
        # Store the SF regardless of `kind`; used for variance estimation.
        self._sf = p if kind == 'sf' else 1 - p
        self._kind = kind

        f0 = 1 if kind == 'sf' else 0  # leftmost function value
        f1 = 1 - f0
        # fill_value can't handle edge cases at infinity, so pad the data
        # with +/-inf so the interpolant is defined on the whole real line.
        x = np.insert(q, [0, len(q)], [-np.inf, np.inf])
        y = np.insert(p, [0, len(p)], [f0, f1])
        # 'previous' makes a right-continuous step function: each value
        # holds from its quantile up to (but excluding) the next one.
        self._f = interpolate.interp1d(x, y, kind='previous',
                                       assume_sorted=True)
58
+
59
    def evaluate(self, x):
        """Evaluate the empirical CDF/SF function at the input.

        Parameters
        ----------
        x : ndarray
            Argument to the CDF/SF

        Returns
        -------
        y : ndarray
            The CDF/SF evaluated at the input
        """
        # Piecewise-constant interpolant built in __init__; the +/-inf
        # padding there makes it defined for any finite input.
        return self._f(x)
73
+
74
    def plot(self, ax=None, **matplotlib_kwargs):
        """Plot the empirical distribution function

        Available only if ``matplotlib`` is installed.

        Parameters
        ----------
        ax : matplotlib.axes.Axes
            Axes object to draw the plot onto, otherwise uses the current
            Axes.

        **matplotlib_kwargs : dict, optional
            Keyword arguments passed directly to `matplotlib.axes.Axes.step`.
            Unless overridden, ``where='post'``.

        Returns
        -------
        lines : list of `matplotlib.lines.Line2D`
            Objects representing the plotted data
        """
        # matplotlib is an optional dependency; fail with a clear message
        # rather than a bare ImportError from deep inside.
        try:
            import matplotlib  # noqa: F401
        except ModuleNotFoundError as exc:
            message = "matplotlib must be installed to use method `plot`."
            raise ModuleNotFoundError(message) from exc

        if ax is None:
            import matplotlib.pyplot as plt
            ax = plt.gca()

        # 'post' steps match the right-continuous empirical function;
        # user-supplied kwargs may override it.
        kwargs = {'where': 'post'}
        kwargs.update(matplotlib_kwargs)

        delta = np.ptp(self.quantiles)*0.05  # how far past sample edge to plot
        q = self.quantiles
        # Extend one point on each side so the flat tails are visible.
        q = [q[0] - delta] + list(q) + [q[-1] + delta]

        return ax.step(q, self.evaluate(q), **kwargs)
111
+
112
+ def confidence_interval(self, confidence_level=0.95, *, method='linear'):
113
+ """Compute a confidence interval around the CDF/SF point estimate
114
+
115
+ Parameters
116
+ ----------
117
+ confidence_level : float, default: 0.95
118
+ Confidence level for the computed confidence interval
119
+
120
+ method : str, {"linear", "log-log"}
121
+ Method used to compute the confidence interval. Options are
122
+ "linear" for the conventional Greenwood confidence interval
123
+ (default) and "log-log" for the "exponential Greenwood",
124
+ log-negative-log-transformed confidence interval.
125
+
126
+ Returns
127
+ -------
128
+ ci : ``ConfidenceInterval``
129
+ An object with attributes ``low`` and ``high``, instances of
130
+ `~scipy.stats._result_classes.EmpiricalDistributionFunction` that
131
+ represent the lower and upper bounds (respectively) of the
132
+ confidence interval.
133
+
134
+ Notes
135
+ -----
136
+ Confidence intervals are computed according to the Greenwood formula
137
+ (``method='linear'``) or the more recent "exponential Greenwood"
138
+ formula (``method='log-log'``) as described in [1]_. The conventional
139
+ Greenwood formula can result in lower confidence limits less than 0
140
+ and upper confidence limits greater than 1; these are clipped to the
141
+ unit interval. NaNs may be produced by either method; these are
142
+ features of the formulas.
143
+
144
+ References
145
+ ----------
146
+ .. [1] Sawyer, Stanley. "The Greenwood and Exponential Greenwood
147
+ Confidence Intervals in Survival Analysis."
148
+ https://www.math.wustl.edu/~sawyer/handouts/greenwood.pdf
149
+
150
+ """
151
+ message = ("Confidence interval bounds do not implement a "
152
+ "`confidence_interval` method.")
153
+ if self._n is None:
154
+ raise NotImplementedError(message)
155
+
156
+ methods = {'linear': self._linear_ci,
157
+ 'log-log': self._loglog_ci}
158
+
159
+ message = f"`method` must be one of {set(methods)}."
160
+ if method.lower() not in methods:
161
+ raise ValueError(message)
162
+
163
+ message = "`confidence_level` must be a scalar between 0 and 1."
164
+ confidence_level = np.asarray(confidence_level)[()]
165
+ if confidence_level.shape or not (0 <= confidence_level <= 1):
166
+ raise ValueError(message)
167
+
168
+ method_fun = methods[method.lower()]
169
+ low, high = method_fun(confidence_level)
170
+
171
+ message = ("The confidence interval is undefined at some observations."
172
+ " This is a feature of the mathematical formula used, not"
173
+ " an error in its implementation.")
174
+ if np.any(np.isnan(low) | np.isnan(high)):
175
+ warnings.warn(message, RuntimeWarning, stacklevel=2)
176
+
177
+ low, high = np.clip(low, 0, 1), np.clip(high, 0, 1)
178
+ low = EmpiricalDistributionFunction(self.quantiles, low, None, None,
179
+ self._kind)
180
+ high = EmpiricalDistributionFunction(self.quantiles, high, None, None,
181
+ self._kind)
182
+ return ConfidenceInterval(low, high)
183
+
184
+ def _linear_ci(self, confidence_level):
185
+ sf, d, n = self._sf, self._d, self._n
186
+ # When n == d, Greenwood's formula divides by zero.
187
+ # When s != 0, this can be ignored: var == inf, and CI is [0, 1]
188
+ # When s == 0, this results in NaNs. Produce an informative warning.
189
+ with np.errstate(divide='ignore', invalid='ignore'):
190
+ var = sf ** 2 * np.cumsum(d / (n * (n - d)))
191
+
192
+ se = np.sqrt(var)
193
+ z = special.ndtri(1 / 2 + confidence_level / 2)
194
+
195
+ z_se = z * se
196
+ low = self.probabilities - z_se
197
+ high = self.probabilities + z_se
198
+
199
+ return low, high
200
+
201
+ def _loglog_ci(self, confidence_level):
202
+ sf, d, n = self._sf, self._d, self._n
203
+
204
+ with np.errstate(divide='ignore', invalid='ignore'):
205
+ var = 1 / np.log(sf) ** 2 * np.cumsum(d / (n * (n - d)))
206
+
207
+ se = np.sqrt(var)
208
+ z = special.ndtri(1 / 2 + confidence_level / 2)
209
+
210
+ with np.errstate(divide='ignore'):
211
+ lnl_points = np.log(-np.log(sf))
212
+
213
+ z_se = z * se
214
+ low = np.exp(-np.exp(lnl_points + z_se))
215
+ high = np.exp(-np.exp(lnl_points - z_se))
216
+ if self._kind == "cdf":
217
+ low, high = 1-high, 1-low
218
+
219
+ return low, high
220
+
221
+
222
@dataclass
class ECDFResult:
    """Result object returned by `scipy.stats.ecdf`.

    Attributes
    ----------
    cdf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
        An object representing the empirical cumulative distribution
        function.
    sf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
        An object representing the complement of the empirical cumulative
        distribution function.
    """
    cdf: EmpiricalDistributionFunction
    sf: EmpiricalDistributionFunction

    def __init__(self, q, cdf, sf, n, d):
        # Wrap the raw arrays in the user-facing function objects; `n` and
        # `d` (at-risk counts and event counts) enable confidence intervals.
        self.cdf = EmpiricalDistributionFunction(q, cdf, n, d, "cdf")
        self.sf = EmpiricalDistributionFunction(q, sf, n, d, "sf")
240
+
241
+
242
def _iv_CensoredData(
    sample: npt.ArrayLike | CensoredData, param_name: str = 'sample'
) -> CensoredData:
    """Coerce `sample` to `CensoredData`, renaming errors to `param_name`."""
    if isinstance(sample, CensoredData):
        return sample
    try:
        # `CensoredData` takes care of input standardization/validation.
        return CensoredData(uncensored=sample)
    except ValueError as e:
        # Re-raise with the caller-facing parameter name in the message.
        message = str(e).replace('uncensored', param_name)
        raise type(e)(message) from e
253
+
254
+
255
def ecdf(sample: npt.ArrayLike | CensoredData) -> ECDFResult:
    """Empirical cumulative distribution function of a sample.

    The empirical cumulative distribution function (ECDF) is a step function
    estimate of the CDF of the distribution underlying a sample. This
    function returns objects representing both the empirical distribution
    function and its complement, the empirical survival function.

    Parameters
    ----------
    sample : 1D array_like or `scipy.stats.CensoredData`
        Besides array_like, instances of `scipy.stats.CensoredData`
        containing uncensored and right-censored observations are
        supported. Currently, other instances of `scipy.stats.CensoredData`
        will result in a ``NotImplementedError``.

    Returns
    -------
    res : `~scipy.stats._result_classes.ECDFResult`
        An object with attributes ``cdf`` and ``sf``, each an
        `~scipy.stats._result_classes.EmpiricalDistributionFunction` with
        attributes ``quantiles`` (the unique values in the sample) and
        ``probabilities`` (the corresponding point estimates), and methods
        ``evaluate(x)``, ``plot(ax)``, and
        ``confidence_interval(confidence_level=0.95)``.

    Notes
    -----
    When each observation of the sample is a precise measurement, the ECDF
    steps up by ``1/len(sample)`` at each of the observations [1]_.

    When observations are lower bounds, upper bounds, or both, the data is
    said to be "censored", and `sample` may be provided as an instance of
    `scipy.stats.CensoredData`. For right-censored data, the ECDF is given
    by the Kaplan-Meier estimator [2]_; other forms of censoring are not
    supported at this time.

    Confidence intervals are computed according to the Greenwood formula or
    the more recent "Exponential Greenwood" formula as described in [4]_.

    References
    ----------
    .. [1] Conover, William Jay. Practical nonparametric statistics.
           Vol. 350. John Wiley & Sons, 1999.

    .. [2] Kaplan, Edward L., and Paul Meier. "Nonparametric estimation
           from incomplete observations." Journal of the American
           statistical association 53.282 (1958): 457-481.

    .. [3] Goel, Manish Kumar, Pardeep Khanna, and Jugal Kishore.
           "Understanding survival analysis: Kaplan-Meier estimate."
           International journal of Ayurveda research 1.4 (2010): 274.

    .. [4] Sawyer, Stanley. "The Greenwood and Exponential Greenwood
           Confidence Intervals in Survival Analysis."
           https://www.math.wustl.edu/~sawyer/handouts/greenwood.pdf

    Examples
    --------
    Uncensored data ([1]_ page 79): one-mile run times of five boys.

    >>> from scipy import stats
    >>> sample = [6.23, 5.58, 7.06, 6.42, 5.20]  # run times (minutes)
    >>> res = stats.ecdf(sample)
    >>> res.cdf.quantiles
    array([5.2 , 5.58, 6.23, 6.42, 7.06])
    >>> res.cdf.probabilities
    array([0.2, 0.4, 0.6, 0.8, 1. ])

    Right-censored data ([1]_ page 91): fanbelt survival mileages, where
    precise failure times of unbroken belts are unknown but exceed the
    recorded values.

    >>> broken = [77, 47, 81, 56, 80]  # thousands of miles driven
    >>> unbroken = [62, 60, 43, 71, 37]
    >>> sample = stats.CensoredData(uncensored=broken, right=unbroken)
    >>> res = stats.ecdf(sample)
    >>> res.sf.quantiles
    array([37., 43., 47., 56., 60., 62., 71., 77., 80., 81.])
    >>> res.sf.probabilities
    array([1.   , 1.   , 0.875, 0.75 , 0.75 , 0.75 , 0.75 , 0.5  , 0.25 ,
           0.   ])

    """
    sample = _iv_CensoredData(sample)
    n_censored = sample.num_censored()

    if n_censored == 0:
        t, cdf, sf, n, d = _ecdf_uncensored(sample._uncensor())
    elif n_censored == sample._right.size:
        t, cdf, sf, n, d = _ecdf_right_censored(sample)
    else:
        # Support additional censoring options in follow-up PRs
        raise NotImplementedError(
            "Currently, only uncensored and right-censored data is "
            "supported.")

    return ECDFResult(t, cdf, sf, n, d)
414
+
415
+
416
def _ecdf_uncensored(sample):
    """ECDF/ESF of an uncensored sample; see [1] pp. 81 and 89."""
    sample = np.sort(sample)
    x, counts = np.unique(sample, return_counts=True)
    n = sample.size

    # Cumulative event counts give "the fraction of [observations] that
    # are less than or equal to x" ([1] p. 81)...
    events = np.cumsum(counts)
    cdf = events / n

    # ...and its complement, "the relative frequency of the sample that
    # exceeds x in value" ([1] p. 89).
    sf = 1 - cdf

    # Number of subjects "at risk" just before each unique observation;
    # needed downstream for Greenwood confidence intervals.
    at_risk = np.concatenate(([n], n - events[:-1]))
    return x, cdf, sf, at_risk, counts
430
+
431
+
432
def _ecdf_right_censored(sample):
    """Kaplan-Meier ECDF/ESF of a right-censored sample.

    Right-censored data is conventionally discussed in terms of "survival
    time", "death", and "loss" (e.g. [2]); that terminology is used here.
    This implementation was influenced by the cited references and also
    https://www.youtube.com/watch?v=lxoWsVco_iM and
    https://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator; in
    retrospect it is most easily compared against [3]. The data must be
    sorted anyway, so the code is written to avoid a separate call to
    `unique` after sorting, and survival probabilities are computed at
    unique times only rather than at every observation.
    """
    tod = sample._uncensored  # times of "death"
    tol = sample._right       # times of "loss"
    times = np.concatenate((tod, tol))
    died = np.asarray([1]*tod.size + [0]*tol.size)

    # Order observations by time; count down the number still at risk.
    order = np.argsort(times)
    times = times[order]
    died = died[order]
    at_risk = np.arange(times.size, 0, -1)

    # Boolean masks locating the first (j_l) and last (j_r) occurrence of
    # each unique time within the sorted array.
    j = np.diff(times, prepend=-np.inf, append=np.inf) > 0
    j_l = j[:-1]
    j_r = j[1:]

    t = times[j_l]             # unique times
    n = at_risk[j_l]           # number at risk at each unique time
    cd = np.cumsum(died)[j_r]  # cumulative deaths through each unique time
    d = np.diff(cd, prepend=0)  # deaths at each unique time

    # Kaplan-Meier product-limit estimate of the survival function.
    sf = np.cumprod((n - d) / n)
    return t, 1 - sf, sf, n, d
470
+
471
+
472
@dataclass
class LogRankResult:
    """Result object returned by `scipy.stats.logrank`.

    Attributes
    ----------
    statistic : float ndarray
        The computed statistic (see `scipy.stats.logrank`). Its magnitude
        is the square root of the magnitude returned by most other logrank
        test implementations.
    pvalue : float ndarray
        The computed p-value of the test.
    """
    statistic: np.ndarray
    pvalue: np.ndarray
487
+
488
+
489
def logrank(
    x: npt.ArrayLike | CensoredData,
    y: npt.ArrayLike | CensoredData,
    alternative: Literal['two-sided', 'less', 'greater'] = "two-sided"
) -> LogRankResult:
    r"""Compare the survival distributions of two samples via the logrank test.

    Parameters
    ----------
    x, y : array_like or CensoredData
        Samples to compare based on their empirical survival functions.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. The null hypothesis is that
        the survival distributions of the two groups, say *X* and *Y*, are
        identical. The following alternative hypotheses [4]_ are available
        (default is 'two-sided'):

        * 'two-sided': the survival distributions of the two groups are
          not identical.
        * 'less': survival of group *X* is favored: the group *X* failure
          rate function is less than the group *Y* failure rate function
          at some times.
        * 'greater': survival of group *Y* is favored: the group *X*
          failure rate function is greater than the group *Y* failure rate
          function at some times.

    Returns
    -------
    res : `~scipy.stats._result_classes.LogRankResult`
        An object containing attributes ``statistic`` and ``pvalue``. The
        magnitude of ``statistic`` is the square root of the magnitude
        returned by most other logrank test implementations; see Notes.

    See Also
    --------
    scipy.stats.ecdf

    Notes
    -----
    The logrank test [1]_ compares the observed number of events to the
    expected number of events under the null hypothesis that the two
    samples were drawn from the same distribution. The statistic is

    .. math::

        Z_i = \frac{\sum_{j=1}^J(O_{i,j}-E_{i,j})}{\sqrt{\sum_{j=1}^J V_{i,j}}}
        \rightarrow \mathcal{N}(0,1)

    where

    .. math::

        E_{i,j} = O_j \frac{N_{i,j}}{N_j},
        \qquad
        V_{i,j} = E_{i,j} \left(\frac{N_j-O_j}{N_j}\right)
        \left(\frac{N_j-N_{i,j}}{N_j-1}\right),

    :math:`i` denotes the group (i.e. it may assume values :math:`x` or
    :math:`y`, or it may be omitted to refer to the combined sample),
    :math:`j` denotes the time (at which an event occurred), :math:`N` is
    the number of subjects at risk just before an event occurred, and
    :math:`O` is the observed number of events at that time.

    Under the null hypothesis, :math:`Z_x**2` is asymptotically
    chi-squared distributed with one degree of freedom, so :math:`Z_x` is
    asymptotically standard normal. Returning the signed :math:`Z_x`
    (rather than its square, as most implementations do) preserves the
    direction of the effect, enabling one-sided alternative hypotheses.

    References
    ----------
    .. [1] Mantel N. "Evaluation of survival data and two new rank order
           statistics arising in its consideration."
           Cancer Chemotherapy Reports, 50(3):163-170, PMID: 5910392, 1966
    .. [2] Bland, Altman, "The logrank test", BMJ, 328:1073,
           :doi:`10.1136/bmj.328.7447.1073`, 2004
    .. [3] "Logrank test", Wikipedia,
           https://en.wikipedia.org/wiki/Logrank_test
    .. [4] Brown, Mark. "On the choice of variance for the log rank test."
           Biometrika 71.1 (1984): 65-74.
    .. [5] Klein, John P., and Melvin L. Moeschberger. Survival analysis:
           techniques for censored and truncated data. Vol. 1230. New
           York: Springer, 2003.

    Examples
    --------
    Reference [2]_ compared the survival times of patients with two
    different types of recurrent malignant gliomas. The samples below
    record the time (number of weeks) for which each patient participated
    in the study; right-censored observations correspond with patients who
    left the study for reasons other than death.

    >>> from scipy import stats
    >>> x = stats.CensoredData(
    ...     uncensored=[6, 13, 21, 30, 37, 38, 49, 50,
    ...                 63, 79, 86, 98, 202, 219],
    ...     right=[31, 47, 80, 82, 82, 149]
    ... )
    >>> y = stats.CensoredData(
    ...     uncensored=[10, 10, 12, 13, 14, 15, 16, 17, 18, 20, 24, 24,
    ...                 25, 28, 30, 33, 35, 37, 40, 40, 46, 48, 76, 81,
    ...                 82, 91, 112, 181],
    ...     right=[34, 40, 70]
    ... )
    >>> res = stats.logrank(x=x, y=y)
    >>> res.statistic
    -2.73799...
    >>> res.pvalue
    0.00618...

    The p-value is less than 1%, so the data is evidence against the null
    hypothesis in favor of a difference between the two survival functions.

    """
    # Input validation. `alternative` IV handled in `_get_pvalue` below.
    x = _iv_CensoredData(sample=x, param_name='x')
    y = _iv_CensoredData(sample=y, param_name='y')

    # Pool the samples. (Under H0, the two groups are identical.)
    pooled = CensoredData(
        uncensored=np.concatenate((x._uncensored, y._uncensored)),
        right=np.concatenate((x._right, y._right))
    )

    # Restrict attention to the unique times of observed events.
    res = ecdf(pooled)
    event_mask = res.sf._d.astype(bool)
    times_xy = res.sf.quantiles[event_mask]  # unique event times
    at_risk_xy = res.sf._n[event_mask]       # pooled number at risk
    deaths_xy = res.sf._d[event_mask]        # pooled number of events

    # Number at risk in group X at each pooled event time; the appended 0
    # covers times beyond group X's last observation. (Could use
    # `interpolate_1d`, but this is more compact.)
    res_x = ecdf(x)
    k = np.searchsorted(res_x.sf.quantiles, times_xy)
    at_risk_x = np.append(res_x.sf._n, 0)[k]
    # Subtract from the pooled number at risk to get group Y's count.
    at_risk_y = at_risk_xy - at_risk_x

    # Hypergeometric variance of the number of deaths in group X. When
    # `at_risk_xy == 1`, the fraction is 0/0 but simplifies symbolically
    # to zero, so such terms are simply excluded.
    num = at_risk_x * at_risk_y * deaths_xy * (at_risk_xy - deaths_xy)
    den = at_risk_xy**2 * (at_risk_xy - 1)
    valid = at_risk_xy > 1
    sum_var = np.sum(num[valid]/den[valid])

    # Observed and expected number of deaths in group X.
    n_died_x = x._uncensored.size
    sum_exp_deaths_x = np.sum(at_risk_x * (deaths_xy/at_risk_xy))

    # Signed square root of the conventional (chi-squared) statistic.
    statistic = (n_died_x - sum_exp_deaths_x)/np.sqrt(sum_var)

    # Equivalent to chi2(df=1).sf(statistic**2) when alternative='two-sided'
    norm = stats._stats_py._SimpleNormal()
    pvalue = stats._stats_py._get_pvalue(statistic, norm, alternative, xp=np)

    return LogRankResult(statistic=statistic[()], pvalue=pvalue[()])
parrot/lib/python3.10/site-packages/scipy/stats/contingency.py ADDED
@@ -0,0 +1,468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Contingency table functions (:mod:`scipy.stats.contingency`)
3
+ ============================================================
4
+
5
+ Functions for creating and analyzing contingency tables.
6
+
7
+ .. currentmodule:: scipy.stats.contingency
8
+
9
+ .. autosummary::
10
+ :toctree: generated/
11
+
12
+ chi2_contingency
13
+ relative_risk
14
+ odds_ratio
15
+ crosstab
16
+ association
17
+
18
+ expected_freq
19
+ margins
20
+
21
+ """
22
+
23
+
24
+ from functools import reduce
25
+ import math
26
+ import numpy as np
27
+ from ._stats_py import power_divergence
28
+ from ._relative_risk import relative_risk
29
+ from ._crosstab import crosstab
30
+ from ._odds_ratio import odds_ratio
31
+ from scipy._lib._bunch import _make_tuple_bunch
32
+
33
+
34
+ __all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
35
+ 'association', 'relative_risk', 'odds_ratio']
36
+
37
+
38
def margins(a):
    """Return a list of the marginal sums of the array `a`.

    Parameters
    ----------
    a : ndarray
        The array for which to compute the marginal sums.

    Returns
    -------
    margsums : list of ndarrays
        A list of length `a.ndim`. `margsums[k]` is the result
        of summing `a` over all axes except `k`; it has the same
        number of dimensions as `a`, but the length of each axis
        except axis `k` will be 1.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.contingency import margins

    >>> a = np.arange(12).reshape(2, 6)
    >>> a
    array([[ 0,  1,  2,  3,  4,  5],
           [ 6,  7,  8,  9, 10, 11]])
    >>> m0, m1 = margins(a)
    >>> m0
    array([[15],
           [51]])
    >>> m1
    array([[ 6,  8, 10, 12, 14, 16]])

    >>> b = np.arange(24).reshape(2,3,4)
    >>> m0, m1, m2 = margins(b)
    >>> m0
    array([[[ 66]],
           [[210]]])
    >>> m1
    array([[[ 60],
            [ 92],
            [124]]])
    >>> m2
    array([[[60, 66, 72, 78]]])
    """
    axes = range(a.ndim)
    # Summing over every axis but `k` (dimensions kept) yields the
    # marginal along axis `k` in a broadcast-ready shape.
    return [np.apply_over_axes(np.sum, a, [j for j in axes if j != k])
            for k in axes]
88
+
89
+
90
def expected_freq(observed):
    """
    Compute the expected frequencies from a contingency table.

    Given an n-dimensional contingency table of observed frequencies,
    compute the expected frequencies for the table based on the marginal
    sums under the assumption that the groups associated with each
    dimension are independent.

    Parameters
    ----------
    observed : array_like
        The table of observed frequencies. (While this function can handle
        a 1-D array, that case is trivial. Generally `observed` is at
        least 2-D.)

    Returns
    -------
    expected : ndarray of float64
        The expected frequencies, based on the marginal sums of the table.
        Same shape as `observed`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.contingency import expected_freq
    >>> observed = np.array([[10, 10, 20],[20, 20, 20]])
    >>> expected_freq(observed)
    array([[ 12.,  12.,  16.],
           [ 18.,  18.,  24.]])

    """
    # Work in floating point up front: `observed` is typically an integer
    # array, and with many dimensions or large values the computations
    # below could otherwise overflow.
    observed = np.asarray(observed, dtype=np.float64)

    # Marginal sums keep their dimensions (length 1 on the summed axes),
    # which is exactly the shape needed for the broadcasting product.
    margsums = margins(observed)

    # Under independence: product of the marginals divided by the grand
    # total raised to the (ndim - 1) power.
    d = observed.ndim
    return reduce(np.multiply, margsums) / observed.sum() ** (d - 1)
136
+
137
+
138
# Bunch-like result type for `chi2_contingency`: behaves as a 4-tuple of
# (statistic, pvalue, dof, expected_freq) with named-attribute access.
Chi2ContingencyResult = _make_tuple_bunch(
    'Chi2ContingencyResult',
    ['statistic', 'pvalue', 'dof', 'expected_freq'],
    [],
)
142
+
143
+
144
+ def chi2_contingency(observed, correction=True, lambda_=None):
145
+ """Chi-square test of independence of variables in a contingency table.
146
+
147
+ This function computes the chi-square statistic and p-value for the
148
+ hypothesis test of independence of the observed frequencies in the
149
+ contingency table [1]_ `observed`. The expected frequencies are computed
150
+ based on the marginal sums under the assumption of independence; see
151
+ `scipy.stats.contingency.expected_freq`. The number of degrees of
152
+ freedom is (expressed using numpy functions and attributes)::
153
+
154
+ dof = observed.size - sum(observed.shape) + observed.ndim - 1
155
+
156
+
157
+ Parameters
158
+ ----------
159
+ observed : array_like
160
+ The contingency table. The table contains the observed frequencies
161
+ (i.e. number of occurrences) in each category. In the two-dimensional
162
+ case, the table is often described as an "R x C table".
163
+ correction : bool, optional
164
+ If True, *and* the degrees of freedom is 1, apply Yates' correction
165
+ for continuity. The effect of the correction is to adjust each
166
+ observed value by 0.5 towards the corresponding expected value.
167
+ lambda_ : float or str, optional
168
+ By default, the statistic computed in this test is Pearson's
169
+ chi-squared statistic [2]_. `lambda_` allows a statistic from the
170
+ Cressie-Read power divergence family [3]_ to be used instead. See
171
+ `scipy.stats.power_divergence` for details.
172
+
173
+ Returns
174
+ -------
175
+ res : Chi2ContingencyResult
176
+ An object containing attributes:
177
+
178
+ statistic : float
179
+ The test statistic.
180
+ pvalue : float
181
+ The p-value of the test.
182
+ dof : int
183
+ The degrees of freedom.
184
+ expected_freq : ndarray, same shape as `observed`
185
+ The expected frequencies, based on the marginal sums of the table.
186
+
187
+ See Also
188
+ --------
189
+ scipy.stats.contingency.expected_freq
190
+ scipy.stats.fisher_exact
191
+ scipy.stats.chisquare
192
+ scipy.stats.power_divergence
193
+ scipy.stats.barnard_exact
194
+ scipy.stats.boschloo_exact
195
+
196
+ Notes
197
+ -----
198
+ An often quoted guideline for the validity of this calculation is that
199
+ the test should be used only if the observed and expected frequencies
200
+ in each cell are at least 5.
201
+
202
+ This is a test for the independence of different categories of a
203
+ population. The test is only meaningful when the dimension of
204
+ `observed` is two or more. Applying the test to a one-dimensional
205
+ table will always result in `expected` equal to `observed` and a
206
+ chi-square statistic equal to 0.
207
+
208
+ This function does not handle masked arrays, because the calculation
209
+ does not make sense with missing values.
210
+
211
+ Like `scipy.stats.chisquare`, this function computes a chi-square
212
+ statistic; the convenience this function provides is to figure out the
213
+ expected frequencies and degrees of freedom from the given contingency
214
+ table. If these were already known, and if the Yates' correction was not
215
+ required, one could use `scipy.stats.chisquare`. That is, if one calls::
216
+
217
+ res = chi2_contingency(obs, correction=False)
218
+
219
+ then the following is true::
220
+
221
+ (res.statistic, res.pvalue) == stats.chisquare(obs.ravel(),
222
+ f_exp=ex.ravel(),
223
+ ddof=obs.size - 1 - dof)
224
+
225
+ The `lambda_` argument was added in version 0.13.0 of scipy.
226
+
227
+ References
228
+ ----------
229
+ .. [1] "Contingency table",
230
+ https://en.wikipedia.org/wiki/Contingency_table
231
+ .. [2] "Pearson's chi-squared test",
232
+ https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
233
+ .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
234
+ Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
235
+ pp. 440-464.
236
+ .. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of
237
+ Cardiovascular Events in Women and Men: A Sex-Specific
238
+ Meta-analysis of Randomized Controlled Trials."
239
+ JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006.
240
+
241
+ Examples
242
+ --------
243
+ In [4]_, the use of aspirin to prevent cardiovascular events in women
244
+ and men was investigated. The study notably concluded:
245
+
246
+ ...aspirin therapy reduced the risk of a composite of
247
+ cardiovascular events due to its effect on reducing the risk of
248
+ ischemic stroke in women [...]
249
+
250
+ The article lists studies of various cardiovascular events. Let's
251
+ focus on the ischemic stoke in women.
252
+
253
+ The following table summarizes the results of the experiment in which
254
+ participants took aspirin or a placebo on a regular basis for several
255
+ years. Cases of ischemic stroke were recorded::
256
+
257
+ Aspirin Control/Placebo
258
+ Ischemic stroke 176 230
259
+ No stroke 21035 21018
260
+
261
+ Is there evidence that the aspirin reduces the risk of ischemic stroke?
262
+ We begin by formulating a null hypothesis :math:`H_0`:
263
+
264
+ The effect of aspirin is equivalent to that of placebo.
265
+
266
+ Let's assess the plausibility of this hypothesis with
267
+ a chi-square test.
268
+
269
+ >>> import numpy as np
270
+ >>> from scipy.stats import chi2_contingency
271
+ >>> table = np.array([[176, 230], [21035, 21018]])
272
+ >>> res = chi2_contingency(table)
273
+ >>> res.statistic
274
+ 6.892569132546561
275
+ >>> res.pvalue
276
+ 0.008655478161175739
277
+
278
+ Using a significance level of 5%, we would reject the null hypothesis in
279
+ favor of the alternative hypothesis: "the effect of aspirin
280
+ is not equivalent to the effect of placebo".
281
+ Because `scipy.stats.contingency.chi2_contingency` performs a two-sided
282
+ test, the alternative hypothesis does not indicate the direction of the
283
+ effect. We can use `stats.contingency.odds_ratio` to support the
284
+ conclusion that aspirin *reduces* the risk of ischemic stroke.
285
+
286
+ Below are further examples showing how larger contingency tables can be
287
+ tested.
288
+
289
+ A two-way example (2 x 3):
290
+
291
+ >>> obs = np.array([[10, 10, 20], [20, 20, 20]])
292
+ >>> res = chi2_contingency(obs)
293
+ >>> res.statistic
294
+ 2.7777777777777777
295
+ >>> res.pvalue
296
+ 0.24935220877729619
297
+ >>> res.dof
298
+ 2
299
+ >>> res.expected_freq
300
+ array([[ 12., 12., 16.],
301
+ [ 18., 18., 24.]])
302
+
303
+ Perform the test using the log-likelihood ratio (i.e. the "G-test")
304
+ instead of Pearson's chi-squared statistic.
305
+
306
+ >>> res = chi2_contingency(obs, lambda_="log-likelihood")
307
+ >>> res.statistic
308
+ 2.7688587616781319
309
+ >>> res.pvalue
310
+ 0.25046668010954165
311
+
312
+ A four-way example (2 x 2 x 2 x 2):
313
+
314
+ >>> obs = np.array(
315
+ ... [[[[12, 17],
316
+ ... [11, 16]],
317
+ ... [[11, 12],
318
+ ... [15, 16]]],
319
+ ... [[[23, 15],
320
+ ... [30, 22]],
321
+ ... [[14, 17],
322
+ ... [15, 16]]]])
323
+ >>> res = chi2_contingency(obs)
324
+ >>> res.statistic
325
+ 8.7584514426741897
326
+ >>> res.pvalue
327
+ 0.64417725029295503
328
+ """
329
+ observed = np.asarray(observed)
330
+ if np.any(observed < 0):
331
+ raise ValueError("All values in `observed` must be nonnegative.")
332
+ if observed.size == 0:
333
+ raise ValueError("No data; `observed` has size 0.")
334
+
335
+ expected = expected_freq(observed)
336
+ if np.any(expected == 0):
337
+ # Include one of the positions where expected is zero in
338
+ # the exception message.
339
+ zeropos = list(zip(*np.nonzero(expected == 0)))[0]
340
+ raise ValueError("The internally computed table of expected "
341
+ f"frequencies has a zero element at {zeropos}.")
342
+
343
+ # The degrees of freedom
344
+ dof = expected.size - sum(expected.shape) + expected.ndim - 1
345
+
346
+ if dof == 0:
347
+ # Degenerate case; this occurs when `observed` is 1D (or, more
348
+ # generally, when it has only one nontrivial dimension). In this
349
+ # case, we also have observed == expected, so chi2 is 0.
350
+ chi2 = 0.0
351
+ p = 1.0
352
+ else:
353
+ if dof == 1 and correction:
354
+ # Adjust `observed` according to Yates' correction for continuity.
355
+ # Magnitude of correction no bigger than difference; see gh-13875
356
+ diff = expected - observed
357
+ direction = np.sign(diff)
358
+ magnitude = np.minimum(0.5, np.abs(diff))
359
+ observed = observed + magnitude * direction
360
+
361
+ chi2, p = power_divergence(observed, expected,
362
+ ddof=observed.size - 1 - dof, axis=None,
363
+ lambda_=lambda_)
364
+
365
+ return Chi2ContingencyResult(chi2, p, dof, expected)
366
+
367
+
368
+ def association(observed, method="cramer", correction=False, lambda_=None):
369
+ """Calculates degree of association between two nominal variables.
370
+
371
+ The function provides the option for computing one of three measures of
372
+ association between two nominal variables from the data given in a 2d
373
+ contingency table: Tschuprow's T, Pearson's Contingency Coefficient
374
+ and Cramer's V.
375
+
376
+ Parameters
377
+ ----------
378
+ observed : array-like
379
+ The array of observed values
380
+ method : {"cramer", "tschuprow", "pearson"} (default = "cramer")
381
+ The association test statistic.
382
+ correction : bool, optional
383
+ Inherited from `scipy.stats.contingency.chi2_contingency()`
384
+ lambda_ : float or str, optional
385
+ Inherited from `scipy.stats.contingency.chi2_contingency()`
386
+
387
+ Returns
388
+ -------
389
+ statistic : float
390
+ Value of the test statistic
391
+
392
+ Notes
393
+ -----
394
+ Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient, all
395
+ measure the degree to which two nominal or ordinal variables are related,
396
+ or the level of their association. This differs from correlation, although
397
+ many often mistakenly consider them equivalent. Correlation measures in
398
+ what way two variables are related, whereas, association measures how
399
+ related the variables are. As such, association does not subsume
400
+ independent variables, and is rather a test of independence. A value of
401
+ 1.0 indicates perfect association, and 0.0 means the variables have no
402
+ association.
403
+
404
+ Both the Cramer's V and Tschuprow's T are extensions of the phi
405
+ coefficient. Moreover, due to the close relationship between the
406
+ Cramer's V and Tschuprow's T the returned values can often be similar
407
+ or even equivalent. They are likely to diverge more as the array shape
408
+ diverges from a 2x2.
409
+
410
+ References
411
+ ----------
412
+ .. [1] "Tschuprow's T",
413
+ https://en.wikipedia.org/wiki/Tschuprow's_T
414
+ .. [2] Tschuprow, A. A. (1939)
415
+ Principles of the Mathematical Theory of Correlation;
416
+ translated by M. Kantorowitsch. W. Hodge & Co.
417
+ .. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V
418
+ .. [4] "Nominal Association: Phi and Cramer's V",
419
+ http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html
420
+ .. [5] Gingrich, Paul, "Association Between Variables",
421
+ http://uregina.ca/~gingrich/ch11a.pdf
422
+
423
+ Examples
424
+ --------
425
+ An example with a 4x2 contingency table:
426
+
427
+ >>> import numpy as np
428
+ >>> from scipy.stats.contingency import association
429
+ >>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
430
+
431
+ Pearson's contingency coefficient
432
+
433
+ >>> association(obs4x2, method="pearson")
434
+ 0.18303298140595667
435
+
436
+ Cramer's V
437
+
438
+ >>> association(obs4x2, method="cramer")
439
+ 0.18617813077483678
440
+
441
+ Tschuprow's T
442
+
443
+ >>> association(obs4x2, method="tschuprow")
444
+ 0.14146478765062995
445
+ """
446
+ arr = np.asarray(observed)
447
+ if not np.issubdtype(arr.dtype, np.integer):
448
+ raise ValueError("`observed` must be an integer array.")
449
+
450
+ if len(arr.shape) != 2:
451
+ raise ValueError("method only accepts 2d arrays")
452
+
453
+ chi2_stat = chi2_contingency(arr, correction=correction,
454
+ lambda_=lambda_)
455
+
456
+ phi2 = chi2_stat.statistic / arr.sum()
457
+ n_rows, n_cols = arr.shape
458
+ if method == "cramer":
459
+ value = phi2 / min(n_cols - 1, n_rows - 1)
460
+ elif method == "tschuprow":
461
+ value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1))
462
+ elif method == 'pearson':
463
+ value = phi2 / (1 + phi2)
464
+ else:
465
+ raise ValueError("Invalid argument value: 'method' argument must "
466
+ "be 'cramer', 'tschuprow', or 'pearson'")
467
+
468
+ return math.sqrt(value)
parrot/lib/python3.10/site-packages/scipy/stats/tests/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc ADDED
Binary file (16.6 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multicomp.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/stats/tests/common_tests.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+
3
+ import numpy as np
4
+ import numpy.testing as npt
5
+ from numpy.testing import assert_allclose, assert_equal
6
+ from pytest import raises as assert_raises
7
+
8
+ import numpy.ma.testutils as ma_npt
9
+
10
+ from scipy._lib._util import (
11
+ getfullargspec_no_self as _getfullargspec, np_long
12
+ )
13
+ from scipy._lib._array_api import xp_assert_equal
14
+ from scipy import stats
15
+
16
+
17
+ def check_named_results(res, attributes, ma=False, xp=None):
18
+ for i, attr in enumerate(attributes):
19
+ if ma:
20
+ ma_npt.assert_equal(res[i], getattr(res, attr))
21
+ elif xp is not None:
22
+ xp_assert_equal(res[i], getattr(res, attr))
23
+ else:
24
+ npt.assert_equal(res[i], getattr(res, attr))
25
+
26
+
27
+ def check_normalization(distfn, args, distname):
28
+ norm_moment = distfn.moment(0, *args)
29
+ npt.assert_allclose(norm_moment, 1.0)
30
+
31
+ if distname == "rv_histogram_instance":
32
+ atol, rtol = 1e-5, 0
33
+ else:
34
+ atol, rtol = 1e-7, 1e-7
35
+
36
+ normalization_expect = distfn.expect(lambda x: 1, args=args)
37
+ npt.assert_allclose(normalization_expect, 1.0, atol=atol, rtol=rtol,
38
+ err_msg=distname, verbose=True)
39
+
40
+ _a, _b = distfn.support(*args)
41
+ normalization_cdf = distfn.cdf(_b, *args)
42
+ npt.assert_allclose(normalization_cdf, 1.0)
43
+
44
+
45
+ def check_moment(distfn, arg, m, v, msg):
46
+ m1 = distfn.moment(1, *arg)
47
+ m2 = distfn.moment(2, *arg)
48
+ if not np.isinf(m):
49
+ npt.assert_almost_equal(m1, m, decimal=10,
50
+ err_msg=msg + ' - 1st moment')
51
+ else: # or np.isnan(m1),
52
+ npt.assert_(np.isinf(m1),
53
+ msg + ' - 1st moment -infinite, m1=%s' % str(m1))
54
+
55
+ if not np.isinf(v):
56
+ npt.assert_almost_equal(m2 - m1 * m1, v, decimal=10,
57
+ err_msg=msg + ' - 2ndt moment')
58
+ else: # or np.isnan(m2),
59
+ npt.assert_(np.isinf(m2), msg + f' - 2nd moment -infinite, {m2=}')
60
+
61
+
62
+ def check_mean_expect(distfn, arg, m, msg):
63
+ if np.isfinite(m):
64
+ m1 = distfn.expect(lambda x: x, arg)
65
+ npt.assert_almost_equal(m1, m, decimal=5,
66
+ err_msg=msg + ' - 1st moment (expect)')
67
+
68
+
69
+ def check_var_expect(distfn, arg, m, v, msg):
70
+ dist_looser_tolerances = {"rv_histogram_instance" , "ksone"}
71
+ kwargs = {'rtol': 5e-6} if msg in dist_looser_tolerances else {}
72
+ if np.isfinite(v):
73
+ m2 = distfn.expect(lambda x: x*x, arg)
74
+ npt.assert_allclose(m2, v + m*m, **kwargs)
75
+
76
+
77
+ def check_skew_expect(distfn, arg, m, v, s, msg):
78
+ if np.isfinite(s):
79
+ m3e = distfn.expect(lambda x: np.power(x-m, 3), arg)
80
+ npt.assert_almost_equal(m3e, s * np.power(v, 1.5),
81
+ decimal=5, err_msg=msg + ' - skew')
82
+ else:
83
+ npt.assert_(np.isnan(s))
84
+
85
+
86
+ def check_kurt_expect(distfn, arg, m, v, k, msg):
87
+ if np.isfinite(k):
88
+ m4e = distfn.expect(lambda x: np.power(x-m, 4), arg)
89
+ npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2),
90
+ atol=1e-5, rtol=1e-5,
91
+ err_msg=msg + ' - kurtosis')
92
+ elif not np.isposinf(k):
93
+ npt.assert_(np.isnan(k))
94
+
95
+
96
+ def check_munp_expect(dist, args, msg):
97
+ # If _munp is overridden, test a higher moment. (Before gh-18634, some
98
+ # distributions had issues with moments 5 and higher.)
99
+ if dist._munp.__func__ != stats.rv_continuous._munp:
100
+ res = dist.moment(5, *args) # shouldn't raise an error
101
+ ref = dist.expect(lambda x: x ** 5, args, lb=-np.inf, ub=np.inf)
102
+ if not np.isfinite(res): # could be valid; automated test can't know
103
+ return
104
+ # loose tolerance, mostly to see whether _munp returns *something*
105
+ assert_allclose(res, ref, atol=1e-10, rtol=1e-4,
106
+ err_msg=msg + ' - higher moment / _munp')
107
+
108
+
109
+ def check_entropy(distfn, arg, msg):
110
+ ent = distfn.entropy(*arg)
111
+ npt.assert_(not np.isnan(ent), msg + 'test Entropy is nan')
112
+
113
+
114
+ def check_private_entropy(distfn, args, superclass):
115
+ # compare a generic _entropy with the distribution-specific implementation
116
+ npt.assert_allclose(distfn._entropy(*args),
117
+ superclass._entropy(distfn, *args))
118
+
119
+
120
+ def check_entropy_vect_scale(distfn, arg):
121
+ # check 2-d
122
+ sc = np.asarray([[1, 2], [3, 4]])
123
+ v_ent = distfn.entropy(*arg, scale=sc)
124
+ s_ent = [distfn.entropy(*arg, scale=s) for s in sc.ravel()]
125
+ s_ent = np.asarray(s_ent).reshape(v_ent.shape)
126
+ assert_allclose(v_ent, s_ent, atol=1e-14)
127
+
128
+ # check invalid value, check cast
129
+ sc = [1, 2, -3]
130
+ v_ent = distfn.entropy(*arg, scale=sc)
131
+ s_ent = [distfn.entropy(*arg, scale=s) for s in sc]
132
+ s_ent = np.asarray(s_ent).reshape(v_ent.shape)
133
+ assert_allclose(v_ent, s_ent, atol=1e-14)
134
+
135
+
136
+ def check_edge_support(distfn, args):
137
+ # Make sure that x=self.a and self.b are handled correctly.
138
+ x = distfn.support(*args)
139
+ if isinstance(distfn, stats.rv_discrete):
140
+ x = x[0]-1, x[1]
141
+
142
+ npt.assert_equal(distfn.cdf(x, *args), [0.0, 1.0])
143
+ npt.assert_equal(distfn.sf(x, *args), [1.0, 0.0])
144
+
145
+ if distfn.name not in ('skellam', 'dlaplace'):
146
+ # with a = -inf, log(0) generates warnings
147
+ npt.assert_equal(distfn.logcdf(x, *args), [-np.inf, 0.0])
148
+ npt.assert_equal(distfn.logsf(x, *args), [0.0, -np.inf])
149
+
150
+ npt.assert_equal(distfn.ppf([0.0, 1.0], *args), x)
151
+ npt.assert_equal(distfn.isf([0.0, 1.0], *args), x[::-1])
152
+
153
+ # out-of-bounds for isf & ppf
154
+ npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all())
155
+ npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all())
156
+
157
+
158
+ def check_named_args(distfn, x, shape_args, defaults, meths):
159
+ ## Check calling w/ named arguments.
160
+
161
+ # check consistency of shapes, numargs and _parse signature
162
+ signature = _getfullargspec(distfn._parse_args)
163
+ npt.assert_(signature.varargs is None)
164
+ npt.assert_(signature.varkw is None)
165
+ npt.assert_(not signature.kwonlyargs)
166
+ npt.assert_(list(signature.defaults) == list(defaults))
167
+
168
+ shape_argnames = signature.args[:-len(defaults)] # a, b, loc=0, scale=1
169
+ if distfn.shapes:
170
+ shapes_ = distfn.shapes.replace(',', ' ').split()
171
+ else:
172
+ shapes_ = ''
173
+ npt.assert_(len(shapes_) == distfn.numargs)
174
+ npt.assert_(len(shapes_) == len(shape_argnames))
175
+
176
+ # check calling w/ named arguments
177
+ shape_args = list(shape_args)
178
+
179
+ vals = [meth(x, *shape_args) for meth in meths]
180
+ npt.assert_(np.all(np.isfinite(vals)))
181
+
182
+ names, a, k = shape_argnames[:], shape_args[:], {}
183
+ while names:
184
+ k.update({names.pop(): a.pop()})
185
+ v = [meth(x, *a, **k) for meth in meths]
186
+ npt.assert_array_equal(vals, v)
187
+ if 'n' not in k.keys():
188
+ # `n` is first parameter of moment(), so can't be used as named arg
189
+ npt.assert_equal(distfn.moment(1, *a, **k),
190
+ distfn.moment(1, *shape_args))
191
+
192
+ # unknown arguments should not go through:
193
+ k.update({'kaboom': 42})
194
+ assert_raises(TypeError, distfn.cdf, x, **k)
195
+
196
+
197
+ def check_random_state_property(distfn, args):
198
+ # check the random_state attribute of a distribution *instance*
199
+
200
+ # This test fiddles with distfn.random_state. This breaks other tests,
201
+ # hence need to save it and then restore.
202
+ rndm = distfn.random_state
203
+
204
+ # baseline: this relies on the global state
205
+ np.random.seed(1234)
206
+ distfn.random_state = None
207
+ r0 = distfn.rvs(*args, size=8)
208
+
209
+ # use an explicit instance-level random_state
210
+ distfn.random_state = 1234
211
+ r1 = distfn.rvs(*args, size=8)
212
+ npt.assert_equal(r0, r1)
213
+
214
+ distfn.random_state = np.random.RandomState(1234)
215
+ r2 = distfn.rvs(*args, size=8)
216
+ npt.assert_equal(r0, r2)
217
+
218
+ # check that np.random.Generator can be used (numpy >= 1.17)
219
+ if hasattr(np.random, 'default_rng'):
220
+ # obtain a np.random.Generator object
221
+ rng = np.random.default_rng(1234)
222
+ distfn.rvs(*args, size=1, random_state=rng)
223
+
224
+ # can override the instance-level random_state for an individual .rvs call
225
+ distfn.random_state = 2
226
+ orig_state = distfn.random_state.get_state()
227
+
228
+ r3 = distfn.rvs(*args, size=8, random_state=np.random.RandomState(1234))
229
+ npt.assert_equal(r0, r3)
230
+
231
+ # ... and that does not alter the instance-level random_state!
232
+ npt.assert_equal(distfn.random_state.get_state(), orig_state)
233
+
234
+ # finally, restore the random_state
235
+ distfn.random_state = rndm
236
+
237
+
238
+ def check_meth_dtype(distfn, arg, meths):
239
+ q0 = [0.25, 0.5, 0.75]
240
+ x0 = distfn.ppf(q0, *arg)
241
+ x_cast = [x0.astype(tp) for tp in (np_long, np.float16, np.float32,
242
+ np.float64)]
243
+
244
+ for x in x_cast:
245
+ # casting may have clipped the values, exclude those
246
+ distfn._argcheck(*arg)
247
+ x = x[(distfn.a < x) & (x < distfn.b)]
248
+ for meth in meths:
249
+ val = meth(x, *arg)
250
+ npt.assert_(val.dtype == np.float64)
251
+
252
+
253
+ def check_ppf_dtype(distfn, arg):
254
+ q0 = np.asarray([0.25, 0.5, 0.75])
255
+ q_cast = [q0.astype(tp) for tp in (np.float16, np.float32, np.float64)]
256
+ for q in q_cast:
257
+ for meth in [distfn.ppf, distfn.isf]:
258
+ val = meth(q, *arg)
259
+ npt.assert_(val.dtype == np.float64)
260
+
261
+
262
+ def check_cmplx_deriv(distfn, arg):
263
+ # Distributions allow complex arguments.
264
+ def deriv(f, x, *arg):
265
+ x = np.asarray(x)
266
+ h = 1e-10
267
+ return (f(x + h*1j, *arg)/h).imag
268
+
269
+ x0 = distfn.ppf([0.25, 0.51, 0.75], *arg)
270
+ x_cast = [x0.astype(tp) for tp in (np_long, np.float16, np.float32,
271
+ np.float64)]
272
+
273
+ for x in x_cast:
274
+ # casting may have clipped the values, exclude those
275
+ distfn._argcheck(*arg)
276
+ x = x[(distfn.a < x) & (x < distfn.b)]
277
+
278
+ pdf, cdf, sf = distfn.pdf(x, *arg), distfn.cdf(x, *arg), distfn.sf(x, *arg)
279
+ assert_allclose(deriv(distfn.cdf, x, *arg), pdf, rtol=1e-5)
280
+ assert_allclose(deriv(distfn.logcdf, x, *arg), pdf/cdf, rtol=1e-5)
281
+
282
+ assert_allclose(deriv(distfn.sf, x, *arg), -pdf, rtol=1e-5)
283
+ assert_allclose(deriv(distfn.logsf, x, *arg), -pdf/sf, rtol=1e-5)
284
+
285
+ assert_allclose(deriv(distfn.logpdf, x, *arg),
286
+ deriv(distfn.pdf, x, *arg) / distfn.pdf(x, *arg),
287
+ rtol=1e-5)
288
+
289
+
290
+ def check_pickling(distfn, args):
291
+ # check that a distribution instance pickles and unpickles
292
+ # pay special attention to the random_state property
293
+
294
+ # save the random_state (restore later)
295
+ rndm = distfn.random_state
296
+
297
+ # check unfrozen
298
+ distfn.random_state = 1234
299
+ distfn.rvs(*args, size=8)
300
+ s = pickle.dumps(distfn)
301
+ r0 = distfn.rvs(*args, size=8)
302
+
303
+ unpickled = pickle.loads(s)
304
+ r1 = unpickled.rvs(*args, size=8)
305
+ npt.assert_equal(r0, r1)
306
+
307
+ # also smoke test some methods
308
+ medians = [distfn.ppf(0.5, *args), unpickled.ppf(0.5, *args)]
309
+ npt.assert_equal(medians[0], medians[1])
310
+ npt.assert_equal(distfn.cdf(medians[0], *args),
311
+ unpickled.cdf(medians[1], *args))
312
+
313
+ # check frozen pickling/unpickling with rvs
314
+ frozen_dist = distfn(*args)
315
+ pkl = pickle.dumps(frozen_dist)
316
+ unpickled = pickle.loads(pkl)
317
+
318
+ r0 = frozen_dist.rvs(size=8)
319
+ r1 = unpickled.rvs(size=8)
320
+ npt.assert_equal(r0, r1)
321
+
322
+ # check pickling/unpickling of .fit method
323
+ if hasattr(distfn, "fit"):
324
+ fit_function = distfn.fit
325
+ pickled_fit_function = pickle.dumps(fit_function)
326
+ unpickled_fit_function = pickle.loads(pickled_fit_function)
327
+ assert fit_function.__name__ == unpickled_fit_function.__name__ == "fit"
328
+
329
+ # restore the random_state
330
+ distfn.random_state = rndm
331
+
332
+
333
+ def check_freezing(distfn, args):
334
+ # regression test for gh-11089: freezing a distribution fails
335
+ # if loc and/or scale are specified
336
+ if isinstance(distfn, stats.rv_continuous):
337
+ locscale = {'loc': 1, 'scale': 2}
338
+ else:
339
+ locscale = {'loc': 1}
340
+
341
+ rv = distfn(*args, **locscale)
342
+ assert rv.a == distfn(*args).a
343
+ assert rv.b == distfn(*args).b
344
+
345
+
346
+ def check_rvs_broadcast(distfunc, distname, allargs, shape, shape_only, otype):
347
+ np.random.seed(123)
348
+ sample = distfunc.rvs(*allargs)
349
+ assert_equal(sample.shape, shape, "%s: rvs failed to broadcast" % distname)
350
+ if not shape_only:
351
+ rvs = np.vectorize(lambda *allargs: distfunc.rvs(*allargs), otypes=otype)
352
+ np.random.seed(123)
353
+ expected = rvs(*allargs)
354
+ assert_allclose(sample, expected, rtol=1e-13)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_axis_nan_policy.py ADDED
@@ -0,0 +1,1290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Many scipy.stats functions support `axis` and `nan_policy` parameters.
2
+ # When the two are combined, it can be tricky to get all the behavior just
3
+ # right. This file contains a suite of common tests for scipy.stats functions
4
+ # that support `axis` and `nan_policy` and additional tests for some associated
5
+ # functions in stats._util.
6
+
7
+ from itertools import product, combinations_with_replacement, permutations
8
+ import os
9
+ import re
10
+ import pickle
11
+ import pytest
12
+ import warnings
13
+
14
+ import numpy as np
15
+ from numpy.testing import assert_allclose, assert_equal
16
+ from scipy import stats
17
+ from scipy.stats import norm # type: ignore[attr-defined]
18
+ from scipy.stats._axis_nan_policy import (_masked_arrays_2_sentinel_arrays,
19
+ SmallSampleWarning,
20
+ too_small_nd_omit, too_small_nd_not_omit,
21
+ too_small_1d_omit, too_small_1d_not_omit)
22
+ from scipy._lib._util import AxisError
23
+ from scipy.conftest import skip_xp_invalid_arg
24
+
25
+
26
+ SCIPY_XSLOW = int(os.environ.get('SCIPY_XSLOW', '0'))
27
+
28
+
29
+ def unpack_ttest_result(res):
30
+ low, high = res.confidence_interval()
31
+ return (res.statistic, res.pvalue, res.df, res._standard_error,
32
+ res._estimate, low, high)
33
+
34
+
35
+ def _get_ttest_ci(ttest):
36
+ # get a function that returns the CI bounds of provided `ttest`
37
+ def ttest_ci(*args, **kwargs):
38
+ res = ttest(*args, **kwargs)
39
+ return res.confidence_interval()
40
+ return ttest_ci
41
+
42
+
43
+ axis_nan_policy_cases = [
44
+ # function, args, kwds, number of samples, number of outputs,
45
+ # ... paired, unpacker function
46
+ # args, kwds typically aren't needed; just showing that they work
47
+ (stats.kruskal, tuple(), dict(), 3, 2, False, None), # 4 samples is slow
48
+ (stats.ranksums, ('less',), dict(), 2, 2, False, None),
49
+ (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, 2, False, None),
50
+ (stats.wilcoxon, ('pratt',), {'mode': 'auto'}, 2, 2, True,
51
+ lambda res: (res.statistic, res.pvalue)),
52
+ (stats.wilcoxon, tuple(), dict(), 1, 2, True,
53
+ lambda res: (res.statistic, res.pvalue)),
54
+ (stats.wilcoxon, tuple(), {'mode': 'approx'}, 1, 3, True,
55
+ lambda res: (res.statistic, res.pvalue, res.zstatistic)),
56
+ (stats.gmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
57
+ (stats.hmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
58
+ (stats.pmean, (1.42,), dict(), 1, 1, False, lambda x: (x,)),
59
+ (stats.sem, tuple(), dict(), 1, 1, False, lambda x: (x,)),
60
+ (stats.iqr, tuple(), dict(), 1, 1, False, lambda x: (x,)),
61
+ (stats.kurtosis, tuple(), dict(), 1, 1, False, lambda x: (x,)),
62
+ (stats.skew, tuple(), dict(), 1, 1, False, lambda x: (x,)),
63
+ (stats.kstat, tuple(), dict(), 1, 1, False, lambda x: (x,)),
64
+ (stats.kstatvar, tuple(), dict(), 1, 1, False, lambda x: (x,)),
65
+ (stats.moment, tuple(), dict(), 1, 1, False, lambda x: (x,)),
66
+ (stats.moment, tuple(), dict(order=[1, 2]), 1, 2, False, None),
67
+ (stats.jarque_bera, tuple(), dict(), 1, 2, False, None),
68
+ (stats.ttest_1samp, (np.array([0]),), dict(), 1, 7, False,
69
+ unpack_ttest_result),
70
+ (stats.ttest_rel, tuple(), dict(), 2, 7, True, unpack_ttest_result),
71
+ (stats.ttest_ind, tuple(), dict(), 2, 7, False, unpack_ttest_result),
72
+ (_get_ttest_ci(stats.ttest_1samp), (0,), dict(), 1, 2, False, None),
73
+ (_get_ttest_ci(stats.ttest_rel), tuple(), dict(), 2, 2, True, None),
74
+ (_get_ttest_ci(stats.ttest_ind), tuple(), dict(), 2, 2, False, None),
75
+ (stats.mode, tuple(), dict(), 1, 2, True, lambda x: (x.mode, x.count)),
76
+ (stats.differential_entropy, tuple(), dict(), 1, 1, False, lambda x: (x,)),
77
+ (stats.variation, tuple(), dict(), 1, 1, False, lambda x: (x,)),
78
+ (stats.friedmanchisquare, tuple(), dict(), 3, 2, True, None),
79
+ (stats.brunnermunzel, tuple(), dict(distribution='normal'), 2, 2, False, None),
80
+ (stats.mood, tuple(), {}, 2, 2, False, None),
81
+ (stats.shapiro, tuple(), {}, 1, 2, False, None),
82
+ (stats.ks_1samp, (norm().cdf,), dict(), 1, 4, False,
83
+ lambda res: (*res, res.statistic_location, res.statistic_sign)),
84
+ (stats.ks_2samp, tuple(), dict(), 2, 4, False,
85
+ lambda res: (*res, res.statistic_location, res.statistic_sign)),
86
+ (stats.kstest, (norm().cdf,), dict(), 1, 4, False,
87
+ lambda res: (*res, res.statistic_location, res.statistic_sign)),
88
+ (stats.kstest, tuple(), dict(), 2, 4, False,
89
+ lambda res: (*res, res.statistic_location, res.statistic_sign)),
90
+ (stats.levene, tuple(), {}, 2, 2, False, None),
91
+ (stats.fligner, tuple(), {'center': 'trimmed', 'proportiontocut': 0.01},
92
+ 2, 2, False, None),
93
+ (stats.ansari, tuple(), {}, 2, 2, False, None),
94
+ (stats.entropy, tuple(), dict(), 1, 1, False, lambda x: (x,)),
95
+ (stats.entropy, tuple(), dict(), 2, 1, True, lambda x: (x,)),
96
+ (stats.skewtest, tuple(), dict(), 1, 2, False, None),
97
+ (stats.kurtosistest, tuple(), dict(), 1, 2, False, None),
98
+ (stats.normaltest, tuple(), dict(), 1, 2, False, None),
99
+ (stats.cramervonmises, ("norm",), dict(), 1, 2, False,
100
+ lambda res: (res.statistic, res.pvalue)),
101
+ (stats.cramervonmises_2samp, tuple(), dict(), 2, 2, False,
102
+ lambda res: (res.statistic, res.pvalue)),
103
+ (stats.epps_singleton_2samp, tuple(), dict(), 2, 2, False, None),
104
+ (stats.bartlett, tuple(), {}, 2, 2, False, None),
105
+ (stats.tmean, tuple(), {}, 1, 1, False, lambda x: (x,)),
106
+ (stats.tvar, tuple(), {}, 1, 1, False, lambda x: (x,)),
107
+ (stats.tmin, tuple(), {}, 1, 1, False, lambda x: (x,)),
108
+ (stats.tmax, tuple(), {}, 1, 1, False, lambda x: (x,)),
109
+ (stats.tstd, tuple(), {}, 1, 1, False, lambda x: (x,)),
110
+ (stats.tsem, tuple(), {}, 1, 1, False, lambda x: (x,)),
111
+ (stats.circmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
112
+ (stats.circvar, tuple(), dict(), 1, 1, False, lambda x: (x,)),
113
+ (stats.circstd, tuple(), dict(), 1, 1, False, lambda x: (x,)),
114
+ (stats.f_oneway, tuple(), {}, 2, 2, False, None),
115
+ (stats.alexandergovern, tuple(), {}, 2, 2, False,
116
+ lambda res: (res.statistic, res.pvalue)),
117
+ (stats.combine_pvalues, tuple(), {}, 1, 2, False, None),
118
+ ]
119
+
120
+ # If the message is one of those expected, put nans in
121
+ # appropriate places of `statistics` and `pvalues`
122
+ too_small_messages = {"Degrees of freedom <= 0 for slice",
123
+ "x and y should have at least 5 elements",
124
+ "Data must be at least length 3",
125
+ "The sample must contain at least two",
126
+ "x and y must contain at least two",
127
+ "division by zero",
128
+ "Mean of empty slice",
129
+ "Data passed to ks_2samp must not be empty",
130
+ "Not enough test observations",
131
+ "Not enough other observations",
132
+ "Not enough observations.",
133
+ "At least one observation is required",
134
+ "zero-size array to reduction operation maximum",
135
+ "`x` and `y` must be of nonzero size.",
136
+ "The exact distribution of the Wilcoxon test",
137
+ "Data input must not be empty",
138
+ "Window length (0) must be positive and less",
139
+ "Window length (1) must be positive and less",
140
+ "Window length (2) must be positive and less",
141
+ "`skewtest` requires at least",
142
+ "`kurtosistest` requires at least",
143
+ "attempt to get argmax of an empty sequence",
144
+ "No array values within given limits",
145
+ "Input sample size must be greater than one.",
146
+ "invalid value encountered",
147
+ "divide by zero encountered",}
148
+
149
# If a warning message starts with one of these, the function's results may be
# inaccurate for the given sample, but NaNs are *not* to be placed — the
# warning is merely suppressed and the returned values are still compared.
inaccuracy_messages = {"Precision loss occurred in moment calculation",
                       "Sample size too small for normal approximation."}
153
+
154
# For some functions, nan_policy='propagate' should not just return NaNs;
# they define their own propagation behavior (tested separately).
override_propagate_funcs = {stats.mode}

# For some functions, empty arrays produce non-NaN results
# (e.g. `entropy` of an empty distribution), so the generic
# "empty input -> NaN output" expectation does not apply.
empty_special_case_funcs = {stats.entropy}

# Some functions don't follow the usual "too small" warning rules,
# so the harness must not expect a SmallSampleWarning from them.
too_small_special_case_funcs = {stats.entropy}
162
+
163
+ def _mixed_data_generator(n_samples, n_repetitions, axis, rng,
164
+ paired=False):
165
+ # generate random samples to check the response of hypothesis tests to
166
+ # samples with different (but broadcastable) shapes and various
167
+ # nan patterns (e.g. all nans, some nans, no nans) along axis-slices
168
+
169
+ data = []
170
+ for i in range(n_samples):
171
+ n_patterns = 6 # number of distinct nan patterns
172
+ n_obs = 20 if paired else 20 + i # observations per axis-slice
173
+ x = np.ones((n_repetitions, n_patterns, n_obs)) * np.nan
174
+
175
+ for j in range(n_repetitions):
176
+ samples = x[j, :, :]
177
+
178
+ # case 0: axis-slice with all nans (0 reals)
179
+ # cases 1-3: axis-slice with 1-3 reals (the rest nans)
180
+ # case 4: axis-slice with mostly (all but two) reals
181
+ # case 5: axis slice with all reals
182
+ for k, n_reals in enumerate([0, 1, 2, 3, n_obs-2, n_obs]):
183
+ # for cases 1-3, need paired nansw to be in the same place
184
+ indices = rng.permutation(n_obs)[:n_reals]
185
+ samples[k, indices] = rng.random(size=n_reals)
186
+
187
+ # permute the axis-slices just to show that order doesn't matter
188
+ samples[:] = rng.permutation(samples, axis=0)
189
+
190
+ # For multi-sample tests, we want to test broadcasting and check
191
+ # that nan policy works correctly for each nan pattern for each input.
192
+ # This takes care of both simultaneously.
193
+ new_shape = [n_repetitions] + [1]*n_samples + [n_obs]
194
+ new_shape[1 + i] = 6
195
+ x = x.reshape(new_shape)
196
+
197
+ x = np.moveaxis(x, -1, axis)
198
+ data.append(x)
199
+ return data
200
+
201
+
202
+ def _homogeneous_data_generator(n_samples, n_repetitions, axis, rng,
203
+ paired=False, all_nans=True):
204
+ # generate random samples to check the response of hypothesis tests to
205
+ # samples with different (but broadcastable) shapes and homogeneous
206
+ # data (all nans or all finite)
207
+ data = []
208
+ for i in range(n_samples):
209
+ n_obs = 20 if paired else 20 + i # observations per axis-slice
210
+ shape = [n_repetitions] + [1]*n_samples + [n_obs]
211
+ shape[1 + i] = 2
212
+ x = np.ones(shape) * np.nan if all_nans else rng.random(shape)
213
+ x = np.moveaxis(x, -1, axis)
214
+ data.append(x)
215
+ return data
216
+
217
+
218
def nan_policy_1d(hypotest, data1d, unpacker, *args, n_outputs=2,
                  nan_policy='raise', paired=False, _no_deco=True, **kwds):
    """Reference implementation of `nan_policy` behavior for 1-d samples."""
    has_nan = [np.isnan(sample).any() for sample in data1d]

    if nan_policy == 'raise':
        if any(has_nan):
            raise ValueError("The input contains nan values")

    elif (nan_policy == 'propagate'
            and hypotest not in override_propagate_funcs):
        # For all hypothesis tests tested, returning nans is the right thing.
        # But many hypothesis tests don't propagate correctly (e.g. they treat
        # np.nan the same as np.inf, which doesn't make sense when ranks are
        # involved) so override that behavior here.
        if any(has_nan):
            return np.full(n_outputs, np.nan)

    elif nan_policy == 'omit':
        # manually omit nans (or pairs in which at least one element is nan)
        if paired:
            drop = np.isnan(data1d[0])
            for sample in data1d[1:]:
                drop |= np.isnan(sample)
            data1d = [sample[~drop] for sample in data1d]
        else:
            data1d = [sample[~np.isnan(sample)] for sample in data1d]

    return unpacker(hypotest(*data1d, *args, _no_deco=_no_deco, **kwds))
248
+
249
+
250
# These three warnings are intentional
# For `wilcoxon` when the sample size < 50
@pytest.mark.filterwarnings('ignore:Sample size too small for normal:UserWarning')
# `kurtosistest` and `normaltest` when sample size < 20
@pytest.mark.filterwarnings('ignore:`kurtosistest` p-value may be:UserWarning')
# `foneway`
@pytest.mark.filterwarnings('ignore:all input arrays have length 1.:RuntimeWarning')

# The rest of these may or may not be desirable. They need further investigation
# to determine whether the function's decorator should define `too_small`.
# `bartlett`, `tvar`, `tstd`, `tsem`
@pytest.mark.filterwarnings('ignore:Degrees of freedom <= 0 for slice:RuntimeWarning')
# kstat, kstatvar, ttest_1samp, ttest_rel, ttest_ind, ttest_ci, brunnermunzel
# mood, levene, fligner, bartlett
@pytest.mark.filterwarnings('ignore:Invalid value encountered in:RuntimeWarning')
# kstatvar, ttest_1samp, ttest_rel, ttest_ci, brunnermunzel, levene, bartlett
@pytest.mark.filterwarnings('ignore:divide by zero encountered:RuntimeWarning')

@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("axis"), (1,))
@pytest.mark.parametrize(("data_generator"), ("mixed",))
def test_axis_nan_policy_fast(hypotest, args, kwds, n_samples, n_outputs,
                              paired, unpacker, nan_policy, axis,
                              data_generator):
    """Fast subset of the full nan_policy grid: one axis, one data pattern."""
    if hypotest in {stats.cramervonmises_2samp, stats.kruskal} and not SCIPY_XSLOW:
        pytest.skip("Too slow.")
    _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
                          unpacker, nan_policy, axis, data_generator)
280
+
281
+
282
if SCIPY_XSLOW:
    # Takes O(1 min) to run, and even skipping with the `xslow` decorator takes
    # about 3 sec because this is >3,000 tests. So ensure pytest doesn't see
    # them at all unless `SCIPY_XSLOW` is defined.

    # These three warnings are intentional
    # For `wilcoxon` when the sample size < 50
    @pytest.mark.filterwarnings('ignore:Sample size too small for normal:UserWarning')
    # `kurtosistest` and `normaltest` when sample size < 20
    @pytest.mark.filterwarnings('ignore:`kurtosistest` p-value may be:UserWarning')
    # `foneway`
    @pytest.mark.filterwarnings('ignore:all input arrays have length 1.:RuntimeWarning')

    # The rest of these may or may not be desirable. They need further investigation
    # to determine whether the function's decorator should define `too_small`.
    # `bartlett`, `tvar`, `tstd`, `tsem`
    @pytest.mark.filterwarnings('ignore:Degrees of freedom <= 0 for:RuntimeWarning')
    # kstat, kstatvar, ttest_1samp, ttest_rel, ttest_ind, ttest_ci, brunnermunzel
    # mood, levene, fligner, bartlett
    @pytest.mark.filterwarnings('ignore:Invalid value encountered in:RuntimeWarning')
    # kstatvar, ttest_1samp, ttest_rel, ttest_ci, brunnermunzel, levene, bartlett
    @pytest.mark.filterwarnings('ignore:divide by zero encountered:RuntimeWarning')

    @pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                              "paired", "unpacker"), axis_nan_policy_cases)
    @pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
    @pytest.mark.parametrize(("axis"), range(-3, 3))
    @pytest.mark.parametrize(("data_generator"),
                             ("all_nans", "all_finite", "mixed"))
    def test_axis_nan_policy_full(hypotest, args, kwds, n_samples, n_outputs,
                                  paired, unpacker, nan_policy, axis,
                                  data_generator):
        """Full nan_policy grid: every axis and every data pattern."""
        _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
                              unpacker, nan_policy, axis, data_generator)
316
+
317
+
318
def _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
                          unpacker, nan_policy, axis, data_generator):
    """Compare vectorized and 1-d behavior of `hypotest` against a reference.

    The reference is `nan_policy_1d` applied to each axis-slice via
    `np.ndenumerate`; the vectorized call must match it elementwise.
    """
    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
    # extract the statistic and p-value. For those that don't:
    if not unpacker:
        def unpacker(res):
            return res

    rng = np.random.default_rng(0)

    # Generate multi-dimensional test data with all important combinations
    # of patterns of nans along `axis`
    n_repetitions = 3  # number of repetitions of each pattern
    data_gen_kwds = {'n_samples': n_samples, 'n_repetitions': n_repetitions,
                     'axis': axis, 'rng': rng, 'paired': paired}
    if data_generator == 'mixed':
        inherent_size = 6  # number of distinct types of patterns
        data = _mixed_data_generator(**data_gen_kwds)
    elif data_generator == 'all_nans':
        inherent_size = 2  # hard-coded in _homogeneous_data_generator
        data_gen_kwds['all_nans'] = True
        data = _homogeneous_data_generator(**data_gen_kwds)
    elif data_generator == 'all_finite':
        inherent_size = 2  # hard-coded in _homogeneous_data_generator
        data_gen_kwds['all_nans'] = False
        data = _homogeneous_data_generator(**data_gen_kwds)

    output_shape = [n_repetitions] + [inherent_size]*n_samples

    # To generate reference behavior to compare against, loop over the axis-
    # slices in data. Make indexing easier by moving `axis` to the end and
    # broadcasting all samples to the same shape.
    data_b = [np.moveaxis(sample, axis, -1) for sample in data]
    data_b = [np.broadcast_to(sample, output_shape + [sample.shape[-1]])
              for sample in data_b]
    res_1d = np.zeros(output_shape + [n_outputs])

    for i, _ in np.ndenumerate(np.zeros(output_shape)):
        data1d = [sample[i] for sample in data_b]
        contains_nan = any([np.isnan(sample).any() for sample in data1d])

        # Take care of `nan_policy='raise'`.
        # Afterward, the 1D part of the test is over
        message = "The input contains nan values"
        if nan_policy == 'raise' and contains_nan:
            with pytest.raises(ValueError, match=message):
                nan_policy_1d(hypotest, data1d, unpacker, *args,
                              n_outputs=n_outputs,
                              nan_policy=nan_policy,
                              paired=paired, _no_deco=True, **kwds)

            with pytest.raises(ValueError, match=message):
                hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)

            continue

        # Take care of `nan_policy='propagate'` and `nan_policy='omit'`

        # Get results of simple reference implementation
        try:
            res_1da = nan_policy_1d(hypotest, data1d, unpacker, *args,
                                    n_outputs=n_outputs,
                                    nan_policy=nan_policy,
                                    paired=paired, _no_deco=True, **kwds)
        except (ValueError, RuntimeWarning, ZeroDivisionError) as ea:
            # a "too small" failure in the undecorated function maps to NaNs
            ea_str = str(ea)
            if any([str(ea_str).startswith(msg) for msg in too_small_messages]):
                res_1da = np.full(n_outputs, np.nan)
            else:
                raise

        # Get results of public function with 1D slices
        # Should warn for all slices
        if (nan_policy == 'omit' and data_generator == "all_nans"
                and hypotest not in too_small_special_case_funcs):
            with pytest.warns(SmallSampleWarning, match=too_small_1d_omit):
                res = hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)
        # warning depends on slice
        elif (nan_policy == 'omit' and data_generator == "mixed"
                and hypotest not in too_small_special_case_funcs):
            with np.testing.suppress_warnings() as sup:
                sup.filter(SmallSampleWarning, too_small_1d_omit)
                res = hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)
        # shouldn't complain if there are no NaNs
        else:
            res = hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)
        res_1db = unpacker(res)

        assert_equal(res_1db, res_1da)
        res_1d[i] = res_1db

    # move the outputs axis to the front to match the vectorized result layout
    res_1d = np.moveaxis(res_1d, -1, 0)

    # Perform a vectorized call to the hypothesis test.

    # If `nan_policy == 'raise'`, check that it raises the appropriate error.
    # Test is done, so return
    if nan_policy == 'raise' and not data_generator == "all_finite":
        message = 'The input contains nan values'
        with pytest.raises(ValueError, match=message):
            hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)
        return

    # If `nan_policy == 'omit', we might be left with a small sample.
    # Check for the appropriate warning.
    if (nan_policy == 'omit' and data_generator in {"all_nans", "mixed"}
            and hypotest not in too_small_special_case_funcs):
        with pytest.warns(SmallSampleWarning, match=too_small_nd_omit):
            res = hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)
    else:  # otherwise, there should be no warning
        res = hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)

    # Compare against the output against looping over 1D slices
    res_nd = unpacker(res)

    assert_allclose(res_nd, res_1d, rtol=1e-14)
437
+
438
+
439
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("data_generator"),
                         ("all_nans", "all_finite", "mixed", "empty"))
def test_axis_nan_policy_axis_is_None(hypotest, args, kwds, n_samples,
                                      n_outputs, paired, unpacker, nan_policy,
                                      data_generator):
    """Check for correct behavior when `axis=None` (samples are raveled)."""
    if not unpacker:
        def unpacker(res):
            return res

    rng = np.random.default_rng(0)

    if data_generator == "empty":
        data = [rng.random((2, 0)) for i in range(n_samples)]
    else:
        data = [rng.random((2, 20)) for i in range(n_samples)]

    if data_generator == "mixed":
        # sprinkle ~10% NaNs into each sample
        masks = [rng.random((2, 20)) > 0.9 for i in range(n_samples)]
        for sample, mask in zip(data, masks):
            sample[mask] = np.nan
    elif data_generator == "all_nans":
        data = [sample * np.nan for sample in data]

    data_raveled = [sample.ravel() for sample in data]

    if nan_policy == 'raise' and data_generator not in {"all_finite", "empty"}:
        message = 'The input contains nan values'

        # check for correct behavior whether or not data is 1d to begin with
        with pytest.raises(ValueError, match=message):
            hypotest(*data, axis=None, nan_policy=nan_policy,
                     *args, **kwds)
        with pytest.raises(ValueError, match=message):
            hypotest(*data_raveled, axis=None, nan_policy=nan_policy,
                     *args, **kwds)

        return

    # behavior of reference implementation with 1d input, public function with 1d
    # input, and public function with Nd input and `axis=None` should be consistent.
    # This means:
    # - If the reference version raises an error or emits a warning, it's because
    #   the sample is too small, so check that the public function emits an
    #   appropriate "too small" warning
    # - Any results returned by the three versions should be the same.
    with warnings.catch_warnings():  # treat warnings as errors
        warnings.simplefilter("error")

        ea_str, eb_str, ec_str = None, None, None
        try:
            res1da = nan_policy_1d(hypotest, data_raveled, unpacker, *args,
                                   n_outputs=n_outputs, nan_policy=nan_policy,
                                   paired=paired, _no_deco=True, **kwds)
        except (RuntimeWarning, ValueError, ZeroDivisionError) as ea:
            res1da = None
            ea_str = str(ea)

        try:
            res1db = hypotest(*data_raveled, *args, nan_policy=nan_policy, **kwds)
        except SmallSampleWarning as eb:
            eb_str = str(eb)

        try:
            res1dc = hypotest(*data, *args, axis=None, nan_policy=nan_policy, **kwds)
        except SmallSampleWarning as ec:
            ec_str = str(ec)

    if ea_str or eb_str or ec_str:  # *if* there is some sort of error or warning
        # If the reference implemented generated an error or warning, make sure the
        # message was one of the expected "too small" messages. Note that some
        # functions don't complain at all without the decorator; that's OK, too.
        ok_msg = any([str(ea_str).startswith(msg) for msg in too_small_messages])
        assert (ea_str is None) or ok_msg

        # make sure the wrapped function emits the *intended* warning
        desired_warnings = {too_small_1d_omit, too_small_1d_not_omit}
        assert str(eb_str) in desired_warnings
        assert str(ec_str) in desired_warnings

        with warnings.catch_warnings():  # ignore warnings to get return value
            warnings.simplefilter("ignore")
            res1db = hypotest(*data_raveled, *args, nan_policy=nan_policy, **kwds)
            res1dc = hypotest(*data, *args, axis=None, nan_policy=nan_policy, **kwds)

    # Make sure any results returned by reference/public function are identical
    # and all attributes are *NumPy* scalars
    res1db, res1dc = unpacker(res1db), unpacker(res1dc)
    assert_equal(res1dc, res1db)
    all_results = list(res1db) + list(res1dc)

    if res1da is not None:
        assert_equal(res1db, res1da)
        all_results += list(res1da)

    for item in all_results:
        assert np.issubdtype(item.dtype, np.number)
        assert np.isscalar(item)
540
+
541
+
542
# Test keepdims for:
# - single-output and multi-output functions (gmean and mannwhitneyu)
# - Axis negative, positive, None, and tuple
# - 1D with no NaNs
# - 1D with NaN propagation
# - Zero-sized output
@pytest.mark.filterwarnings('ignore:All axis-slices of one...')
@pytest.mark.filterwarnings('ignore:After omitting NaNs...')
# These were added in gh-20734 for `ttest_1samp`; they should be addressed and removed
@pytest.mark.filterwarnings('ignore:divide by zero encountered...')
@pytest.mark.filterwarnings('ignore:invalid value encountered...')
@pytest.mark.parametrize("nan_policy", ("omit", "propagate"))
@pytest.mark.parametrize(
    ("hypotest", "args", "kwds", "n_samples", "unpacker"),
    ((stats.gmean, tuple(), dict(), 1, lambda x: (x,)),
     (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, None),
     (stats.ttest_1samp, (0,), dict(), 1, unpack_ttest_result))
)
@pytest.mark.parametrize(
    ("sample_shape", "axis_cases"),
    (((2, 3, 3, 4), (None, 0, -1, (0, 2), (1, -1), (3, 1, 2, 0))),
     ((10, ), (0, -1)),
     ((20, 0), (0, 1)))
)
def test_keepdims(hypotest, args, kwds, n_samples, unpacker,
                  sample_shape, axis_cases, nan_policy):
    """Check that `keepdims=True` retains reduced axes as length-1 dims.

    For each axis case, the `keepdims=True` result must have the expected
    shape and, after squeezing the reduced axes, equal the `keepdims=False`
    result — both for clean data and for data containing NaNs.
    """
    if not unpacker:
        def unpacker(res):
            return res
    rng = np.random.default_rng(0)
    data = [rng.random(sample_shape) for _ in range(n_samples)]
    nan_data = [sample.copy() for sample in data]
    nan_mask = [rng.random(sample_shape) < 0.2 for _ in range(n_samples)]
    for sample, mask in zip(nan_data, nan_mask):
        sample[mask] = np.nan
    for axis in axis_cases:
        # Build the expected `keepdims=True` output shape: every reduced
        # axis becomes length 1.  (The previous implementation used
        # `np.ones(...)` for the `axis=None` case, yielding a tuple of
        # *floats*; use integer 1s so the shape comparison is exact.)
        if axis is None:
            expected_shape = (1,) * len(sample_shape)
        else:
            shape = list(sample_shape)
            axes = (axis,) if isinstance(axis, int) else axis
            for ax in axes:
                shape[ax] = 1
            expected_shape = tuple(shape)
        res = unpacker(hypotest(*data, *args, axis=axis, keepdims=True,
                                **kwds))
        res_base = unpacker(hypotest(*data, *args, axis=axis, keepdims=False,
                                     **kwds))
        nan_res = unpacker(hypotest(*nan_data, *args, axis=axis,
                                    keepdims=True, nan_policy=nan_policy,
                                    **kwds))
        nan_res_base = unpacker(hypotest(*nan_data, *args, axis=axis,
                                         keepdims=False,
                                         nan_policy=nan_policy, **kwds))
        for r, r_base, rn, rn_base in zip(res, res_base, nan_res,
                                          nan_res_base):
            assert r.shape == expected_shape
            r = np.squeeze(r, axis=axis)
            assert_equal(r, r_base)
            assert rn.shape == expected_shape
            rn = np.squeeze(rn, axis=axis)
            assert_equal(rn, rn_base)
607
+
608
+
609
@pytest.mark.parametrize(("fun", "nsamp"),
                         [(stats.kstat, 1),
                          (stats.kstatvar, 1)])
def test_hypotest_back_compat_no_axis(fun, nsamp):
    """Decorated functions must match both the undecorated and raveled calls."""
    n_rows, n_cols = 8, 9

    rng = np.random.default_rng(0)
    samples = rng.random((nsamp, n_rows, n_cols))
    observed = fun(*samples)
    undecorated = fun(*samples, _no_deco=True)
    flattened = fun([s.ravel() for s in samples])
    assert_equal(observed, undecorated)
    assert_equal(observed, flattened)
622
+
623
+
624
@pytest.mark.parametrize(("axis"), (0, 1, 2))
def test_axis_nan_policy_decorated_positional_axis(axis):
    """Decorated functions accept `axis` positionally or by keyword,
    and reject it being supplied both ways at once."""
    rng = np.random.default_rng(0)
    x = rng.random((8, 9, 10))
    y = rng.random((8, 9, 10))
    by_position = stats.mannwhitneyu(x, y, True, 'two-sided', axis)
    by_keyword = stats.mannwhitneyu(x, y, True, 'two-sided', axis=axis)
    assert_equal(by_position, by_keyword)

    message = "mannwhitneyu() got multiple values for argument 'axis'"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.mannwhitneyu(x, y, True, 'two-sided', axis, axis=axis)
641
+
642
+
643
def test_axis_nan_policy_decorated_positional_args():
    """Decorated *args functions take samples positionally only."""
    rng = np.random.default_rng(0)
    samples = rng.random((3, 8, 9, 10))
    samples[0, 0, 0, 0] = np.nan
    stats.kruskal(*samples)  # positional samples are accepted

    # the internal `samples` name must not leak as a keyword argument
    message = "kruskal() got an unexpected keyword argument 'samples'"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.kruskal(samples=samples)

    with pytest.raises(TypeError, match=re.escape(message)):
        stats.kruskal(*samples, samples=samples)
659
+
660
+
661
def test_axis_nan_policy_decorated_keyword_samples():
    """Decorated functions accept samples positionally or by keyword,
    and reject a sample being supplied both ways at once."""
    rng = np.random.default_rng(0)
    x = rng.random((2, 8, 9, 10))
    x[0, 0, 0, 0] = np.nan
    positional = stats.mannwhitneyu(*x)
    by_keyword = stats.mannwhitneyu(x=x[0], y=x[1])
    assert_equal(positional, by_keyword)

    message = "mannwhitneyu() got multiple values for argument"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.mannwhitneyu(*x, x=x[0], y=x[1])
677
+
678
+
679
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
def test_axis_nan_policy_decorated_pickled(hypotest, args, kwds, n_samples,
                                           n_outputs, paired, unpacker):
    """Decorated functions survive a pickle round-trip unchanged."""
    if "ttest_ci" in hypotest.__name__:
        pytest.skip("Can't pickle functions defined within functions.")

    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
    # extract the statistic and p-value. For those that don't:
    if not unpacker:
        def unpacker(res):
            return res

    rng = np.random.default_rng(0)
    data = rng.uniform(size=(n_samples, 2, 30))
    roundtripped = pickle.loads(pickle.dumps(hypotest))
    res_original = unpacker(hypotest(*data, *args, axis=-1, **kwds))
    res_roundtrip = unpacker(roundtripped(*data, *args, axis=-1, **kwds))
    assert_allclose(res_original, res_roundtrip, rtol=1e-12)
700
+
701
+
702
def test_check_empty_inputs():
    """Check `_check_empty_inputs` for single-sample inputs.

    For non-empty input it must return None; for empty input it must return
    NaNs (or an empty array) shaped like an axis-reduction.  `np.mean` is
    the reference because it also "consumes" `axis` and keeps other axes.
    """
    for ndim in range(5):
        for shape in combinations_with_replacement([0, 1, 2], ndim):
            for axis in range(len(shape)):
                samples = (np.zeros(shape),)
                output = stats._axis_nan_policy._check_empty_inputs(samples,
                                                                    axis)
                if output is None:
                    continue
                with np.testing.suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "Mean of empty slice.")
                    sup.filter(RuntimeWarning, "invalid value encountered")
                    reference = samples[0].mean(axis=axis)
                np.testing.assert_equal(output, reference)
722
+
723
+
724
+ def _check_arrays_broadcastable(arrays, axis):
725
+ # https://numpy.org/doc/stable/user/basics.broadcasting.html
726
+ # "When operating on two arrays, NumPy compares their shapes element-wise.
727
+ # It starts with the trailing (i.e. rightmost) dimensions and works its
728
+ # way left.
729
+ # Two dimensions are compatible when
730
+ # 1. they are equal, or
731
+ # 2. one of them is 1
732
+ # ...
733
+ # Arrays do not need to have the same number of dimensions."
734
+ # (Clarification: if the arrays are compatible according to the criteria
735
+ # above and an array runs out of dimensions, it is still compatible.)
736
+ # Below, we follow the rules above except ignoring `axis`
737
+
738
+ n_dims = max([arr.ndim for arr in arrays])
739
+ if axis is not None:
740
+ # convert to negative axis
741
+ axis = (-n_dims + axis) if axis >= 0 else axis
742
+
743
+ for dim in range(1, n_dims+1): # we'll index from -1 to -n_dims, inclusive
744
+ if -dim == axis:
745
+ continue # ignore lengths along `axis`
746
+
747
+ dim_lengths = set()
748
+ for arr in arrays:
749
+ if dim <= arr.ndim and arr.shape[-dim] != 1:
750
+ dim_lengths.add(arr.shape[-dim])
751
+
752
+ if len(dim_lengths) > 1:
753
+ return False
754
+ return True
755
+
756
+
757
@pytest.mark.slow
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
def test_empty(hypotest, args, kwds, n_samples, n_outputs, paired, unpacker):
    """Test for correct output shape when at least one input is empty."""
    if hypotest in {stats.kruskal, stats.friedmanchisquare} and not SCIPY_XSLOW:
        pytest.skip("Too slow.")

    if hypotest in override_propagate_funcs:
        reason = "Doesn't follow the usual pattern. Tested separately."
        pytest.skip(reason=reason)

    if unpacker is None:
        unpacker = lambda res: (res[0], res[1])  # noqa: E731

    def small_data_generator(n_samples, n_dims):
        # yield all combinations of `n_samples` small arrays

        def small_sample_generator(n_dims):
            # return all possible "small" arrays in up to n_dim dimensions
            for i in n_dims:
                # "small" means each dimension has length 0, 1, or 2
                for combo in combinations_with_replacement([0, 1, 2], i):
                    yield np.zeros(combo)

        # yield all possible combinations of small samples
        gens = [small_sample_generator(n_dims) for i in range(n_samples)]
        yield from product(*gens)

    n_dims = [1, 2, 3]
    for samples in small_data_generator(n_samples, n_dims):

        # this test is only for arrays of zero size
        if not any(sample.size == 0 for sample in samples):
            continue

        max_axis = max(sample.ndim for sample in samples)

        # need to test for all valid values of `axis` parameter, too
        for axis in range(-max_axis, max_axis):

            try:
                # After broadcasting, all arrays are the same shape, so
                # the shape of the output should be the same as a single-
                # sample statistic. Use np.mean as a reference.
                concat = stats._stats_py._broadcast_concatenate(samples, axis)
                with np.testing.suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "Mean of empty slice.")
                    sup.filter(RuntimeWarning, "invalid value encountered")
                    expected = np.mean(concat, axis=axis) * np.nan

                if hypotest in empty_special_case_funcs:
                    # e.g. `entropy` of empty input is a finite value, not NaN
                    empty_val = hypotest(*([[]]*len(samples)), *args, **kwds)
                    expected = np.asarray(expected)
                    mask = np.isnan(expected)
                    expected[mask] = empty_val
                    expected = expected[()]

                if expected.size and hypotest not in too_small_special_case_funcs:
                    message = (too_small_1d_not_omit if max_axis == 1
                               else too_small_nd_not_omit)
                    with pytest.warns(SmallSampleWarning, match=message):
                        res = hypotest(*samples, *args, axis=axis, **kwds)
                else:
                    with np.testing.suppress_warnings() as sup:
                        # f_oneway special case
                        sup.filter(SmallSampleWarning, "all input arrays have length 1")
                        res = hypotest(*samples, *args, axis=axis, **kwds)
                res = unpacker(res)

                for i in range(n_outputs):
                    assert_equal(res[i], expected)

            except ValueError:
                # confirm that the arrays truly are not broadcastable
                assert not _check_arrays_broadcastable(samples,
                                                       None if paired else axis)

                # confirm that _both_ `_broadcast_concatenate` and `hypotest`
                # produce this information.
                message = "Array shapes are incompatible for broadcasting."
                with pytest.raises(ValueError, match=message):
                    stats._stats_py._broadcast_concatenate(samples, axis, paired)
                with pytest.raises(ValueError, match=message):
                    hypotest(*samples, *args, axis=axis, **kwds)
841
+
842
+
843
def test_masked_array_2_sentinel_array():
    """Masked entries are replaced by a sentinel absent from the data;
    non-masked arrays pass through untouched."""
    # prepare arrays
    np.random.seed(0)
    A = np.random.rand(10, 11, 12)
    B = np.random.rand(12)
    A = np.ma.masked_array(A, A < 0.5)

    # set arbitrary elements to special values
    # (these values might have been considered for use as sentinel values)
    largest = np.finfo(np.float64).max
    second_largest = np.nextafter(largest, -np.inf)
    third_largest = np.nextafter(second_largest, -np.inf)
    A[3, 4, 1] = np.nan
    A[4, 5, 2] = np.inf
    A[5, 6, 3] = largest
    B[8] = np.nan
    B[7] = np.inf
    B[6] = second_largest

    # convert masked A to array with sentinel value, don't modify B
    (A_out, B_out), sentinel = _masked_arrays_2_sentinel_arrays([A, B])

    # the two largest candidates already appear in the data, so the
    # third-largest float must have been chosen as the sentinel
    assert sentinel != largest and sentinel != second_largest
    assert sentinel == third_largest

    # check that output arrays are as intended
    expected = A.data
    expected[A.mask] = sentinel
    np.testing.assert_array_equal(A_out, expected)
    assert B_out is B
876
+
877
+
878
@skip_xp_invalid_arg
def test_masked_dtype():
    """Check dtype promotion and sentinel selection for masked inputs.

    When _masked_arrays_2_sentinel_arrays was first added, it always
    upcast the arrays to np.float64. After gh16662, check expected promotion
    and that the expected sentinel is found.
    """
    # these are important because the max of the promoted dtype is the first
    # candidate to be the sentinel value
    max16 = np.iinfo(np.int16).max
    max128c = np.finfo(np.complex128).max

    # a is a regular array, b has masked elements, and c has no masked elements
    a = np.array([1, 2, max16], dtype=np.int16)
    b = np.ma.array([1, 2, 1], dtype=np.int8, mask=[0, 1, 0])
    c = np.ma.array([1, 2, 1], dtype=np.complex128, mask=[0, 0, 0])

    # check integer masked -> sentinel conversion
    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a, b])
    a_out, b_out = out_arrays
    assert sentinel == max16-1  # not max16 because max16 was in the data
    assert b_out.dtype == np.int16  # check expected promotion
    assert_allclose(b_out, [b[0], sentinel, b[-1]])  # check sentinel placement
    assert a_out is a  # not a masked array, so left untouched
    assert not isinstance(b_out, np.ma.MaskedArray)  # b became regular array

    # similarly with complex
    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([b, c])
    b_out, c_out = out_arrays
    assert sentinel == max128c  # max128c was not in the data
    assert b_out.dtype == np.complex128  # b got promoted
    assert_allclose(b_out, [b[0], sentinel, b[-1]])  # check sentinel placement
    assert not isinstance(b_out, np.ma.MaskedArray)  # b became regular array
    assert not isinstance(c_out, np.ma.MaskedArray)  # c became regular array

    # Also, check edge case when a sentinel value cannot be found in the data
    min8, max8 = np.iinfo(np.int8).min, np.iinfo(np.int8).max
    a = np.arange(min8, max8+1, dtype=np.int8)  # use all possible values
    mask1 = np.zeros_like(a, dtype=bool)
    mask0 = np.zeros_like(a, dtype=bool)

    # a masked value can be used as the sentinel
    mask1[1] = True
    a1 = np.ma.array(a, mask=mask1)
    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a1])
    assert sentinel == min8+1

    # unless it's the smallest possible; skipped for simplicity (see code)
    mask0[0] = True
    a0 = np.ma.array(a, mask=mask0)
    message = "This function replaces masked elements with sentinel..."
    with pytest.raises(ValueError, match=message):
        _masked_arrays_2_sentinel_arrays([a0])

    # test that dtype is preserved in functions
    a = np.ma.array([1, 2, 3], mask=[0, 1, 0], dtype=np.float32)
    assert stats.gmean(a).dtype == np.float32
934
+
935
+
936
+ def test_masked_stat_1d():
937
+ # basic test of _axis_nan_policy_factory with 1D masked sample
938
+ males = [19, 22, 16, 29, 24]
939
+ females = [20, 11, 17, 12]
940
+ res = stats.mannwhitneyu(males, females)
941
+
942
+ # same result when extra nan is omitted
943
+ females2 = [20, 11, 17, np.nan, 12]
944
+ res2 = stats.mannwhitneyu(males, females2, nan_policy='omit')
945
+ np.testing.assert_array_equal(res2, res)
946
+
947
+ # same result when extra element is masked
948
+ females3 = [20, 11, 17, 1000, 12]
949
+ mask3 = [False, False, False, True, False]
950
+ females3 = np.ma.masked_array(females3, mask=mask3)
951
+ res3 = stats.mannwhitneyu(males, females3)
952
+ np.testing.assert_array_equal(res3, res)
953
+
954
+ # same result when extra nan is omitted and additional element is masked
955
+ females4 = [20, 11, 17, np.nan, 1000, 12]
956
+ mask4 = [False, False, False, False, True, False]
957
+ females4 = np.ma.masked_array(females4, mask=mask4)
958
+ res4 = stats.mannwhitneyu(males, females4, nan_policy='omit')
959
+ np.testing.assert_array_equal(res4, res)
960
+
961
+ # same result when extra elements, including nan, are masked
962
+ females5 = [20, 11, 17, np.nan, 1000, 12]
963
+ mask5 = [False, False, False, True, True, False]
964
+ females5 = np.ma.masked_array(females5, mask=mask5)
965
+ res5 = stats.mannwhitneyu(males, females5, nan_policy='propagate')
966
+ res6 = stats.mannwhitneyu(males, females5, nan_policy='raise')
967
+ np.testing.assert_array_equal(res5, res)
968
+ np.testing.assert_array_equal(res6, res)
969
+
970
+
971
+ @pytest.mark.filterwarnings('ignore:After omitting NaNs...')
972
+ @pytest.mark.filterwarnings('ignore:One or more axis-slices of one...')
973
+ @skip_xp_invalid_arg
974
+ @pytest.mark.parametrize(("axis"), range(-3, 3))
975
+ def test_masked_stat_3d(axis):
976
+ # basic test of _axis_nan_policy_factory with 3D masked sample
977
+ np.random.seed(0)
978
+ a = np.random.rand(3, 4, 5)
979
+ b = np.random.rand(4, 5)
980
+ c = np.random.rand(4, 1)
981
+
982
+ mask_a = a < 0.1
983
+ mask_c = [False, False, False, True]
984
+ a_masked = np.ma.masked_array(a, mask=mask_a)
985
+ c_masked = np.ma.masked_array(c, mask=mask_c)
986
+
987
+ a_nans = a.copy()
988
+ a_nans[mask_a] = np.nan
989
+ c_nans = c.copy()
990
+ c_nans[mask_c] = np.nan
991
+
992
+ res = stats.kruskal(a_nans, b, c_nans, nan_policy='omit', axis=axis)
993
+ res2 = stats.kruskal(a_masked, b, c_masked, axis=axis)
994
+ np.testing.assert_array_equal(res, res2)
995
+
996
+
997
+ @pytest.mark.filterwarnings('ignore:After omitting NaNs...')
998
+ @pytest.mark.filterwarnings('ignore:One or more axis-slices of one...')
999
+ @skip_xp_invalid_arg
1000
+ def test_mixed_mask_nan_1():
1001
+ # targeted test of _axis_nan_policy_factory with 2D masked sample:
1002
+ # omitting samples with masks and nan_policy='omit' are equivalent
1003
+ # also checks paired-sample sentinel value removal
1004
+ m, n = 3, 20
1005
+ axis = -1
1006
+
1007
+ np.random.seed(0)
1008
+ a = np.random.rand(m, n)
1009
+ b = np.random.rand(m, n)
1010
+ mask_a1 = np.random.rand(m, n) < 0.2
1011
+ mask_a2 = np.random.rand(m, n) < 0.1
1012
+ mask_b1 = np.random.rand(m, n) < 0.15
1013
+ mask_b2 = np.random.rand(m, n) < 0.15
1014
+ mask_a1[2, :] = True
1015
+
1016
+ a_nans = a.copy()
1017
+ b_nans = b.copy()
1018
+ a_nans[mask_a1 | mask_a2] = np.nan
1019
+ b_nans[mask_b1 | mask_b2] = np.nan
1020
+
1021
+ a_masked1 = np.ma.masked_array(a, mask=mask_a1)
1022
+ b_masked1 = np.ma.masked_array(b, mask=mask_b1)
1023
+ a_masked1[mask_a2] = np.nan
1024
+ b_masked1[mask_b2] = np.nan
1025
+
1026
+ a_masked2 = np.ma.masked_array(a, mask=mask_a2)
1027
+ b_masked2 = np.ma.masked_array(b, mask=mask_b2)
1028
+ a_masked2[mask_a1] = np.nan
1029
+ b_masked2[mask_b1] = np.nan
1030
+
1031
+ a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2))
1032
+ b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2))
1033
+
1034
+ res = stats.wilcoxon(a_nans, b_nans, nan_policy='omit', axis=axis)
1035
+ res1 = stats.wilcoxon(a_masked1, b_masked1, nan_policy='omit', axis=axis)
1036
+ res2 = stats.wilcoxon(a_masked2, b_masked2, nan_policy='omit', axis=axis)
1037
+ res3 = stats.wilcoxon(a_masked3, b_masked3, nan_policy='raise', axis=axis)
1038
+ res4 = stats.wilcoxon(a_masked3, b_masked3,
1039
+ nan_policy='propagate', axis=axis)
1040
+
1041
+ np.testing.assert_array_equal(res1, res)
1042
+ np.testing.assert_array_equal(res2, res)
1043
+ np.testing.assert_array_equal(res3, res)
1044
+ np.testing.assert_array_equal(res4, res)
1045
+
1046
+
1047
+ @pytest.mark.filterwarnings('ignore:After omitting NaNs...')
1048
+ @pytest.mark.filterwarnings('ignore:One or more axis-slices of one...')
1049
+ @skip_xp_invalid_arg
1050
+ def test_mixed_mask_nan_2():
1051
+ # targeted test of _axis_nan_policy_factory with 2D masked sample:
1052
+ # check for expected interaction between masks and nans
1053
+
1054
+ # Cases here are
1055
+ # [mixed nan/mask, all nans, all masked,
1056
+ # unmasked nan, masked nan, unmasked non-nan]
1057
+ a = [[1, np.nan, 2], [np.nan, np.nan, np.nan], [1, 2, 3],
1058
+ [1, np.nan, 3], [1, np.nan, 3], [1, 2, 3]]
1059
+ mask = [[1, 0, 1], [0, 0, 0], [1, 1, 1],
1060
+ [0, 0, 0], [0, 1, 0], [0, 0, 0]]
1061
+ a_masked = np.ma.masked_array(a, mask=mask)
1062
+ b = [[4, 5, 6]]
1063
+ ref1 = stats.ranksums([1, 3], [4, 5, 6])
1064
+ ref2 = stats.ranksums([1, 2, 3], [4, 5, 6])
1065
+
1066
+ # nan_policy = 'omit'
1067
+ # all elements are removed from first three rows
1068
+ # middle element is removed from fourth and fifth rows
1069
+ # no elements removed from last row
1070
+ res = stats.ranksums(a_masked, b, nan_policy='omit', axis=-1)
1071
+ stat_ref = [np.nan, np.nan, np.nan,
1072
+ ref1.statistic, ref1.statistic, ref2.statistic]
1073
+ p_ref = [np.nan, np.nan, np.nan,
1074
+ ref1.pvalue, ref1.pvalue, ref2.pvalue]
1075
+ np.testing.assert_array_equal(res.statistic, stat_ref)
1076
+ np.testing.assert_array_equal(res.pvalue, p_ref)
1077
+
1078
+ # nan_policy = 'propagate'
1079
+ # nans propagate in first, second, and fourth row
1080
+ # all elements are removed by mask from third row
1081
+ # middle element is removed from fifth row
1082
+ # no elements removed from last row
1083
+ res = stats.ranksums(a_masked, b, nan_policy='propagate', axis=-1)
1084
+ stat_ref = [np.nan, np.nan, np.nan,
1085
+ np.nan, ref1.statistic, ref2.statistic]
1086
+ p_ref = [np.nan, np.nan, np.nan,
1087
+ np.nan, ref1.pvalue, ref2.pvalue]
1088
+ np.testing.assert_array_equal(res.statistic, stat_ref)
1089
+ np.testing.assert_array_equal(res.pvalue, p_ref)
1090
+
1091
+
1092
+ def test_axis_None_vs_tuple():
1093
+ # `axis` `None` should be equivalent to tuple with all axes
1094
+ shape = (3, 8, 9, 10)
1095
+ rng = np.random.default_rng(0)
1096
+ x = rng.random(shape)
1097
+ res = stats.kruskal(*x, axis=None)
1098
+ res2 = stats.kruskal(*x, axis=(0, 1, 2))
1099
+ np.testing.assert_array_equal(res, res2)
1100
+
1101
+
1102
+ def test_axis_None_vs_tuple_with_broadcasting():
1103
+ # `axis` `None` should be equivalent to tuple with all axes,
1104
+ # which should be equivalent to raveling the arrays before passing them
1105
+ rng = np.random.default_rng(0)
1106
+ x = rng.random((5, 1))
1107
+ y = rng.random((1, 5))
1108
+ x2, y2 = np.broadcast_arrays(x, y)
1109
+
1110
+ res0 = stats.mannwhitneyu(x.ravel(), y.ravel())
1111
+ res1 = stats.mannwhitneyu(x, y, axis=None)
1112
+ res2 = stats.mannwhitneyu(x, y, axis=(0, 1))
1113
+ res3 = stats.mannwhitneyu(x2.ravel(), y2.ravel())
1114
+
1115
+ assert res1 == res0
1116
+ assert res2 == res0
1117
+ assert res3 != res0
1118
+
1119
+
1120
+ @pytest.mark.parametrize(("axis"),
1121
+ list(permutations(range(-3, 3), 2)) + [(-4, 1)])
1122
+ def test_other_axis_tuples(axis):
1123
+ # Check that _axis_nan_policy_factory treats all `axis` tuples as expected
1124
+ rng = np.random.default_rng(0)
1125
+ shape_x = (4, 5, 6)
1126
+ shape_y = (1, 6)
1127
+ x = rng.random(shape_x)
1128
+ y = rng.random(shape_y)
1129
+ axis_original = axis
1130
+
1131
+ # convert axis elements to positive
1132
+ axis = tuple([(i if i >= 0 else 3 + i) for i in axis])
1133
+ axis = sorted(axis)
1134
+
1135
+ if len(set(axis)) != len(axis):
1136
+ message = "`axis` must contain only distinct elements"
1137
+ with pytest.raises(AxisError, match=re.escape(message)):
1138
+ stats.mannwhitneyu(x, y, axis=axis_original)
1139
+ return
1140
+
1141
+ if axis[0] < 0 or axis[-1] > 2:
1142
+ message = "`axis` is out of bounds for array of dimension 3"
1143
+ with pytest.raises(AxisError, match=re.escape(message)):
1144
+ stats.mannwhitneyu(x, y, axis=axis_original)
1145
+ return
1146
+
1147
+ res = stats.mannwhitneyu(x, y, axis=axis_original)
1148
+
1149
+ # reference behavior
1150
+ not_axis = {0, 1, 2} - set(axis) # which axis is not part of `axis`
1151
+ not_axis = next(iter(not_axis)) # take it out of the set
1152
+
1153
+ x2 = x
1154
+ shape_y_broadcasted = [1, 1, 6]
1155
+ shape_y_broadcasted[not_axis] = shape_x[not_axis]
1156
+ y2 = np.broadcast_to(y, shape_y_broadcasted)
1157
+
1158
+ m = x2.shape[not_axis]
1159
+ x2 = np.moveaxis(x2, axis, (1, 2))
1160
+ y2 = np.moveaxis(y2, axis, (1, 2))
1161
+ x2 = np.reshape(x2, (m, -1))
1162
+ y2 = np.reshape(y2, (m, -1))
1163
+ res2 = stats.mannwhitneyu(x2, y2, axis=1)
1164
+
1165
+ np.testing.assert_array_equal(res, res2)
1166
+
1167
+
1168
+ @pytest.mark.filterwarnings('ignore:After omitting NaNs...')
1169
+ @pytest.mark.filterwarnings('ignore:One or more axis-slices of one...')
1170
+ @skip_xp_invalid_arg
1171
+ @pytest.mark.parametrize(
1172
+ ("weighted_fun_name, unpacker"),
1173
+ [
1174
+ ("gmean", lambda x: x),
1175
+ ("hmean", lambda x: x),
1176
+ ("pmean", lambda x: x),
1177
+ ("combine_pvalues", lambda x: (x.pvalue, x.statistic)),
1178
+ ],
1179
+ )
1180
+ def test_mean_mixed_mask_nan_weights(weighted_fun_name, unpacker):
1181
+ # targeted test of _axis_nan_policy_factory with 2D masked sample:
1182
+ # omitting samples with masks and nan_policy='omit' are equivalent
1183
+ # also checks paired-sample sentinel value removal
1184
+
1185
+ if weighted_fun_name == 'pmean':
1186
+ def weighted_fun(a, **kwargs):
1187
+ return stats.pmean(a, p=0.42, **kwargs)
1188
+ else:
1189
+ weighted_fun = getattr(stats, weighted_fun_name)
1190
+
1191
+ def func(*args, **kwargs):
1192
+ return unpacker(weighted_fun(*args, **kwargs))
1193
+
1194
+ m, n = 3, 20
1195
+ axis = -1
1196
+
1197
+ rng = np.random.default_rng(6541968121)
1198
+ a = rng.uniform(size=(m, n))
1199
+ b = rng.uniform(size=(m, n))
1200
+ mask_a1 = rng.uniform(size=(m, n)) < 0.2
1201
+ mask_a2 = rng.uniform(size=(m, n)) < 0.1
1202
+ mask_b1 = rng.uniform(size=(m, n)) < 0.15
1203
+ mask_b2 = rng.uniform(size=(m, n)) < 0.15
1204
+ mask_a1[2, :] = True
1205
+
1206
+ a_nans = a.copy()
1207
+ b_nans = b.copy()
1208
+ a_nans[mask_a1 | mask_a2] = np.nan
1209
+ b_nans[mask_b1 | mask_b2] = np.nan
1210
+
1211
+ a_masked1 = np.ma.masked_array(a, mask=mask_a1)
1212
+ b_masked1 = np.ma.masked_array(b, mask=mask_b1)
1213
+ a_masked1[mask_a2] = np.nan
1214
+ b_masked1[mask_b2] = np.nan
1215
+
1216
+ a_masked2 = np.ma.masked_array(a, mask=mask_a2)
1217
+ b_masked2 = np.ma.masked_array(b, mask=mask_b2)
1218
+ a_masked2[mask_a1] = np.nan
1219
+ b_masked2[mask_b1] = np.nan
1220
+
1221
+ a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2))
1222
+ b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2))
1223
+
1224
+ mask_all = (mask_a1 | mask_a2 | mask_b1 | mask_b2)
1225
+ a_masked4 = np.ma.masked_array(a, mask=mask_all)
1226
+ b_masked4 = np.ma.masked_array(b, mask=mask_all)
1227
+
1228
+ with np.testing.suppress_warnings() as sup:
1229
+ message = 'invalid value encountered'
1230
+ sup.filter(RuntimeWarning, message)
1231
+ res = func(a_nans, weights=b_nans, nan_policy="omit", axis=axis)
1232
+ res1 = func(a_masked1, weights=b_masked1, nan_policy="omit", axis=axis)
1233
+ res2 = func(a_masked2, weights=b_masked2, nan_policy="omit", axis=axis)
1234
+ res3 = func(a_masked3, weights=b_masked3, nan_policy="raise", axis=axis)
1235
+ res4 = func(a_masked3, weights=b_masked3, nan_policy="propagate", axis=axis)
1236
+ # Would test with a_masked3/b_masked3, but there is a bug in np.average
1237
+ # that causes a bug in _no_deco mean with masked weights. Would use
1238
+ # np.ma.average, but that causes other problems. See numpy/numpy#7330.
1239
+ if weighted_fun_name in {"hmean"}:
1240
+ weighted_fun_ma = getattr(stats.mstats, weighted_fun_name)
1241
+ res5 = weighted_fun_ma(a_masked4, weights=b_masked4,
1242
+ axis=axis, _no_deco=True)
1243
+
1244
+ np.testing.assert_array_equal(res1, res)
1245
+ np.testing.assert_array_equal(res2, res)
1246
+ np.testing.assert_array_equal(res3, res)
1247
+ np.testing.assert_array_equal(res4, res)
1248
+ if weighted_fun_name in {"hmean"}:
1249
+ # _no_deco mean returns masked array, last element was masked
1250
+ np.testing.assert_allclose(res5.compressed(), res[~np.isnan(res)])
1251
+
1252
+
1253
+ def test_raise_invalid_args_g17713():
1254
+ # other cases are handled in:
1255
+ # test_axis_nan_policy_decorated_positional_axis - multiple values for arg
1256
+ # test_axis_nan_policy_decorated_positional_args - unexpected kwd arg
1257
+ message = "got an unexpected keyword argument"
1258
+ with pytest.raises(TypeError, match=message):
1259
+ stats.gmean([1, 2, 3], invalid_arg=True)
1260
+
1261
+ message = " got multiple values for argument"
1262
+ with pytest.raises(TypeError, match=message):
1263
+ stats.gmean([1, 2, 3], a=True)
1264
+
1265
+ message = "missing 1 required positional argument"
1266
+ with pytest.raises(TypeError, match=message):
1267
+ stats.gmean()
1268
+
1269
+ message = "takes from 1 to 4 positional arguments but 5 were given"
1270
+ with pytest.raises(TypeError, match=message):
1271
+ stats.gmean([1, 2, 3], 0, float, [1, 1, 1], 10)
1272
+
1273
+
1274
+ @pytest.mark.parametrize('dtype', [np.int16, np.float32, np.complex128])
1275
+ def test_array_like_input(dtype):
1276
+ # Check that `_axis_nan_policy`-decorated functions work with custom
1277
+ # containers that are coercible to numeric arrays
1278
+
1279
+ class ArrLike:
1280
+ def __init__(self, x, dtype):
1281
+ self._x = x
1282
+ self._dtype = dtype
1283
+
1284
+ def __array__(self, dtype=None, copy=None):
1285
+ return np.asarray(x, dtype=self._dtype)
1286
+
1287
+ x = [1]*2 + [3, 4, 5]
1288
+ res = stats.mode(ArrLike(x, dtype=dtype))
1289
+ assert res.mode == 1
1290
+ assert res.count == 2
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_binned_statistic.py ADDED
@@ -0,0 +1,568 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_allclose
3
+ import pytest
4
+ from pytest import raises as assert_raises
5
+ from scipy.stats import (binned_statistic, binned_statistic_2d,
6
+ binned_statistic_dd)
7
+ from scipy._lib._util import check_random_state
8
+
9
+ from .common_tests import check_named_results
10
+
11
+
12
+ class TestBinnedStatistic:
13
+
14
+ @classmethod
15
+ def setup_class(cls):
16
+ rng = check_random_state(9865)
17
+ cls.x = rng.uniform(size=100)
18
+ cls.y = rng.uniform(size=100)
19
+ cls.v = rng.uniform(size=100)
20
+ cls.X = rng.uniform(size=(100, 3))
21
+ cls.w = rng.uniform(size=100)
22
+ cls.u = rng.uniform(size=100) + 1e6
23
+
24
+ def test_1d_count(self):
25
+ x = self.x
26
+ v = self.v
27
+
28
+ count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
29
+ count2, edges2 = np.histogram(x, bins=10)
30
+
31
+ assert_allclose(count1, count2)
32
+ assert_allclose(edges1, edges2)
33
+
34
+ def test_gh5927(self):
35
+ # smoke test for gh5927 - binned_statistic was using `is` for string
36
+ # comparison
37
+ x = self.x
38
+ v = self.v
39
+ statistics = ['mean', 'median', 'count', 'sum']
40
+ for statistic in statistics:
41
+ binned_statistic(x, v, statistic, bins=10)
42
+
43
+ def test_big_number_std(self):
44
+ # tests for numerical stability of std calculation
45
+ # see issue gh-10126 for more
46
+ x = self.x
47
+ u = self.u
48
+ stat1, edges1, bc = binned_statistic(x, u, 'std', bins=10)
49
+ stat2, edges2, bc = binned_statistic(x, u, np.std, bins=10)
50
+
51
+ assert_allclose(stat1, stat2)
52
+
53
+ def test_empty_bins_std(self):
54
+ # tests that std returns gives nan for empty bins
55
+ x = self.x
56
+ u = self.u
57
+ print(binned_statistic(x, u, 'count', bins=1000))
58
+ stat1, edges1, bc = binned_statistic(x, u, 'std', bins=1000)
59
+ stat2, edges2, bc = binned_statistic(x, u, np.std, bins=1000)
60
+
61
+ assert_allclose(stat1, stat2)
62
+
63
+ def test_non_finite_inputs_and_int_bins(self):
64
+ # if either `values` or `sample` contain np.inf or np.nan throw
65
+ # see issue gh-9010 for more
66
+ x = self.x
67
+ u = self.u
68
+ orig = u[0]
69
+ u[0] = np.inf
70
+ assert_raises(ValueError, binned_statistic, u, x, 'std', bins=10)
71
+ # need to test for non-python specific ints, e.g. np.int8, np.int64
72
+ assert_raises(ValueError, binned_statistic, u, x, 'std',
73
+ bins=np.int64(10))
74
+ u[0] = np.nan
75
+ assert_raises(ValueError, binned_statistic, u, x, 'count', bins=10)
76
+ # replace original value, u belongs the class
77
+ u[0] = orig
78
+
79
+ def test_1d_result_attributes(self):
80
+ x = self.x
81
+ v = self.v
82
+
83
+ res = binned_statistic(x, v, 'count', bins=10)
84
+ attributes = ('statistic', 'bin_edges', 'binnumber')
85
+ check_named_results(res, attributes)
86
+
87
+ def test_1d_sum(self):
88
+ x = self.x
89
+ v = self.v
90
+
91
+ sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
92
+ sum2, edges2 = np.histogram(x, bins=10, weights=v)
93
+
94
+ assert_allclose(sum1, sum2)
95
+ assert_allclose(edges1, edges2)
96
+
97
+ def test_1d_mean(self):
98
+ x = self.x
99
+ v = self.v
100
+
101
+ stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
102
+ stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
103
+
104
+ assert_allclose(stat1, stat2)
105
+ assert_allclose(edges1, edges2)
106
+
107
+ def test_1d_std(self):
108
+ x = self.x
109
+ v = self.v
110
+
111
+ stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
112
+ stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
113
+
114
+ assert_allclose(stat1, stat2)
115
+ assert_allclose(edges1, edges2)
116
+
117
+ def test_1d_min(self):
118
+ x = self.x
119
+ v = self.v
120
+
121
+ stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10)
122
+ stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10)
123
+
124
+ assert_allclose(stat1, stat2)
125
+ assert_allclose(edges1, edges2)
126
+
127
+ def test_1d_max(self):
128
+ x = self.x
129
+ v = self.v
130
+
131
+ stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10)
132
+ stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10)
133
+
134
+ assert_allclose(stat1, stat2)
135
+ assert_allclose(edges1, edges2)
136
+
137
+ def test_1d_median(self):
138
+ x = self.x
139
+ v = self.v
140
+
141
+ stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
142
+ stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
143
+
144
+ assert_allclose(stat1, stat2)
145
+ assert_allclose(edges1, edges2)
146
+
147
+ def test_1d_bincode(self):
148
+ x = self.x[:20]
149
+ v = self.v[:20]
150
+
151
+ count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
152
+ bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
153
+ 1, 2, 1])
154
+
155
+ bcount = [(bc == i).sum() for i in np.unique(bc)]
156
+
157
+ assert_allclose(bc, bc2)
158
+ assert_allclose(bcount, count1)
159
+
160
+ def test_1d_range_keyword(self):
161
+ # Regression test for gh-3063, range can be (min, max) or [(min, max)]
162
+ np.random.seed(9865)
163
+ x = np.arange(30)
164
+ data = np.random.random(30)
165
+
166
+ mean, bins, _ = binned_statistic(x[:15], data[:15])
167
+ mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
168
+ mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
169
+
170
+ assert_allclose(mean, mean_range)
171
+ assert_allclose(bins, bins_range)
172
+ assert_allclose(mean, mean_range2)
173
+ assert_allclose(bins, bins_range2)
174
+
175
+ def test_1d_multi_values(self):
176
+ x = self.x
177
+ v = self.v
178
+ w = self.w
179
+
180
+ stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10)
181
+ stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10)
182
+ stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10)
183
+
184
+ assert_allclose(stat2[0], stat1v)
185
+ assert_allclose(stat2[1], stat1w)
186
+ assert_allclose(edges1v, edges2)
187
+ assert_allclose(bc1v, bc2)
188
+
189
+ def test_2d_count(self):
190
+ x = self.x
191
+ y = self.y
192
+ v = self.v
193
+
194
+ count1, binx1, biny1, bc = binned_statistic_2d(
195
+ x, y, v, 'count', bins=5)
196
+ count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
197
+
198
+ assert_allclose(count1, count2)
199
+ assert_allclose(binx1, binx2)
200
+ assert_allclose(biny1, biny2)
201
+
202
+ def test_2d_result_attributes(self):
203
+ x = self.x
204
+ y = self.y
205
+ v = self.v
206
+
207
+ res = binned_statistic_2d(x, y, v, 'count', bins=5)
208
+ attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
209
+ check_named_results(res, attributes)
210
+
211
+ def test_2d_sum(self):
212
+ x = self.x
213
+ y = self.y
214
+ v = self.v
215
+
216
+ sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
217
+ sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
218
+
219
+ assert_allclose(sum1, sum2)
220
+ assert_allclose(binx1, binx2)
221
+ assert_allclose(biny1, biny2)
222
+
223
+ def test_2d_mean(self):
224
+ x = self.x
225
+ y = self.y
226
+ v = self.v
227
+
228
+ stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
229
+ stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
230
+
231
+ assert_allclose(stat1, stat2)
232
+ assert_allclose(binx1, binx2)
233
+ assert_allclose(biny1, biny2)
234
+
235
+ def test_2d_mean_unicode(self):
236
+ x = self.x
237
+ y = self.y
238
+ v = self.v
239
+ stat1, binx1, biny1, bc = binned_statistic_2d(
240
+ x, y, v, 'mean', bins=5)
241
+ stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
242
+ assert_allclose(stat1, stat2)
243
+ assert_allclose(binx1, binx2)
244
+ assert_allclose(biny1, biny2)
245
+
246
+ def test_2d_std(self):
247
+ x = self.x
248
+ y = self.y
249
+ v = self.v
250
+
251
+ stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
252
+ stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
253
+
254
+ assert_allclose(stat1, stat2)
255
+ assert_allclose(binx1, binx2)
256
+ assert_allclose(biny1, biny2)
257
+
258
+ def test_2d_min(self):
259
+ x = self.x
260
+ y = self.y
261
+ v = self.v
262
+
263
+ stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5)
264
+ stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5)
265
+
266
+ assert_allclose(stat1, stat2)
267
+ assert_allclose(binx1, binx2)
268
+ assert_allclose(biny1, biny2)
269
+
270
+ def test_2d_max(self):
271
+ x = self.x
272
+ y = self.y
273
+ v = self.v
274
+
275
+ stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5)
276
+ stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5)
277
+
278
+ assert_allclose(stat1, stat2)
279
+ assert_allclose(binx1, binx2)
280
+ assert_allclose(biny1, biny2)
281
+
282
+ def test_2d_median(self):
283
+ x = self.x
284
+ y = self.y
285
+ v = self.v
286
+
287
+ stat1, binx1, biny1, bc = binned_statistic_2d(
288
+ x, y, v, 'median', bins=5)
289
+ stat2, binx2, biny2, bc = binned_statistic_2d(
290
+ x, y, v, np.median, bins=5)
291
+
292
+ assert_allclose(stat1, stat2)
293
+ assert_allclose(binx1, binx2)
294
+ assert_allclose(biny1, biny2)
295
+
296
+ def test_2d_bincode(self):
297
+ x = self.x[:20]
298
+ y = self.y[:20]
299
+ v = self.v[:20]
300
+
301
+ count1, binx1, biny1, bc = binned_statistic_2d(
302
+ x, y, v, 'count', bins=3)
303
+ bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
304
+ 6, 11, 16, 6, 6, 11, 8])
305
+
306
+ bcount = [(bc == i).sum() for i in np.unique(bc)]
307
+
308
+ assert_allclose(bc, bc2)
309
+ count1adj = count1[count1.nonzero()]
310
+ assert_allclose(bcount, count1adj)
311
+
312
+ def test_2d_multi_values(self):
313
+ x = self.x
314
+ y = self.y
315
+ v = self.v
316
+ w = self.w
317
+
318
+ stat1v, binx1v, biny1v, bc1v = binned_statistic_2d(
319
+ x, y, v, 'mean', bins=8)
320
+ stat1w, binx1w, biny1w, bc1w = binned_statistic_2d(
321
+ x, y, w, 'mean', bins=8)
322
+ stat2, binx2, biny2, bc2 = binned_statistic_2d(
323
+ x, y, [v, w], 'mean', bins=8)
324
+
325
+ assert_allclose(stat2[0], stat1v)
326
+ assert_allclose(stat2[1], stat1w)
327
+ assert_allclose(binx1v, binx2)
328
+ assert_allclose(biny1w, biny2)
329
+ assert_allclose(bc1v, bc2)
330
+
331
+ def test_2d_binnumbers_unraveled(self):
332
+ x = self.x
333
+ y = self.y
334
+ v = self.v
335
+
336
+ stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20)
337
+ stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10)
338
+
339
+ stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d(
340
+ x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True)
341
+
342
+ bcx3 = np.searchsorted(edgesx, x, side='right')
343
+ bcy3 = np.searchsorted(edgesy, y, side='right')
344
+
345
+ # `numpy.searchsorted` is non-inclusive on right-edge, compensate
346
+ bcx3[x == x.max()] -= 1
347
+ bcy3[y == y.max()] -= 1
348
+
349
+ assert_allclose(bcx, bc2[0])
350
+ assert_allclose(bcy, bc2[1])
351
+ assert_allclose(bcx3, bc2[0])
352
+ assert_allclose(bcy3, bc2[1])
353
+
354
+ def test_dd_count(self):
355
+ X = self.X
356
+ v = self.v
357
+
358
+ count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
359
+ count2, edges2 = np.histogramdd(X, bins=3)
360
+
361
+ assert_allclose(count1, count2)
362
+ assert_allclose(edges1, edges2)
363
+
364
+ def test_dd_result_attributes(self):
365
+ X = self.X
366
+ v = self.v
367
+
368
+ res = binned_statistic_dd(X, v, 'count', bins=3)
369
+ attributes = ('statistic', 'bin_edges', 'binnumber')
370
+ check_named_results(res, attributes)
371
+
372
+ def test_dd_sum(self):
373
+ X = self.X
374
+ v = self.v
375
+
376
+ sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
377
+ sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
378
+ sum3, edges3, bc = binned_statistic_dd(X, v, np.sum, bins=3)
379
+
380
+ assert_allclose(sum1, sum2)
381
+ assert_allclose(edges1, edges2)
382
+ assert_allclose(sum1, sum3)
383
+ assert_allclose(edges1, edges3)
384
+
385
+ def test_dd_mean(self):
386
+ X = self.X
387
+ v = self.v
388
+
389
+ stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
390
+ stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
391
+
392
+ assert_allclose(stat1, stat2)
393
+ assert_allclose(edges1, edges2)
394
+
395
+ def test_dd_std(self):
396
+ X = self.X
397
+ v = self.v
398
+
399
+ stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
400
+ stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
401
+
402
+ assert_allclose(stat1, stat2)
403
+ assert_allclose(edges1, edges2)
404
+
405
+ def test_dd_min(self):
406
+ X = self.X
407
+ v = self.v
408
+
409
+ stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3)
410
+ stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3)
411
+
412
+ assert_allclose(stat1, stat2)
413
+ assert_allclose(edges1, edges2)
414
+
415
+ def test_dd_max(self):
416
+ X = self.X
417
+ v = self.v
418
+
419
+ stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3)
420
+ stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3)
421
+
422
+ assert_allclose(stat1, stat2)
423
+ assert_allclose(edges1, edges2)
424
+
425
+ def test_dd_median(self):
426
+ X = self.X
427
+ v = self.v
428
+
429
+ stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
430
+ stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
431
+
432
+ assert_allclose(stat1, stat2)
433
+ assert_allclose(edges1, edges2)
434
+
435
+ def test_dd_bincode(self):
436
+ X = self.X[:20]
437
+ v = self.v[:20]
438
+
439
+ count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
440
+ bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
441
+ 32, 36, 91, 43, 87, 81, 81])
442
+
443
+ bcount = [(bc == i).sum() for i in np.unique(bc)]
444
+
445
+ assert_allclose(bc, bc2)
446
+ count1adj = count1[count1.nonzero()]
447
+ assert_allclose(bcount, count1adj)
448
+
449
+ def test_dd_multi_values(self):
450
+ X = self.X
451
+ v = self.v
452
+ w = self.w
453
+
454
+ for stat in ["count", "sum", "mean", "std", "min", "max", "median",
455
+ np.std]:
456
+ stat1v, edges1v, bc1v = binned_statistic_dd(X, v, stat, bins=8)
457
+ stat1w, edges1w, bc1w = binned_statistic_dd(X, w, stat, bins=8)
458
+ stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], stat, bins=8)
459
+ assert_allclose(stat2[0], stat1v)
460
+ assert_allclose(stat2[1], stat1w)
461
+ assert_allclose(edges1v, edges2)
462
+ assert_allclose(edges1w, edges2)
463
+ assert_allclose(bc1v, bc2)
464
+
465
+ def test_dd_binnumbers_unraveled(self):
466
+ X = self.X
467
+ v = self.v
468
+
469
+ stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15)
470
+ stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20)
471
+ stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10)
472
+
473
+ stat2, edges2, bc2 = binned_statistic_dd(
474
+ X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True)
475
+
476
+ assert_allclose(bcx, bc2[0])
477
+ assert_allclose(bcy, bc2[1])
478
+ assert_allclose(bcz, bc2[2])
479
+
480
+ def test_dd_binned_statistic_result(self):
481
+ # NOTE: tests the reuse of bin_edges from previous call
482
+ x = np.random.random((10000, 3))
483
+ v = np.random.random(10000)
484
+ bins = np.linspace(0, 1, 10)
485
+ bins = (bins, bins, bins)
486
+
487
+ result = binned_statistic_dd(x, v, 'mean', bins=bins)
488
+ stat = result.statistic
489
+
490
+ result = binned_statistic_dd(x, v, 'mean',
491
+ binned_statistic_result=result)
492
+ stat2 = result.statistic
493
+
494
+ assert_allclose(stat, stat2)
495
+
496
+ def test_dd_zero_dedges(self):
497
+ x = np.random.random((10000, 3))
498
+ v = np.random.random(10000)
499
+ bins = np.linspace(0, 1, 10)
500
+ bins = np.append(bins, 1)
501
+ bins = (bins, bins, bins)
502
+ with assert_raises(ValueError, match='difference is numerically 0'):
503
+ binned_statistic_dd(x, v, 'mean', bins=bins)
504
+
505
+ def test_dd_range_errors(self):
506
+ # Test that descriptive exceptions are raised as appropriate for bad
507
+ # values of the `range` argument. (See gh-12996)
508
+ with assert_raises(ValueError,
509
+ match='In range, start must be <= stop'):
510
+ binned_statistic_dd([self.y], self.v,
511
+ range=[[1, 0]])
512
+ with assert_raises(
513
+ ValueError,
514
+ match='In dimension 1 of range, start must be <= stop'):
515
+ binned_statistic_dd([self.x, self.y], self.v,
516
+ range=[[1, 0], [0, 1]])
517
+ with assert_raises(
518
+ ValueError,
519
+ match='In dimension 2 of range, start must be <= stop'):
520
+ binned_statistic_dd([self.x, self.y], self.v,
521
+ range=[[0, 1], [1, 0]])
522
+ with assert_raises(
523
+ ValueError,
524
+ match='range given for 1 dimensions; 2 required'):
525
+ binned_statistic_dd([self.x, self.y], self.v,
526
+ range=[[0, 1]])
527
+
528
+ def test_binned_statistic_float32(self):
529
+ X = np.array([0, 0.42358226], dtype=np.float32)
530
+ stat, _, _ = binned_statistic(X, None, 'count', bins=5)
531
+ assert_allclose(stat, np.array([1, 0, 0, 0, 1], dtype=np.float64))
532
+
533
+ def test_gh14332(self):
534
+ # Test the wrong output when the `sample` is close to bin edge
535
+ x = []
536
+ size = 20
537
+ for i in range(size):
538
+ x += [1-0.1**i]
539
+
540
+ bins = np.linspace(0,1,11)
541
+ sum1, edges1, bc = binned_statistic_dd(x, np.ones(len(x)),
542
+ bins=[bins], statistic='sum')
543
+ sum2, edges2 = np.histogram(x, bins=bins)
544
+
545
+ assert_allclose(sum1, sum2)
546
+ assert_allclose(edges1[0], edges2)
547
+
548
+ @pytest.mark.parametrize("dtype", [np.float64, np.complex128])
549
+ @pytest.mark.parametrize("statistic", [np.mean, np.median, np.sum, np.std,
550
+ np.min, np.max, 'count',
551
+ lambda x: (x**2).sum(),
552
+ lambda x: (x**2).sum() * 1j])
553
+ def test_dd_all(self, dtype, statistic):
554
+ def ref_statistic(x):
555
+ return len(x) if statistic == 'count' else statistic(x)
556
+
557
+ rng = np.random.default_rng(3704743126639371)
558
+ n = 10
559
+ x = rng.random(size=n)
560
+ i = x >= 0.5
561
+ v = rng.random(size=n)
562
+ if dtype is np.complex128:
563
+ v = v + rng.random(size=n)*1j
564
+
565
+ stat, _, _ = binned_statistic_dd(x, v, statistic, bins=2)
566
+ ref = np.array([ref_statistic(v[~i]), ref_statistic(v[i])])
567
+ assert_allclose(stat, ref)
568
+ assert stat.dtype == np.result_type(ref.dtype, np.float64)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_censored_data.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Tests for the CensoredData class.
2
+
3
+ import pytest
4
+ import numpy as np
5
+ from numpy.testing import assert_equal, assert_array_equal
6
+ from scipy.stats import CensoredData
7
+
8
+
9
+ class TestCensoredData:
10
+
11
+ def test_basic(self):
12
+ uncensored = [1]
13
+ left = [0]
14
+ right = [2, 5]
15
+ interval = [[2, 3]]
16
+ data = CensoredData(uncensored, left=left, right=right,
17
+ interval=interval)
18
+ assert_equal(data._uncensored, uncensored)
19
+ assert_equal(data._left, left)
20
+ assert_equal(data._right, right)
21
+ assert_equal(data._interval, interval)
22
+
23
+ udata = data._uncensor()
24
+ assert_equal(udata, np.concatenate((uncensored, left, right,
25
+ np.mean(interval, axis=1))))
26
+
27
+ def test_right_censored(self):
28
+ x = np.array([0, 3, 2.5])
29
+ is_censored = np.array([0, 1, 0], dtype=bool)
30
+ data = CensoredData.right_censored(x, is_censored)
31
+ assert_equal(data._uncensored, x[~is_censored])
32
+ assert_equal(data._right, x[is_censored])
33
+ assert_equal(data._left, [])
34
+ assert_equal(data._interval, np.empty((0, 2)))
35
+
36
+ def test_left_censored(self):
37
+ x = np.array([0, 3, 2.5])
38
+ is_censored = np.array([0, 1, 0], dtype=bool)
39
+ data = CensoredData.left_censored(x, is_censored)
40
+ assert_equal(data._uncensored, x[~is_censored])
41
+ assert_equal(data._left, x[is_censored])
42
+ assert_equal(data._right, [])
43
+ assert_equal(data._interval, np.empty((0, 2)))
44
+
45
+ def test_interval_censored_basic(self):
46
+ a = [0.5, 2.0, 3.0, 5.5]
47
+ b = [1.0, 2.5, 3.5, 7.0]
48
+ data = CensoredData.interval_censored(low=a, high=b)
49
+ assert_array_equal(data._interval, np.array(list(zip(a, b))))
50
+ assert data._uncensored.shape == (0,)
51
+ assert data._left.shape == (0,)
52
+ assert data._right.shape == (0,)
53
+
54
+ def test_interval_censored_mixed(self):
55
+ # This is actually a mix of uncensored, left-censored, right-censored
56
+ # and interval-censored data. Check that when the `interval_censored`
57
+ # class method is used, the data is correctly separated into the
58
+ # appropriate arrays.
59
+ a = [0.5, -np.inf, -13.0, 2.0, 1.0, 10.0, -1.0]
60
+ b = [0.5, 2500.0, np.inf, 3.0, 1.0, 11.0, np.inf]
61
+ data = CensoredData.interval_censored(low=a, high=b)
62
+ assert_array_equal(data._interval, [[2.0, 3.0], [10.0, 11.0]])
63
+ assert_array_equal(data._uncensored, [0.5, 1.0])
64
+ assert_array_equal(data._left, [2500.0])
65
+ assert_array_equal(data._right, [-13.0, -1.0])
66
+
67
+ def test_interval_to_other_types(self):
68
+ # The interval parameter can represent uncensored and
69
+ # left- or right-censored data. Test the conversion of such
70
+ # an example to the canonical form in which the different
71
+ # types have been split into the separate arrays.
72
+ interval = np.array([[0, 1], # interval-censored
73
+ [2, 2], # not censored
74
+ [3, 3], # not censored
75
+ [9, np.inf], # right-censored
76
+ [8, np.inf], # right-censored
77
+ [-np.inf, 0], # left-censored
78
+ [1, 2]]) # interval-censored
79
+ data = CensoredData(interval=interval)
80
+ assert_equal(data._uncensored, [2, 3])
81
+ assert_equal(data._left, [0])
82
+ assert_equal(data._right, [9, 8])
83
+ assert_equal(data._interval, [[0, 1], [1, 2]])
84
+
85
+ def test_empty_arrays(self):
86
+ data = CensoredData(uncensored=[], left=[], right=[], interval=[])
87
+ assert data._uncensored.shape == (0,)
88
+ assert data._left.shape == (0,)
89
+ assert data._right.shape == (0,)
90
+ assert data._interval.shape == (0, 2)
91
+ assert len(data) == 0
92
+
93
+ def test_invalid_constructor_args(self):
94
+ with pytest.raises(ValueError, match='must be a one-dimensional'):
95
+ CensoredData(uncensored=[[1, 2, 3]])
96
+ with pytest.raises(ValueError, match='must be a one-dimensional'):
97
+ CensoredData(left=[[1, 2, 3]])
98
+ with pytest.raises(ValueError, match='must be a one-dimensional'):
99
+ CensoredData(right=[[1, 2, 3]])
100
+ with pytest.raises(ValueError, match='must be a two-dimensional'):
101
+ CensoredData(interval=[[1, 2, 3]])
102
+
103
+ with pytest.raises(ValueError, match='must not contain nan'):
104
+ CensoredData(uncensored=[1, np.nan, 2])
105
+ with pytest.raises(ValueError, match='must not contain nan'):
106
+ CensoredData(left=[1, np.nan, 2])
107
+ with pytest.raises(ValueError, match='must not contain nan'):
108
+ CensoredData(right=[1, np.nan, 2])
109
+ with pytest.raises(ValueError, match='must not contain nan'):
110
+ CensoredData(interval=[[1, np.nan], [2, 3]])
111
+
112
+ with pytest.raises(ValueError,
113
+ match='both values must not be infinite'):
114
+ CensoredData(interval=[[1, 3], [2, 9], [np.inf, np.inf]])
115
+
116
+ with pytest.raises(ValueError,
117
+ match='left value must not exceed the right'):
118
+ CensoredData(interval=[[1, 0], [2, 2]])
119
+
120
+ @pytest.mark.parametrize('func', [CensoredData.left_censored,
121
+ CensoredData.right_censored])
122
+ def test_invalid_left_right_censored_args(self, func):
123
+ with pytest.raises(ValueError,
124
+ match='`x` must be one-dimensional'):
125
+ func([[1, 2, 3]], [0, 1, 1])
126
+ with pytest.raises(ValueError,
127
+ match='`censored` must be one-dimensional'):
128
+ func([1, 2, 3], [[0, 1, 1]])
129
+ with pytest.raises(ValueError, match='`x` must not contain'):
130
+ func([1, 2, np.nan], [0, 1, 1])
131
+ with pytest.raises(ValueError, match='must have the same length'):
132
+ func([1, 2, 3], [0, 0, 1, 1])
133
+
134
+ def test_invalid_censored_args(self):
135
+ with pytest.raises(ValueError,
136
+ match='`low` must be a one-dimensional'):
137
+ CensoredData.interval_censored(low=[[3]], high=[4, 5])
138
+ with pytest.raises(ValueError,
139
+ match='`high` must be a one-dimensional'):
140
+ CensoredData.interval_censored(low=[3], high=[[4, 5]])
141
+ with pytest.raises(ValueError, match='`low` must not contain'):
142
+ CensoredData.interval_censored([1, 2, np.nan], [0, 1, 1])
143
+ with pytest.raises(ValueError, match='must have the same length'):
144
+ CensoredData.interval_censored([1, 2, 3], [0, 0, 1, 1])
145
+
146
+ def test_count_censored(self):
147
+ x = [1, 2, 3]
148
+ # data1 has no censored data.
149
+ data1 = CensoredData(x)
150
+ assert data1.num_censored() == 0
151
+ data2 = CensoredData(uncensored=[2.5], left=[10], interval=[[0, 1]])
152
+ assert data2.num_censored() == 2
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_contingency.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import (assert_equal, assert_array_equal,
3
+ assert_array_almost_equal, assert_approx_equal,
4
+ assert_allclose)
5
+ import pytest
6
+ from pytest import raises as assert_raises
7
+ from scipy.special import xlogy
8
+ from scipy.stats.contingency import (margins, expected_freq,
9
+ chi2_contingency, association)
10
+
11
+
12
+ def test_margins():
13
+ a = np.array([1])
14
+ m = margins(a)
15
+ assert_equal(len(m), 1)
16
+ m0 = m[0]
17
+ assert_array_equal(m0, np.array([1]))
18
+
19
+ a = np.array([[1]])
20
+ m0, m1 = margins(a)
21
+ expected0 = np.array([[1]])
22
+ expected1 = np.array([[1]])
23
+ assert_array_equal(m0, expected0)
24
+ assert_array_equal(m1, expected1)
25
+
26
+ a = np.arange(12).reshape(2, 6)
27
+ m0, m1 = margins(a)
28
+ expected0 = np.array([[15], [51]])
29
+ expected1 = np.array([[6, 8, 10, 12, 14, 16]])
30
+ assert_array_equal(m0, expected0)
31
+ assert_array_equal(m1, expected1)
32
+
33
+ a = np.arange(24).reshape(2, 3, 4)
34
+ m0, m1, m2 = margins(a)
35
+ expected0 = np.array([[[66]], [[210]]])
36
+ expected1 = np.array([[[60], [92], [124]]])
37
+ expected2 = np.array([[[60, 66, 72, 78]]])
38
+ assert_array_equal(m0, expected0)
39
+ assert_array_equal(m1, expected1)
40
+ assert_array_equal(m2, expected2)
41
+
42
+
43
+ def test_expected_freq():
44
+ assert_array_equal(expected_freq([1]), np.array([1.0]))
45
+
46
+ observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
47
+ e = expected_freq(observed)
48
+ assert_array_equal(e, np.ones_like(observed))
49
+
50
+ observed = np.array([[10, 10, 20], [20, 20, 20]])
51
+ e = expected_freq(observed)
52
+ correct = np.array([[12., 12., 16.], [18., 18., 24.]])
53
+ assert_array_almost_equal(e, correct)
54
+
55
+
56
+ def test_chi2_contingency_trivial():
57
+ # Some very simple tests for chi2_contingency.
58
+
59
+ # A trivial case
60
+ obs = np.array([[1, 2], [1, 2]])
61
+ chi2, p, dof, expected = chi2_contingency(obs, correction=False)
62
+ assert_equal(chi2, 0.0)
63
+ assert_equal(p, 1.0)
64
+ assert_equal(dof, 1)
65
+ assert_array_equal(obs, expected)
66
+
67
+ # A *really* trivial case: 1-D data.
68
+ obs = np.array([1, 2, 3])
69
+ chi2, p, dof, expected = chi2_contingency(obs, correction=False)
70
+ assert_equal(chi2, 0.0)
71
+ assert_equal(p, 1.0)
72
+ assert_equal(dof, 0)
73
+ assert_array_equal(obs, expected)
74
+
75
+
76
+ def test_chi2_contingency_R():
77
+ # Some test cases that were computed independently, using R.
78
+
79
+ # Rcode = \
80
+ # """
81
+ # # Data vector.
82
+ # data <- c(
83
+ # 12, 34, 23, 4, 47, 11,
84
+ # 35, 31, 11, 34, 10, 18,
85
+ # 12, 32, 9, 18, 13, 19,
86
+ # 12, 12, 14, 9, 33, 25
87
+ # )
88
+ #
89
+ # # Create factor tags:r=rows, c=columns, t=tiers
90
+ # r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
91
+ # c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3")))
92
+ # t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2")))
93
+ #
94
+ # # 3-way Chi squared test of independence
95
+ # s = summary(xtabs(data~r+c+t))
96
+ # print(s)
97
+ # """
98
+ # Routput = \
99
+ # """
100
+ # Call: xtabs(formula = data ~ r + c + t)
101
+ # Number of cases in table: 478
102
+ # Number of factors: 3
103
+ # Test for independence of all factors:
104
+ # Chisq = 102.17, df = 17, p-value = 3.514e-14
105
+ # """
106
+ obs = np.array(
107
+ [[[12, 34, 23],
108
+ [35, 31, 11],
109
+ [12, 32, 9],
110
+ [12, 12, 14]],
111
+ [[4, 47, 11],
112
+ [34, 10, 18],
113
+ [18, 13, 19],
114
+ [9, 33, 25]]])
115
+ chi2, p, dof, expected = chi2_contingency(obs)
116
+ assert_approx_equal(chi2, 102.17, significant=5)
117
+ assert_approx_equal(p, 3.514e-14, significant=4)
118
+ assert_equal(dof, 17)
119
+
120
+ # Rcode = \
121
+ # """
122
+ # # Data vector.
123
+ # data <- c(
124
+ # #
125
+ # 12, 17,
126
+ # 11, 16,
127
+ # #
128
+ # 11, 12,
129
+ # 15, 16,
130
+ # #
131
+ # 23, 15,
132
+ # 30, 22,
133
+ # #
134
+ # 14, 17,
135
+ # 15, 16
136
+ # )
137
+ #
138
+ # # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers
139
+ # r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2")))
140
+ # c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2")))
141
+ # d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2")))
142
+ # t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2")))
143
+ #
144
+ # # 4-way Chi squared test of independence
145
+ # s = summary(xtabs(data~r+c+d+t))
146
+ # print(s)
147
+ # """
148
+ # Routput = \
149
+ # """
150
+ # Call: xtabs(formula = data ~ r + c + d + t)
151
+ # Number of cases in table: 262
152
+ # Number of factors: 4
153
+ # Test for independence of all factors:
154
+ # Chisq = 8.758, df = 11, p-value = 0.6442
155
+ # """
156
+ obs = np.array(
157
+ [[[[12, 17],
158
+ [11, 16]],
159
+ [[11, 12],
160
+ [15, 16]]],
161
+ [[[23, 15],
162
+ [30, 22]],
163
+ [[14, 17],
164
+ [15, 16]]]])
165
+ chi2, p, dof, expected = chi2_contingency(obs)
166
+ assert_approx_equal(chi2, 8.758, significant=4)
167
+ assert_approx_equal(p, 0.6442, significant=4)
168
+ assert_equal(dof, 11)
169
+
170
+
171
+ def test_chi2_contingency_g():
172
+ c = np.array([[15, 60], [15, 90]])
173
+ g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood',
174
+ correction=False)
175
+ assert_allclose(g, 2*xlogy(c, c/e).sum())
176
+
177
+ g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood',
178
+ correction=True)
179
+ c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]])
180
+ assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum())
181
+
182
+ c = np.array([[10, 12, 10], [12, 10, 10]])
183
+ g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood')
184
+ assert_allclose(g, 2*xlogy(c, c/e).sum())
185
+
186
+
187
+ def test_chi2_contingency_bad_args():
188
+ # Test that "bad" inputs raise a ValueError.
189
+
190
+ # Negative value in the array of observed frequencies.
191
+ obs = np.array([[-1, 10], [1, 2]])
192
+ assert_raises(ValueError, chi2_contingency, obs)
193
+
194
+ # The zeros in this will result in zeros in the array
195
+ # of expected frequencies.
196
+ obs = np.array([[0, 1], [0, 1]])
197
+ assert_raises(ValueError, chi2_contingency, obs)
198
+
199
+ # A degenerate case: `observed` has size 0.
200
+ obs = np.empty((0, 8))
201
+ assert_raises(ValueError, chi2_contingency, obs)
202
+
203
+
204
+ def test_chi2_contingency_yates_gh13875():
205
+ # Magnitude of Yates' continuity correction should not exceed difference
206
+ # between expected and observed value of the statistic; see gh-13875
207
+ observed = np.array([[1573, 3], [4, 0]])
208
+ p = chi2_contingency(observed)[1]
209
+ assert_allclose(p, 1, rtol=1e-12)
210
+
211
+
212
+ @pytest.mark.parametrize("correction", [False, True])
213
+ def test_result(correction):
214
+ obs = np.array([[1, 2], [1, 2]])
215
+ res = chi2_contingency(obs, correction=correction)
216
+ assert_equal((res.statistic, res.pvalue, res.dof, res.expected_freq), res)
217
+
218
+
219
+ def test_bad_association_args():
220
+ # Invalid Test Statistic
221
+ assert_raises(ValueError, association, [[1, 2], [3, 4]], "X")
222
+ # Invalid array shape
223
+ assert_raises(ValueError, association, [[[1, 2]], [[3, 4]]], "cramer")
224
+ # chi2_contingency exception
225
+ assert_raises(ValueError, association, [[-1, 10], [1, 2]], 'cramer')
226
+ # Invalid Array Item Data Type
227
+ assert_raises(ValueError, association,
228
+ np.array([[1, 2], ["dd", 4]], dtype=object), 'cramer')
229
+
230
+
231
+ @pytest.mark.parametrize('stat, expected',
232
+ [('cramer', 0.09222412010290792),
233
+ ('tschuprow', 0.0775509319944633),
234
+ ('pearson', 0.12932925727138758)])
235
+ def test_assoc(stat, expected):
236
+ # 2d Array
237
+ obs1 = np.array([[12, 13, 14, 15, 16],
238
+ [17, 16, 18, 19, 11],
239
+ [9, 15, 14, 12, 11]])
240
+ a = association(observed=obs1, method=stat)
241
+ assert_allclose(a, expected)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_basic.py ADDED
@@ -0,0 +1,1046 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import numpy as np
3
+ import numpy.testing as npt
4
+ import pytest
5
+ from pytest import raises as assert_raises
6
+ from scipy.integrate import IntegrationWarning
7
+ import itertools
8
+
9
+ from scipy import stats
10
+ from .common_tests import (check_normalization, check_moment,
11
+ check_mean_expect,
12
+ check_var_expect, check_skew_expect,
13
+ check_kurt_expect, check_entropy,
14
+ check_private_entropy, check_entropy_vect_scale,
15
+ check_edge_support, check_named_args,
16
+ check_random_state_property,
17
+ check_meth_dtype, check_ppf_dtype,
18
+ check_cmplx_deriv,
19
+ check_pickling, check_rvs_broadcast,
20
+ check_freezing, check_munp_expect,)
21
+ from scipy.stats._distr_params import distcont
22
+ from scipy.stats._distn_infrastructure import rv_continuous_frozen
23
+
24
+ """
25
+ Test all continuous distributions.
26
+
27
+ Parameters were chosen for those distributions that pass the
28
+ Kolmogorov-Smirnov test. This provides safe parameters for each
29
+ distributions so that we can perform further testing of class methods.
30
+
31
+ These tests currently check only/mostly for serious errors and exceptions,
32
+ not for numerically exact results.
33
+ """
34
+
35
+ # Note that you need to add new distributions you want tested
36
+ # to _distr_params
37
+
38
+ DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
39
+ _IS_32BIT = (sys.maxsize < 2**32)
40
+
41
+ # Sets of tests to skip.
42
+ # Entries sorted by speed (very slow to slow).
43
+ # xslow took > 1s; slow took > 0.5s
44
+
45
+ xslow_test_cont_basic = {'studentized_range', 'kstwo', 'ksone', 'vonmises', 'kappa4',
46
+ 'recipinvgauss', 'vonmises_line', 'gausshyper',
47
+ 'rel_breitwigner', 'norminvgauss'}
48
+ slow_test_cont_basic = {'crystalball', 'powerlognorm', 'pearson3'}
49
+
50
+ # test_moments is already marked slow
51
+ xslow_test_moments = {'studentized_range', 'ksone', 'vonmises', 'vonmises_line',
52
+ 'recipinvgauss', 'kstwo', 'kappa4'}
53
+
54
+ slow_fit_mle = {'exponweib', 'genexpon', 'genhyperbolic', 'johnsonsb',
55
+ 'kappa4', 'powerlognorm', 'tukeylambda'}
56
+ xslow_fit_mle = {'gausshyper', 'ncf', 'ncx2', 'recipinvgauss', 'vonmises_line'}
57
+ xfail_fit_mle = {'ksone', 'kstwo', 'trapezoid', 'truncpareto', 'irwinhall'}
58
+ skip_fit_mle = {'levy_stable', 'studentized_range'} # far too slow (>10min)
59
+ slow_fit_mm = {'chi2', 'expon', 'lognorm', 'loguniform', 'powerlaw', 'reciprocal'}
60
+ xslow_fit_mm = {'argus', 'beta', 'exponpow', 'gausshyper', 'gengamma',
61
+ 'genhalflogistic', 'geninvgauss', 'gompertz', 'halfgennorm',
62
+ 'johnsonsb', 'kstwobign', 'ncx2', 'norminvgauss', 'truncnorm',
63
+ 'truncweibull_min', 'wrapcauchy'}
64
+ xfail_fit_mm = {'alpha', 'betaprime', 'bradford', 'burr', 'burr12', 'cauchy',
65
+ 'crystalball', 'exponweib', 'f', 'fisk', 'foldcauchy', 'genextreme',
66
+ 'genpareto', 'halfcauchy', 'invgamma', 'irwinhall', 'jf_skew_t',
67
+ 'johnsonsu', 'kappa3', 'kappa4', 'levy', 'levy_l', 'loglaplace',
68
+ 'lomax', 'mielke', 'ncf', 'nct', 'pareto', 'powerlognorm', 'powernorm',
69
+ 'rel_breitwigner', 'skewcauchy', 't', 'trapezoid', 'truncexpon',
70
+ 'truncpareto', 'tukeylambda', 'vonmises', 'vonmises_line'}
71
+ skip_fit_mm = {'genexpon', 'genhyperbolic', 'ksone', 'kstwo', 'levy_stable',
72
+ 'recipinvgauss', 'studentized_range'} # far too slow (>10min)
73
+
74
+ # These distributions fail the complex derivative test below.
75
+ # Here 'fail' mean produce wrong results and/or raise exceptions, depending
76
+ # on the implementation details of corresponding special functions.
77
+ # cf https://github.com/scipy/scipy/pull/4979 for a discussion.
78
+ fails_cmplx = {'argus', 'beta', 'betaprime', 'chi', 'chi2', 'cosine',
79
+ 'dgamma', 'dweibull', 'erlang', 'f', 'foldcauchy', 'gamma',
80
+ 'gausshyper', 'gengamma', 'genhyperbolic',
81
+ 'geninvgauss', 'gennorm', 'genpareto',
82
+ 'halfcauchy', 'halfgennorm', 'invgamma', 'irwinhall', 'jf_skew_t',
83
+ 'ksone', 'kstwo', 'kstwobign', 'levy_l', 'loggamma',
84
+ 'logistic', 'loguniform', 'maxwell', 'nakagami',
85
+ 'ncf', 'nct', 'ncx2', 'norminvgauss', 'pearson3',
86
+ 'powerlaw', 'rdist', 'reciprocal', 'rice',
87
+ 'skewnorm', 't', 'truncweibull_min',
88
+ 'tukeylambda', 'vonmises', 'vonmises_line',
89
+ 'rv_histogram_instance', 'truncnorm', 'studentized_range',
90
+ 'johnsonsb', 'halflogistic', 'rel_breitwigner'}
91
+
92
+ # Slow test_method_with_lists
93
+ slow_with_lists = {'studentized_range'}
94
+
95
+
96
+ # rv_histogram instances, with uniform and non-uniform bins;
97
+ # stored as (dist, arg) tuples for cases_test_cont_basic
98
+ # and cases_test_moments.
99
+ histogram_test_instances = []
100
+ case1 = {'a': [1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6,
101
+ 6, 6, 6, 7, 7, 7, 8, 8, 9], 'bins': 8} # equal width bins
102
+ case2 = {'a': [1, 1], 'bins': [0, 1, 10]} # unequal width bins
103
+ for case, density in itertools.product([case1, case2], [True, False]):
104
+ _hist = np.histogram(**case, density=density)
105
+ _rv_hist = stats.rv_histogram(_hist, density=density)
106
+ histogram_test_instances.append((_rv_hist, tuple()))
107
+
108
+
109
+ def cases_test_cont_basic():
110
+ for distname, arg in distcont[:] + histogram_test_instances:
111
+ if distname == 'levy_stable': # fails; tested separately
112
+ continue
113
+ if distname in slow_test_cont_basic:
114
+ yield pytest.param(distname, arg, marks=pytest.mark.slow)
115
+ elif distname in xslow_test_cont_basic:
116
+ yield pytest.param(distname, arg, marks=pytest.mark.xslow)
117
+ else:
118
+ yield distname, arg
119
+
120
+
121
+ @pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
122
+ @pytest.mark.parametrize('sn', [500])
123
+ def test_cont_basic(distname, arg, sn):
124
+ try:
125
+ distfn = getattr(stats, distname)
126
+ except TypeError:
127
+ distfn = distname
128
+ distname = 'rv_histogram_instance'
129
+
130
+ rng = np.random.RandomState(765456)
131
+ rvs = distfn.rvs(size=sn, *arg, random_state=rng)
132
+ m, v = distfn.stats(*arg)
133
+
134
+ if distname not in {'laplace_asymmetric'}:
135
+ check_sample_meanvar_(m, v, rvs)
136
+ check_cdf_ppf(distfn, arg, distname)
137
+ check_sf_isf(distfn, arg, distname)
138
+ check_cdf_sf(distfn, arg, distname)
139
+ check_ppf_isf(distfn, arg, distname)
140
+ check_pdf(distfn, arg, distname)
141
+ check_pdf_logpdf(distfn, arg, distname)
142
+ check_pdf_logpdf_at_endpoints(distfn, arg, distname)
143
+ check_cdf_logcdf(distfn, arg, distname)
144
+ check_sf_logsf(distfn, arg, distname)
145
+ check_ppf_broadcast(distfn, arg, distname)
146
+
147
+ alpha = 0.01
148
+ if distname == 'rv_histogram_instance':
149
+ check_distribution_rvs(distfn.cdf, arg, alpha, rvs)
150
+ elif distname != 'geninvgauss':
151
+ # skip kstest for geninvgauss since cdf is too slow; see test for
152
+ # rv generation in TestGenInvGauss in test_distributions.py
153
+ check_distribution_rvs(distname, arg, alpha, rvs)
154
+
155
+ locscale_defaults = (0, 1)
156
+ meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
157
+ distfn.logsf]
158
+ # make sure arguments are within support
159
+ spec_x = {'weibull_max': -0.5, 'levy_l': -0.5,
160
+ 'pareto': 1.5, 'truncpareto': 3.2, 'tukeylambda': 0.3,
161
+ 'rv_histogram_instance': 5.0}
162
+ x = spec_x.get(distname, 0.5)
163
+ if distname == 'invweibull':
164
+ arg = (1,)
165
+ elif distname == 'ksone':
166
+ arg = (3,)
167
+
168
+ check_named_args(distfn, x, arg, locscale_defaults, meths)
169
+ check_random_state_property(distfn, arg)
170
+
171
+ if distname in ['rel_breitwigner'] and _IS_32BIT:
172
+ # gh18414
173
+ pytest.skip("fails on Linux 32-bit")
174
+ else:
175
+ check_pickling(distfn, arg)
176
+ check_freezing(distfn, arg)
177
+
178
+ # Entropy
179
+ if distname not in ['kstwobign', 'kstwo', 'ncf']:
180
+ check_entropy(distfn, arg, distname)
181
+
182
+ if distfn.numargs == 0:
183
+ check_vecentropy(distfn, arg)
184
+
185
+ if (distfn.__class__._entropy != stats.rv_continuous._entropy
186
+ and distname != 'vonmises'):
187
+ check_private_entropy(distfn, arg, stats.rv_continuous)
188
+
189
+ with npt.suppress_warnings() as sup:
190
+ sup.filter(IntegrationWarning, "The occurrence of roundoff error")
191
+ sup.filter(IntegrationWarning, "Extremely bad integrand")
192
+ sup.filter(RuntimeWarning, "invalid value")
193
+ check_entropy_vect_scale(distfn, arg)
194
+
195
+ check_retrieving_support(distfn, arg)
196
+ check_edge_support(distfn, arg)
197
+
198
+ check_meth_dtype(distfn, arg, meths)
199
+ check_ppf_dtype(distfn, arg)
200
+
201
+ if distname not in fails_cmplx:
202
+ check_cmplx_deriv(distfn, arg)
203
+
204
+ if distname != 'truncnorm':
205
+ check_ppf_private(distfn, arg, distname)
206
+
207
+
208
+ def cases_test_cont_basic_fit():
209
+ slow = pytest.mark.slow
210
+ xslow = pytest.mark.xslow
211
+ fail = pytest.mark.skip(reason="Test fails and may be slow.")
212
+ skip = pytest.mark.skip(reason="Test too slow to run to completion (>10m).")
213
+
214
+ for distname, arg in distcont[:] + histogram_test_instances:
215
+ for method in ["MLE", "MM"]:
216
+ for fix_args in [True, False]:
217
+ if method == 'MLE' and distname in slow_fit_mle:
218
+ yield pytest.param(distname, arg, method, fix_args, marks=slow)
219
+ continue
220
+ if method == 'MLE' and distname in xslow_fit_mle:
221
+ yield pytest.param(distname, arg, method, fix_args, marks=xslow)
222
+ continue
223
+ if method == 'MLE' and distname in xfail_fit_mle:
224
+ yield pytest.param(distname, arg, method, fix_args, marks=fail)
225
+ continue
226
+ if method == 'MLE' and distname in skip_fit_mle:
227
+ yield pytest.param(distname, arg, method, fix_args, marks=skip)
228
+ continue
229
+ if method == 'MM' and distname in slow_fit_mm:
230
+ yield pytest.param(distname, arg, method, fix_args, marks=slow)
231
+ continue
232
+ if method == 'MM' and distname in xslow_fit_mm:
233
+ yield pytest.param(distname, arg, method, fix_args, marks=xslow)
234
+ continue
235
+ if method == 'MM' and distname in xfail_fit_mm:
236
+ yield pytest.param(distname, arg, method, fix_args, marks=fail)
237
+ continue
238
+ if method == 'MM' and distname in skip_fit_mm:
239
+ yield pytest.param(distname, arg, method, fix_args, marks=skip)
240
+ continue
241
+
242
+ yield distname, arg, method, fix_args
243
+
244
+
245
+ def test_cont_basic_fit_cases():
246
+ # Distribution names should not be in multiple MLE or MM sets
247
+ assert (len(xslow_fit_mle.union(xfail_fit_mle).union(skip_fit_mle)) ==
248
+ len(xslow_fit_mle) + len(xfail_fit_mle) + len(skip_fit_mle))
249
+ assert (len(xslow_fit_mm.union(xfail_fit_mm).union(skip_fit_mm)) ==
250
+ len(xslow_fit_mm) + len(xfail_fit_mm) + len(skip_fit_mm))
251
+
252
+
253
+ @pytest.mark.parametrize('distname, arg, method, fix_args',
254
+ cases_test_cont_basic_fit())
255
+ @pytest.mark.parametrize('n_fit_samples', [200])
256
+ def test_cont_basic_fit(distname, arg, n_fit_samples, method, fix_args):
257
+ try:
258
+ distfn = getattr(stats, distname)
259
+ except TypeError:
260
+ distfn = distname
261
+
262
+ rng = np.random.RandomState(765456)
263
+ rvs = distfn.rvs(size=n_fit_samples, *arg, random_state=rng)
264
+ if fix_args:
265
+ check_fit_args_fix(distfn, arg, rvs, method)
266
+ else:
267
+ check_fit_args(distfn, arg, rvs, method)
268
+
269
+ @pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
270
+ def test_rvs_scalar(distname, arg):
271
+ # rvs should return a scalar when given scalar arguments (gh-12428)
272
+ try:
273
+ distfn = getattr(stats, distname)
274
+ except TypeError:
275
+ distfn = distname
276
+ distname = 'rv_histogram_instance'
277
+
278
+ assert np.isscalar(distfn.rvs(*arg))
279
+ assert np.isscalar(distfn.rvs(*arg, size=()))
280
+ assert np.isscalar(distfn.rvs(*arg, size=None))
281
+
282
+
283
+ def test_levy_stable_random_state_property():
284
+ # levy_stable only implements rvs(), so it is skipped in the
285
+ # main loop in test_cont_basic(). Here we apply just the test
286
+ # check_random_state_property to levy_stable.
287
+ check_random_state_property(stats.levy_stable, (0.5, 0.1))
288
+
289
+
290
+ def cases_test_moments():
291
+ fail_normalization = set()
292
+ fail_higher = {'ncf'}
293
+ fail_moment = {'johnsonsu'} # generic `munp` is inaccurate for johnsonsu
294
+
295
+ for distname, arg in distcont[:] + histogram_test_instances:
296
+ if distname == 'levy_stable':
297
+ continue
298
+
299
+ if distname in xslow_test_moments:
300
+ yield pytest.param(distname, arg, True, True, True, True,
301
+ marks=pytest.mark.xslow(reason="too slow"))
302
+ continue
303
+
304
+ cond1 = distname not in fail_normalization
305
+ cond2 = distname not in fail_higher
306
+ cond3 = distname not in fail_moment
307
+
308
+ marks = list()
309
+ # Currently unused, `marks` can be used to add a timeout to a test of
310
+ # a specific distribution. For example, this shows how a timeout could
311
+ # be added for the 'skewnorm' distribution:
312
+ #
313
+ # marks = list()
314
+ # if distname == 'skewnorm':
315
+ # marks.append(pytest.mark.timeout(300))
316
+
317
+ yield pytest.param(distname, arg, cond1, cond2, cond3,
318
+ False, marks=marks)
319
+
320
+ if not cond1 or not cond2 or not cond3:
321
+ # Run the distributions that have issues twice, once skipping the
322
+ # not_ok parts, once with the not_ok parts but marked as knownfail
323
+ yield pytest.param(distname, arg, True, True, True, True,
324
+ marks=[pytest.mark.xfail] + marks)
325
+
326
+
327
+ @pytest.mark.slow
328
+ @pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,moment_ok,'
329
+ 'is_xfailing',
330
+ cases_test_moments())
331
+ def test_moments(distname, arg, normalization_ok, higher_ok, moment_ok,
332
+ is_xfailing):
333
+ try:
334
+ distfn = getattr(stats, distname)
335
+ except TypeError:
336
+ distfn = distname
337
+ distname = 'rv_histogram_instance'
338
+
339
+ with npt.suppress_warnings() as sup:
340
+ sup.filter(IntegrationWarning,
341
+ "The integral is probably divergent, or slowly convergent.")
342
+ sup.filter(IntegrationWarning,
343
+ "The maximum number of subdivisions.")
344
+ sup.filter(IntegrationWarning,
345
+ "The algorithm does not converge.")
346
+
347
+ if is_xfailing:
348
+ sup.filter(IntegrationWarning)
349
+
350
+ m, v, s, k = distfn.stats(*arg, moments='mvsk')
351
+
352
+ with np.errstate(all="ignore"):
353
+ if normalization_ok:
354
+ check_normalization(distfn, arg, distname)
355
+
356
+ if higher_ok:
357
+ check_mean_expect(distfn, arg, m, distname)
358
+ check_skew_expect(distfn, arg, m, v, s, distname)
359
+ check_var_expect(distfn, arg, m, v, distname)
360
+ check_kurt_expect(distfn, arg, m, v, k, distname)
361
+ check_munp_expect(distfn, arg, distname)
362
+
363
+ check_loc_scale(distfn, arg, m, v, distname)
364
+
365
+ if moment_ok:
366
+ check_moment(distfn, arg, m, v, distname)
367
+
368
+
369
+ @pytest.mark.parametrize('dist,shape_args', distcont)
370
+ def test_rvs_broadcast(dist, shape_args):
371
+ if dist in ['gausshyper', 'studentized_range']:
372
+ pytest.skip("too slow")
373
+
374
+ if dist in ['rel_breitwigner'] and _IS_32BIT:
375
+ # gh18414
376
+ pytest.skip("fails on Linux 32-bit")
377
+
378
+ # If shape_only is True, it means the _rvs method of the
379
+ # distribution uses more than one random number to generate a random
380
+ # variate. That means the result of using rvs with broadcasting or
381
+ # with a nontrivial size will not necessarily be the same as using the
382
+ # numpy.vectorize'd version of rvs(), so we can only compare the shapes
383
+ # of the results, not the values.
384
+ # Whether or not a distribution is in the following list is an
385
+ # implementation detail of the distribution, not a requirement. If
386
+ # the implementation the rvs() method of a distribution changes, this
387
+ # test might also have to be changed.
388
+ shape_only = dist in ['argus', 'betaprime', 'dgamma', 'dweibull',
389
+ 'exponnorm', 'genhyperbolic', 'geninvgauss',
390
+ 'levy_stable', 'nct', 'norminvgauss', 'rice',
391
+ 'skewnorm', 'semicircular', 'gennorm', 'loggamma']
392
+
393
+ distfunc = getattr(stats, dist)
394
+ loc = np.zeros(2)
395
+ scale = np.ones((3, 1))
396
+ nargs = distfunc.numargs
397
+ allargs = []
398
+ bshape = [3, 2]
399
+ # Generate shape parameter arguments...
400
+ for k in range(nargs):
401
+ shp = (k + 4,) + (1,)*(k + 2)
402
+ allargs.append(shape_args[k]*np.ones(shp))
403
+ bshape.insert(0, k + 4)
404
+ allargs.extend([loc, scale])
405
+ # bshape holds the expected shape when loc, scale, and the shape
406
+ # parameters are all broadcast together.
407
+
408
+ check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd')
409
+
410
+
411
+ # Expected values of the SF, CDF, PDF were computed using
412
+ # mpmath with mpmath.mp.dps = 50 and output at 20:
413
+ #
414
+ # def ks(x, n):
415
+ # x = mpmath.mpf(x)
416
+ # logp = -mpmath.power(6.0*n*x+1.0, 2)/18.0/n
417
+ # sf, cdf = mpmath.exp(logp), -mpmath.expm1(logp)
418
+ # pdf = (6.0*n*x+1.0) * 2 * sf/3
419
+ # print(mpmath.nstr(sf, 20), mpmath.nstr(cdf, 20), mpmath.nstr(pdf, 20))
420
+ #
421
+ # Tests use 1/n < x < 1-1/n and n > 1e6 to use the asymptotic computation.
422
+ # Larger x has a smaller sf.
423
+ @pytest.mark.parametrize('x,n,sf,cdf,pdf,rtol',
424
+ [(2.0e-5, 1000000000,
425
+ 0.44932297307934442379, 0.55067702692065557621,
426
+ 35946.137394996276407, 5e-15),
427
+ (2.0e-9, 1000000000,
428
+ 0.99999999061111115519, 9.3888888448132728224e-9,
429
+ 8.6666665852962971765, 5e-14),
430
+ (5.0e-4, 1000000000,
431
+ 7.1222019433090374624e-218, 1.0,
432
+ 1.4244408634752704094e-211, 5e-14)])
433
+ def test_gh17775_regression(x, n, sf, cdf, pdf, rtol):
434
+ # Regression test for gh-17775. In scipy 1.9.3 and earlier,
435
+ # these test would fail.
436
+ #
437
+ # KS one asymptotic sf ~ e^(-(6nx+1)^2 / 18n)
438
+ # Given a large 32-bit integer n, 6n will overflow in the c implementation.
439
+ # Example of broken behaviour:
440
+ # ksone.sf(2.0e-5, 1000000000) == 0.9374359693473666
441
+ ks = stats.ksone
442
+ vals = np.array([ks.sf(x, n), ks.cdf(x, n), ks.pdf(x, n)])
443
+ expected = np.array([sf, cdf, pdf])
444
+ npt.assert_allclose(vals, expected, rtol=rtol)
445
+ # The sf+cdf must sum to 1.0.
446
+ npt.assert_equal(vals[0] + vals[1], 1.0)
447
+ # Check inverting the (potentially very small) sf (uses a lower tolerance)
448
+ npt.assert_allclose([ks.isf(sf, n)], [x], rtol=1e-8)
449
+
450
+
451
def test_rvs_gh2069_regression():
    # Regression tests for gh-2069. In scipy 0.17 and earlier,
    # these tests would fail.
    #
    # A typical example of the broken behavior:
    # >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
    # array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705])
    rng = np.random.RandomState(123)
    # Each loc/scale combination must yield distinct variates, not one value
    # broadcast across the output.
    loc_scale_cases = [
        (np.zeros(5), 1),
        (0, np.ones(5)),
        (np.zeros(5), np.ones(5)),
        (np.array([[0], [0]]), np.ones(5)),
    ]
    for loc, scale in loc_scale_cases:
        vals = stats.norm.rvs(loc=loc, scale=scale, random_state=rng)
        d = np.diff(vals.ravel())
        npt.assert_(np.all(d != 0),
                    "All the values are equal, but they shouldn't be!")

    # Incompatible broadcast shapes must raise, not silently misbehave.
    assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
                  [[1, 1], [1, 1]], 1)
    assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
    assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
                  [[1], [2]], (4,))
478
+
479
+
480
+ def test_nomodify_gh9900_regression():
481
+ # Regression test for gh-9990
482
+ # Prior to gh-9990, calls to stats.truncnorm._cdf() use what ever was
483
+ # set inside the stats.truncnorm instance during stats.truncnorm.cdf().
484
+ # This could cause issues with multi-threaded code.
485
+ # Since then, the calls to cdf() are not permitted to modify the global
486
+ # stats.truncnorm instance.
487
+ tn = stats.truncnorm
488
+ # Use the right-half truncated normal
489
+ # Check that the cdf and _cdf return the same result.
490
+ npt.assert_almost_equal(tn.cdf(1, 0, np.inf),
491
+ 0.6826894921370859)
492
+ npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]),
493
+ 0.6826894921370859)
494
+
495
+ # Now use the left-half truncated normal
496
+ npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0),
497
+ 0.31731050786291415)
498
+ npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]),
499
+ 0.31731050786291415)
500
+
501
+ # Check that the right-half truncated normal _cdf hasn't changed
502
+ npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]),
503
+ 0.6826894921370859) # Not 1.6826894921370859
504
+ npt.assert_almost_equal(tn.cdf(1, 0, np.inf),
505
+ 0.6826894921370859)
506
+
507
+ # Check that the left-half truncated normal _cdf hasn't changed
508
+ npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]),
509
+ 0.31731050786291415) # Not -0.6826894921370859
510
+ npt.assert_almost_equal(tn.cdf(1, -np.inf, 0),
511
+ 1) # Not 1.6826894921370859
512
+ npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0),
513
+ 0.31731050786291415) # Not -0.6826894921370859
514
+
515
+
516
+ def test_broadcast_gh9990_regression():
517
+ # Regression test for gh-9990
518
+ # The x-value 7 only lies within the support of 4 of the supplied
519
+ # distributions. Prior to 9990, one array passed to
520
+ # stats.reciprocal._cdf would have 4 elements, but an array
521
+ # previously stored by stats.reciprocal_argcheck() would have 6, leading
522
+ # to a broadcast error.
523
+ a = np.array([1, 2, 3, 4, 5, 6])
524
+ b = np.array([8, 16, 1, 32, 1, 48])
525
+ ans = [stats.reciprocal.cdf(7, _a, _b) for _a, _b in zip(a,b)]
526
+ npt.assert_array_almost_equal(stats.reciprocal.cdf(7, a, b), ans)
527
+
528
+ ans = [stats.reciprocal.cdf(1, _a, _b) for _a, _b in zip(a,b)]
529
+ npt.assert_array_almost_equal(stats.reciprocal.cdf(1, a, b), ans)
530
+
531
+ ans = [stats.reciprocal.cdf(_a, _a, _b) for _a, _b in zip(a,b)]
532
+ npt.assert_array_almost_equal(stats.reciprocal.cdf(a, a, b), ans)
533
+
534
+ ans = [stats.reciprocal.cdf(_b, _a, _b) for _a, _b in zip(a,b)]
535
+ npt.assert_array_almost_equal(stats.reciprocal.cdf(b, a, b), ans)
536
+
537
+
538
+ def test_broadcast_gh7933_regression():
539
+ # Check broadcast works
540
+ stats.truncnorm.logpdf(
541
+ np.array([3.0, 2.0, 1.0]),
542
+ a=(1.5 - np.array([6.0, 5.0, 4.0])) / 3.0,
543
+ b=np.inf,
544
+ loc=np.array([6.0, 5.0, 4.0]),
545
+ scale=3.0
546
+ )
547
+
548
+
549
+ def test_gh2002_regression():
550
+ # Add a check that broadcast works in situations where only some
551
+ # x-values are compatible with some of the shape arguments.
552
+ x = np.r_[-2:2:101j]
553
+ a = np.r_[-np.ones(50), np.ones(51)]
554
+ expected = [stats.truncnorm.pdf(_x, _a, np.inf) for _x, _a in zip(x, a)]
555
+ ans = stats.truncnorm.pdf(x, a, np.inf)
556
+ npt.assert_array_almost_equal(ans, expected)
557
+
558
+
559
+ def test_gh1320_regression():
560
+ # Check that the first example from gh-1320 now works.
561
+ c = 2.62
562
+ stats.genextreme.ppf(0.5, np.array([[c], [c + 0.5]]))
563
+ # The other examples in gh-1320 appear to have stopped working
564
+ # some time ago.
565
+ # ans = stats.genextreme.moment(2, np.array([c, c + 0.5]))
566
+ # expected = np.array([25.50105963, 115.11191437])
567
+ # stats.genextreme.moment(5, np.array([[c], [c + 0.5]]))
568
+ # stats.genextreme.moment(5, np.array([c, c + 0.5]))
569
+
570
+
571
def test_method_of_moments():
    # example from https://en.wikipedia.org/wiki/Method_of_moments_(statistics)
    np.random.seed(1234)
    x = [0, 0, 0, 0, 1]
    expected_a = 1/5 - 2*np.sqrt(3)/5
    expected_b = 1/5 + 2*np.sqrt(3)/5
    # uniform.fit is overridden with an analytic MLE, so call the generic
    # rv_continuous implementation to force the method-of-moments path
    loc, scale = super(type(stats.uniform), stats.uniform).fit(x, method="MM")
    npt.assert_almost_equal(loc, expected_a, decimal=4)
    npt.assert_almost_equal(loc + scale, expected_b, decimal=4)
581
+
582
+
583
def check_sample_meanvar_(popmean, popvar, sample):
    """Check the sample mean/variance against the population values.

    Infinite population moments are skipped, since no sample-based check
    is meaningful for them.
    """
    for value, checker in ((popmean, check_sample_mean),
                           (popvar, check_sample_var)):
        if np.isfinite(value):
            checker(sample, value)
588
+
589
+
590
def check_sample_mean(sample, popmean):
    """Fail if the sample mean is implausibly far from ``popmean``.

    Uses a one-sample t-test; the check fails only when such a sample
    would be very unlikely (p < 1%) under the hypothesized mean.
    """
    result = stats.ttest_1samp(sample, popmean)
    assert result.pvalue > 0.01
594
+
595
+
596
def check_sample_var(sample, popvar):
    """Fail if ``popvar`` lies outside a bootstrap CI of the sample variance.

    A 99.5% bootstrap confidence interval for the (ddof=1) sample variance
    is computed and the population variance must fall inside it.  This
    replaced an earlier chi-squared test for the variance, which produced
    too many false positives.
    """
    def sample_variance(x, axis):
        return x.var(ddof=1, axis=axis)

    ci = stats.bootstrap((sample,), sample_variance,
                         confidence_level=0.995).confidence_interval
    assert ci.low <= popvar <= ci.high
608
+
609
+
610
def check_cdf_ppf(distfn, arg, msg):
    """Check the roundtrip identity cdf(ppf(q)) == q at a few quantiles."""
    q = [0.001, 0.5, 0.999]
    roundtrip = distfn.cdf(distfn.ppf(q, *arg), *arg)
    npt.assert_almost_equal(roundtrip, q, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-ppf roundtrip')
615
+
616
+
617
def check_sf_isf(distfn, arg, msg):
    """Check the roundtrip identity sf(isf(q)) == q at a few quantiles."""
    q = [0.1, 0.5, 0.9]
    roundtrip = distfn.sf(distfn.isf(q, *arg), *arg)
    npt.assert_almost_equal(roundtrip, q, decimal=DECIMAL,
                            err_msg=msg + ' - sf-isf roundtrip')
621
+
622
+
623
def check_cdf_sf(distfn, arg, msg):
    """Check the identity cdf(x) == 1 - sf(x) at two sample points."""
    x = [0.1, 0.9]
    lhs = distfn.cdf(x, *arg)
    rhs = 1.0 - distfn.sf(x, *arg)
    npt.assert_almost_equal(lhs, rhs, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-sf relationship')
628
+
629
+
630
def check_ppf_isf(distfn, arg, msg):
    """Check the identity isf(p) == ppf(1 - p)."""
    p = np.array([0.1, 0.9])
    upper_tail = distfn.isf(p, *arg)
    lower_tail = distfn.ppf(1-p, *arg)
    npt.assert_almost_equal(upper_tail, lower_tail, decimal=DECIMAL,
                            err_msg=msg + ' - ppf-isf relationship')
635
+
636
+
637
def check_pdf(distfn, arg, msg):
    """Compare the pdf near the median with a central difference of the cdf."""
    eps = 1e-6
    x = distfn.ppf(0.5, *arg)  # start at the median
    pdfv = distfn.pdf(x, *arg)
    if (pdfv < 1e-4) or (pdfv > 1e4):
        # the pdf is nearly zero or huge (singularity) at the median;
        # nudge the evaluation point so the comparison is meaningful
        x = x + 0.1
        pdfv = distfn.pdf(x, *arg)
    cdfdiff = (distfn.cdf(x + eps, *arg) -
               distfn.cdf(x - eps, *arg))/eps/2.0
    # replace with better diff and better test (more points),
    # actually, this works pretty well
    msg += ' - cdf-pdf relationship'
    npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL, err_msg=msg)
653
+
654
+
655
def check_pdf_logpdf(distfn, args, msg):
    """Check that logpdf agrees with log(pdf) at several quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    pdf = distfn.pdf(x, *args)
    logpdf = distfn.logpdf(x, *args)
    # drop degenerate values (log(0) and non-finite entries) before comparing
    pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
    logpdf = logpdf[np.isfinite(logpdf)]
    msg += " - logpdf-log(pdf) relationship"
    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
666
+
667
+
668
def check_pdf_logpdf_at_endpoints(distfn, args, msg):
    """Check logpdf against log(pdf) at the (finite) support endpoints."""
    quantiles = np.array([0, 1])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    pdf = distfn.pdf(x, *args)
    logpdf = distfn.logpdf(x, *args)
    # drop degenerate values (log(0) and non-finite entries) before comparing
    pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
    logpdf = logpdf[np.isfinite(logpdf)]
    msg += " - logpdf-log(pdf) relationship"
    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
679
+
680
+
681
def check_sf_logsf(distfn, args, msg):
    """Check that logsf agrees with log(sf) at several quantiles."""
    quantiles = np.array([0.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    sf = distfn.sf(x, *args)
    logsf = distfn.logsf(x, *args)
    # drop zeros (log undefined) and non-finite log values before comparing
    sf = sf[sf != 0]
    logsf = logsf[np.isfinite(logsf)]
    msg += " - logsf-log(sf) relationship"
    npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg)
692
+
693
+
694
def check_cdf_logcdf(distfn, args, msg):
    """Check that logcdf agrees with log(cdf) at several quantiles."""
    quantiles = np.array([0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    cdf = distfn.cdf(x, *args)
    logcdf = distfn.logcdf(x, *args)
    # drop zeros (log undefined) and non-finite log values before comparing
    cdf = cdf[cdf != 0]
    logcdf = logcdf[np.isfinite(logcdf)]
    msg += " - logcdf-log(cdf) relationship"
    npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg)
705
+
706
+
707
def check_ppf_broadcast(distfn, arg, msg):
    """Check that ppf broadcasts over repeated shape arguments.

    Each shape parameter is repeated ``num_repeats`` times; the vectorized
    call must return ``num_repeats`` copies of the scalar result.
    """
    num_repeats = 5
    # Fixed: the original wrote ``args = [] * num_repeats``, but multiplying
    # an empty list is a no-op that always yields ``[]`` — misleading code.
    args = []
    if arg:
        args = [np.array([_] * num_repeats) for _ in arg]

    median = distfn.ppf(0.5, *arg)
    medians = distfn.ppf(0.5, *args)
    msg += " - ppf multiple"
    npt.assert_almost_equal(medians, [median] * num_repeats, decimal=7,
                            err_msg=msg)
718
+
719
+
720
def check_distribution_rvs(dist, args, alpha, rvs):
    """Kolmogorov-Smirnov check that ``rvs`` follow the given distribution.

    ``dist`` is a cdf callable or the name of a scipy.stats distribution,
    ``args`` are its shape parameters, ``alpha`` is the significance level
    (~0.01) and ``rvs`` is the sample to test.  If the supplied sample fails
    the K-S test — which can legitimately happen by chance — the test is
    repeated once with 1000 freshly generated variates.
    """
    D, pval = stats.kstest(rvs, dist, args=args, N=1000)
    if pval < alpha:
        # Unlucky sample: regenerate and re-run the K-S test once.
        D, pval = stats.kstest(dist, dist, args=args, N=1000)
    npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
                "; alpha = " + str(alpha) + "\nargs = " + str(args))
737
+
738
+
739
def check_vecentropy(distfn, args):
    """The vectorized entropy must match the private scalar ``_entropy``."""
    expected = distfn._entropy(*args)
    npt.assert_equal(distfn.vecentropy(*args), expected)
741
+
742
+
743
def check_loc_scale(distfn, arg, m, v, msg):
    """Check the affine transform of mean/variance under loc and scale.

    ``loc`` and ``scale`` are arrays to catch broadcasting bugs like
    gh-13580, where array loc/scale broadcast improperly with the shapes.
    """
    loc = np.array([10.0, 20.0])
    scale = np.array([10.0, 20.0])
    mt, vt = distfn.stats(*arg, loc=loc, scale=scale)
    npt.assert_allclose(m*scale + loc, mt)
    npt.assert_allclose(v*scale*scale, vt)
750
+
751
+
752
def check_ppf_private(distfn, arg, msg):
    """The private ``_ppf`` must not return nan at interior probabilities."""
    # fails by design for truncnorm: self.nb not defined
    p = np.array([0.1, 0.5, 0.9])
    ppfs = distfn._ppf(p, *arg)
    npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
756
+
757
+
758
def check_retrieving_support(distfn, args):
    """support() must transform affinely with loc and scale."""
    loc, scale = 1, 2
    plain = np.array(distfn.support(*args))
    shifted = np.array(distfn.support(*args, loc=loc, scale=scale))
    npt.assert_almost_equal(plain*scale + loc, shifted)
764
+
765
+
766
def check_fit_args(distfn, arg, rvs, method):
    """fit() must return loc, scale, and every shape parameter, with both
    the default optimizer and 'powell'.
    """
    expected_len = 2 + len(arg)
    with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
        sup.filter(category=RuntimeWarning,
                   message="The shape parameter of the erlang")
        sup.filter(category=RuntimeWarning,
                   message="floating point number truncated")
        vals = distfn.fit(rvs, method=method)
        vals2 = distfn.fit(rvs, optimizer='powell', method=method)
    # Only check the length of the return; accuracy tested in test_fit.py
    npt.assert_(len(vals) == expected_len)
    npt.assert_(len(vals2) == expected_len)
777
+
778
+
779
def check_fit_args_fix(distfn, arg, rvs, method):
    """fit() must honor fixed parameters (floc, fscale, f0, f1, f2)."""
    nparams = 2 + len(arg)
    with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
        sup.filter(category=RuntimeWarning,
                   message="The shape parameter of the erlang")

        vals = distfn.fit(rvs, floc=0, method=method)
        vals2 = distfn.fit(rvs, fscale=1, method=method)
        npt.assert_(len(vals) == nparams)
        npt.assert_(vals[-2] == 0)
        npt.assert_(vals2[-1] == 1)
        npt.assert_(len(vals2) == nparams)
        # pin each shape parameter in turn; it must be returned unchanged
        for i, keyword in enumerate(['f0', 'f1', 'f2']):
            if len(arg) > i:
                fixed_vals = distfn.fit(rvs, method=method,
                                        **{keyword: arg[i]})
                npt.assert_(len(fixed_vals) == nparams)
                npt.assert_(fixed_vals[i] == arg[i])
802
+
803
+
804
def cases_test_methods_with_lists():
    """Yield (distname, shapes) cases, marking the slow ones for pytest."""
    for distname, shapes in distcont:
        if distname in slow_with_lists:
            yield pytest.param(distname, shapes, marks=pytest.mark.slow)
            continue
        yield distname, shapes
810
+
811
+
812
+ @pytest.mark.parametrize('method', ['pdf', 'logpdf', 'cdf', 'logcdf',
813
+ 'sf', 'logsf', 'ppf', 'isf'])
814
+ @pytest.mark.parametrize('distname, args', cases_test_methods_with_lists())
815
+ def test_methods_with_lists(method, distname, args):
816
+ # Test that the continuous distributions can accept Python lists
817
+ # as arguments.
818
+ dist = getattr(stats, distname)
819
+ f = getattr(dist, method)
820
+ if distname == 'invweibull' and method.startswith('log'):
821
+ x = [1.5, 2]
822
+ else:
823
+ x = [0.1, 0.2]
824
+
825
+ shape2 = [[a]*2 for a in args]
826
+ loc = [0, 0.1]
827
+ scale = [1, 1.01]
828
+ result = f(x, *shape2, loc=loc, scale=scale)
829
+ npt.assert_allclose(result,
830
+ [f(*v) for v in zip(x, *shape2, loc, scale)],
831
+ rtol=1e-14, atol=5e-14)
832
+
833
+
834
+ def test_burr_fisk_moment_gh13234_regression():
835
+ vals0 = stats.burr.moment(1, 5, 4)
836
+ assert isinstance(vals0, float)
837
+
838
+ vals1 = stats.fisk.moment(1, 8)
839
+ assert isinstance(vals1, float)
840
+
841
+
842
+ def test_moments_with_array_gh12192_regression():
843
+ # array loc and scalar scale
844
+ vals0 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=1)
845
+ expected0 = np.array([1., 2., 3.])
846
+ npt.assert_equal(vals0, expected0)
847
+
848
+ # array loc and invalid scalar scale
849
+ vals1 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=-1)
850
+ expected1 = np.array([np.nan, np.nan, np.nan])
851
+ npt.assert_equal(vals1, expected1)
852
+
853
+ # array loc and array scale with invalid entries
854
+ vals2 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]),
855
+ scale=[-3, 1, 0])
856
+ expected2 = np.array([np.nan, 2., np.nan])
857
+ npt.assert_equal(vals2, expected2)
858
+
859
+ # (loc == 0) & (scale < 0)
860
+ vals3 = stats.norm.moment(order=2, loc=0, scale=-4)
861
+ expected3 = np.nan
862
+ npt.assert_equal(vals3, expected3)
863
+ assert isinstance(vals3, expected3.__class__)
864
+
865
+ # array loc with 0 entries and scale with invalid entries
866
+ vals4 = stats.norm.moment(order=2, loc=[1, 0, 2], scale=[3, -4, -5])
867
+ expected4 = np.array([10., np.nan, np.nan])
868
+ npt.assert_equal(vals4, expected4)
869
+
870
+ # all(loc == 0) & (array scale with invalid entries)
871
+ vals5 = stats.norm.moment(order=2, loc=[0, 0, 0], scale=[5., -2, 100.])
872
+ expected5 = np.array([25., np.nan, 10000.])
873
+ npt.assert_equal(vals5, expected5)
874
+
875
+ # all( (loc == 0) & (scale < 0) )
876
+ vals6 = stats.norm.moment(order=2, loc=[0, 0, 0], scale=[-5., -2, -100.])
877
+ expected6 = np.array([np.nan, np.nan, np.nan])
878
+ npt.assert_equal(vals6, expected6)
879
+
880
+ # scalar args, loc, and scale
881
+ vals7 = stats.chi.moment(order=2, df=1, loc=0, scale=0)
882
+ expected7 = np.nan
883
+ npt.assert_equal(vals7, expected7)
884
+ assert isinstance(vals7, expected7.__class__)
885
+
886
+ # array args, scalar loc, and scalar scale
887
+ vals8 = stats.chi.moment(order=2, df=[1, 2, 3], loc=0, scale=0)
888
+ expected8 = np.array([np.nan, np.nan, np.nan])
889
+ npt.assert_equal(vals8, expected8)
890
+
891
+ # array args, array loc, and array scale
892
+ vals9 = stats.chi.moment(order=2, df=[1, 2, 3], loc=[1., 0., 2.],
893
+ scale=[1., -3., 0.])
894
+ expected9 = np.array([3.59576912, np.nan, np.nan])
895
+ npt.assert_allclose(vals9, expected9, rtol=1e-8)
896
+
897
+ # (n > 4), all(loc != 0), and all(scale != 0)
898
+ vals10 = stats.norm.moment(5, [1., 2.], [1., 2.])
899
+ expected10 = np.array([26., 832.])
900
+ npt.assert_allclose(vals10, expected10, rtol=1e-13)
901
+
902
+ # test broadcasting and more
903
+ a = [-1.1, 0, 1, 2.2, np.pi]
904
+ b = [-1.1, 0, 1, 2.2, np.pi]
905
+ loc = [-1.1, 0, np.sqrt(2)]
906
+ scale = [-2.1, 0, 1, 2.2, np.pi]
907
+
908
+ a = np.array(a).reshape((-1, 1, 1, 1))
909
+ b = np.array(b).reshape((-1, 1, 1))
910
+ loc = np.array(loc).reshape((-1, 1))
911
+ scale = np.array(scale)
912
+
913
+ vals11 = stats.beta.moment(order=2, a=a, b=b, loc=loc, scale=scale)
914
+
915
+ a, b, loc, scale = np.broadcast_arrays(a, b, loc, scale)
916
+
917
+ for i in np.ndenumerate(a):
918
+ with np.errstate(invalid='ignore', divide='ignore'):
919
+ i = i[0] # just get the index
920
+ # check against same function with scalar input
921
+ expected = stats.beta.moment(order=2, a=a[i], b=b[i],
922
+ loc=loc[i], scale=scale[i])
923
+ np.testing.assert_equal(vals11[i], expected)
924
+
925
+
926
+ def test_broadcasting_in_moments_gh12192_regression():
927
+ vals0 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=[[1]])
928
+ expected0 = np.array([[1., 2., 3.]])
929
+ npt.assert_equal(vals0, expected0)
930
+ assert vals0.shape == expected0.shape
931
+
932
+ vals1 = stats.norm.moment(order=1, loc=np.array([[1], [2], [3]]),
933
+ scale=[1, 2, 3])
934
+ expected1 = np.array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]])
935
+ npt.assert_equal(vals1, expected1)
936
+ assert vals1.shape == expected1.shape
937
+
938
+ vals2 = stats.chi.moment(order=1, df=[1., 2., 3.], loc=0., scale=1.)
939
+ expected2 = np.array([0.79788456, 1.25331414, 1.59576912])
940
+ npt.assert_allclose(vals2, expected2, rtol=1e-8)
941
+ assert vals2.shape == expected2.shape
942
+
943
+ vals3 = stats.chi.moment(order=1, df=[[1.], [2.], [3.]], loc=[0., 1., 2.],
944
+ scale=[-1., 0., 3.])
945
+ expected3 = np.array([[np.nan, np.nan, 4.39365368],
946
+ [np.nan, np.nan, 5.75994241],
947
+ [np.nan, np.nan, 6.78730736]])
948
+ npt.assert_allclose(vals3, expected3, rtol=1e-8)
949
+ assert vals3.shape == expected3.shape
950
+
951
+
952
+ @pytest.mark.slow
953
+ def test_kappa3_array_gh13582():
954
+ # https://github.com/scipy/scipy/pull/15140#issuecomment-994958241
955
+ shapes = [0.5, 1.5, 2.5, 3.5, 4.5]
956
+ moments = 'mvsk'
957
+ res = np.array([[stats.kappa3.stats(shape, moments=moment)
958
+ for shape in shapes] for moment in moments])
959
+ res2 = np.array(stats.kappa3.stats(shapes, moments=moments))
960
+ npt.assert_allclose(res, res2)
961
+
962
+
963
+ @pytest.mark.xslow
964
+ def test_kappa4_array_gh13582():
965
+ h = np.array([-0.5, 2.5, 3.5, 4.5, -3])
966
+ k = np.array([-0.5, 1, -1.5, 0, 3.5])
967
+ moments = 'mvsk'
968
+ res = np.array([[stats.kappa4.stats(h[i], k[i], moments=moment)
969
+ for i in range(5)] for moment in moments])
970
+ res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
971
+ npt.assert_allclose(res, res2)
972
+
973
+ # https://github.com/scipy/scipy/pull/15250#discussion_r775112913
974
+ h = np.array([-1, -1/4, -1/4, 1, -1, 0])
975
+ k = np.array([1, 1, 1/2, -1/3, -1, 0])
976
+ res = np.array([[stats.kappa4.stats(h[i], k[i], moments=moment)
977
+ for i in range(6)] for moment in moments])
978
+ res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
979
+ npt.assert_allclose(res, res2)
980
+
981
+ # https://github.com/scipy/scipy/pull/15250#discussion_r775115021
982
+ h = np.array([-1, -0.5, 1])
983
+ k = np.array([-1, -0.5, 0, 1])[:, None]
984
+ res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
985
+ assert res2.shape == (4, 4, 3)
986
+
987
+
988
+ def test_frozen_attributes():
989
+ # gh-14827 reported that all frozen distributions had both pmf and pdf
990
+ # attributes; continuous should have pdf and discrete should have pmf.
991
+ message = "'rv_continuous_frozen' object has no attribute"
992
+ with pytest.raises(AttributeError, match=message):
993
+ stats.norm().pmf
994
+ with pytest.raises(AttributeError, match=message):
995
+ stats.norm().logpmf
996
+ stats.norm.pmf = "herring"
997
+ frozen_norm = stats.norm()
998
+ assert isinstance(frozen_norm, rv_continuous_frozen)
999
+ delattr(stats.norm, 'pmf')
1000
+
1001
+
1002
+ def test_skewnorm_pdf_gh16038():
1003
+ rng = np.random.default_rng(0)
1004
+ x, a = -np.inf, 0
1005
+ npt.assert_equal(stats.skewnorm.pdf(x, a), stats.norm.pdf(x))
1006
+ x, a = rng.random(size=(3, 3)), rng.random(size=(3, 3))
1007
+ mask = rng.random(size=(3, 3)) < 0.5
1008
+ a[mask] = 0
1009
+ x_norm = x[mask]
1010
+ res = stats.skewnorm.pdf(x, a)
1011
+ npt.assert_equal(res[mask], stats.norm.pdf(x_norm))
1012
+ npt.assert_equal(res[~mask], stats.skewnorm.pdf(x[~mask], a[~mask]))
1013
+
1014
+
1015
+ # for scalar input, these functions should return scalar output
1016
+ scalar_out = [['rvs', []], ['pdf', [0]], ['logpdf', [0]], ['cdf', [0]],
1017
+ ['logcdf', [0]], ['sf', [0]], ['logsf', [0]], ['ppf', [0]],
1018
+ ['isf', [0]], ['moment', [1]], ['entropy', []], ['expect', []],
1019
+ ['median', []], ['mean', []], ['std', []], ['var', []]]
1020
+ scalars_out = [['interval', [0.95]], ['support', []], ['stats', ['mv']]]
1021
+
1022
+
1023
+ @pytest.mark.parametrize('case', scalar_out + scalars_out)
1024
+ def test_scalar_for_scalar(case):
1025
+ # Some rv_continuous functions returned 0d array instead of NumPy scalar
1026
+ # Guard against regression
1027
+ method_name, args = case
1028
+ method = getattr(stats.norm(), method_name)
1029
+ res = method(*args)
1030
+ if case in scalar_out:
1031
+ assert isinstance(res, np.number)
1032
+ else:
1033
+ assert isinstance(res[0], np.number)
1034
+ assert isinstance(res[1], np.number)
1035
+
1036
+
1037
+ def test_scalar_for_scalar2():
1038
+ # test methods that are not attributes of frozen distributions
1039
+ res = stats.norm.fit([1, 2, 3])
1040
+ assert isinstance(res[0], np.number)
1041
+ assert isinstance(res[1], np.number)
1042
+ res = stats.norm.fit_loc_scale([1, 2, 3])
1043
+ assert isinstance(res[0], np.number)
1044
+ assert isinstance(res[1], np.number)
1045
+ res = stats.norm.nnlf((0, 1), [1, 2, 3])
1046
+ assert isinstance(res, np.number)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_fit_censored.py ADDED
@@ -0,0 +1,683 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Tests for fitting specific distributions to censored data.
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_allclose
5
+
6
+ from scipy.optimize import fmin
7
+ from scipy.stats import (CensoredData, beta, cauchy, chi2, expon, gamma,
8
+ gumbel_l, gumbel_r, invgauss, invweibull, laplace,
9
+ logistic, lognorm, nct, ncx2, norm, weibull_max,
10
+ weibull_min)
11
+
12
+
13
+ # In some tests, we'll use this optimizer for improved accuracy.
14
def optimizer(func, x0, args=(), disp=0):
    """Nelder-Mead ``fmin`` with tightened tolerances, for accurate fits."""
    tight = dict(xtol=1e-12, ftol=1e-12)
    return fmin(func, x0, args=args, disp=disp, **tight)
16
+
17
+
18
def test_beta():
    """
    Fit beta shape parameters to interval-censored data.

    Reference values computed in R with fitdistrplus:

    > library(fitdistrplus)
    > data <- data.frame(left=c(0.10, 0.50, 0.75, 0.80),
    +                    right=c(0.20, 0.55, 0.90, 0.95))
    > result = fitdistcens(data, 'beta', control=list(reltol=1e-14))
    > result
      shape1 1.419941
      shape2 1.027066
    > result$sd
      shape1 0.9914177
      shape2 0.6866565
    """
    intervals = [[0.10, 0.20],
                 [0.50, 0.55],
                 [0.75, 0.90],
                 [0.80, 0.95]]
    data = CensoredData(interval=intervals)

    # Fit only the shape parameters; loc and scale are held fixed.
    a, b, loc, scale = beta.fit(data, floc=0, fscale=1, optimizer=optimizer)

    assert_allclose(a, 1.419941, rtol=5e-6)
    assert_allclose(b, 1.027066, rtol=5e-6)
    assert loc == 0
    assert scale == 1
51
+
52
+
53
def test_cauchy_right_censored():
    """
    Fit the Cauchy distribution to right-censored data.

    Reference values computed in R with fitdistrplus, using two
    uncensored observations [1, 10] and one right-censored value [30]:

    > library(fitdistrplus)
    > data <- data.frame(left=c(1, 10, 30), right=c(1, 10, NA))
    > result = fitdistcens(data, 'cauchy', control=list(reltol=1e-14))
    > result
      location 7.100001
      scale    7.455866
    """
    data = CensoredData(uncensored=[1, 10], right=[30])
    loc, scale = cauchy.fit(data, optimizer=optimizer)
    assert_allclose(loc, 7.10001, rtol=5e-6)
    assert_allclose(scale, 7.455866, rtol=5e-6)
75
+
76
+
77
def test_cauchy_mixed():
    """
    Fit the Cauchy distribution to data with mixed censoring.

    Reference values computed in R with fitdistrplus, using two
    uncensored observations [1, 10], one left-censored value [1], one
    right-censored value [30] and one interval-censored value [[4, 8]]:

    > library(fitdistrplus)
    > data <- data.frame(left=c(NA, 1, 4, 10, 30), right=c(1, 1, 8, 10, NA))
    > result = fitdistcens(data, 'cauchy', control=list(reltol=1e-14))
    > result
      location 4.605150
      scale    5.900852
    """
    data = CensoredData(uncensored=[1, 10], left=[1], right=[30],
                        interval=[[4, 8]])
    loc, scale = cauchy.fit(data, optimizer=optimizer)
    assert_allclose(loc, 4.605150, rtol=5e-6)
    assert_allclose(scale, 5.900852, rtol=5e-6)
103
+
104
+
105
def test_chi2_mixed():
    """
    Fit just the shape parameter (df) of chi2 to mixed censored data.

    Reference value from R fitdistrplus::fitdistcens ('chisq',
    control=list(reltol=1e-14)) on two uncensored values [1, 10], one
    left-censored [1], one right-censored [30], and one
    interval-censored [[4, 8]]:

        df 5.060329
    """
    censored = CensoredData(uncensored=[1, 10], left=[1], right=[30],
                            interval=[[4, 8]])
    # loc and scale are held fixed; only df is estimated.
    df, loc, scale = chi2.fit(censored, floc=0, fscale=1,
                              optimizer=optimizer)
    assert_allclose(df, 5.060329, rtol=5e-6)
    assert (loc, scale) == (0, 1)
131
+
132
+
133
def test_expon_right_censored():
    """
    Fit expon (loc fixed at 0) to right-censored data.

    For the exponential distribution with loc=0, the MLE of the scale
    given n uncensored observations x[0]..x[n-1] and m right-censored
    observations x[n]..x[n+m-1] has the closed form

        scale = sum(x) / n

    i.e. the sum of *all* observations divided by the number of
    uncensored ones (see e.g.
    https://en.wikipedia.org/wiki/Censoring_(statistics)#Likelihood),
    so the fit can be checked against the exact solution.  (An R
    fitdistcens run on the same data gives scale 19.85 with standard
    error 6.277119, consistent with this formula.)
    """
    # 10 uncensored values followed by 6 right-censored values.
    values = [1, 2.5, 3, 6, 7.5, 10, 12, 12, 14.5, 15, 16, 16, 20, 20, 21, 22]
    flags = [False] * 10 + [True] * 6
    data = CensoredData.right_censored(values, flags)

    loc, scale = expon.fit(data, floc=0, optimizer=optimizer)

    assert loc == 0
    # Exact MLE: total of all observations over the uncensored count.
    n_uncensored = len(data) - data.num_censored()
    total = data._uncensored.sum() + data._right.sum()
    assert_allclose(scale, total / n_uncensored, 1e-8)
193
+
194
+
195
def test_gamma_right_censored():
    """
    Fit gamma shape and scale to data with one right-censored value.

    Reference values from R fitdistrplus::fitdistcens ('gamma',
    control=list(reltol=1e-13), start=list(shape=1, scale=10)):

        shape 1.447623
        scale 8.360197

    (R also reports standard errors 0.7053086 and 5.1016531.)
    """
    # Only the final observation (25.0) is right-censored.
    observations = [2.5, 2.9, 3.8, 9.1, 9.3, 12.0, 23.0, 25.0]
    x = CensoredData.right_censored(observations, [0] * 7 + [1])

    a, loc, scale = gamma.fit(x, floc=0, optimizer=optimizer)

    assert loc == 0
    assert_allclose(a, 1.447623, rtol=5e-6)
    assert_allclose(scale, 8.360197, rtol=5e-6)
227
+
228
+
229
def test_gumbel():
    """
    Fit gumbel_r and gumbel_l to censored data.

    Reference values computed in R with evd + fitdistrplus
    (fitdistcens(data, 'gumbel', control=list(reltol=1e-14),
    start=list(loc=4, scale=5))), which corresponds to gumbel_r:

        loc   4.487853
        scale 4.843640
    """
    # One interval-censored value [0, 1] and two right-censored at 10;
    # the values 2, 3 and 9 are exact.
    exact = np.array([2, 3, 9])
    upper = np.array([10, 10])
    bracket = np.array([[0, 1]])
    data = CensoredData(exact, right=upper, interval=bracket)
    loc, scale = gumbel_r.fit(data, optimizer=optimizer)
    assert_allclose(loc, 4.487853, rtol=5e-6)
    assert_allclose(scale, 4.843640, rtol=5e-6)

    # Mirror the data about 0 (right-censoring becomes left-censoring,
    # interval endpoints swap); gumbel_l must then recover the same fit
    # with the location negated.
    mirrored = CensoredData(-exact, left=-upper,
                            interval=-bracket[:, ::-1])
    loc2, scale2 = gumbel_l.fit(mirrored, optimizer=optimizer)
    assert_allclose(loc2, -4.487853, rtol=5e-6)
    assert_allclose(scale2, 4.843640, rtol=5e-6)
267
+
268
+
269
def test_invgauss():
    """
    Fit invgauss to data with one left- and one right-censored value.

    Reference values from R (statmod + fitdistrplus).  With the
    dispersion fixed at 1 (equivalent to fscale=1 here), R reports

        mean 0.853469   (sd 0.247636)

    With the dispersion free, R reports

        mean 0.8699819, dispersion 1.2261362

    statmod's parametrization of the inverse Gaussian differs from
    SciPy's (see https://arxiv.org/abs/1603.06687); the translation is

        scale = 1/dispersion      -> 0.8155701
        mu    = mean*dispersion   -> 1.066716

    which are the SciPy scale and shape parameters expected below.
    """
    # One point is left-censored at 0.15, one is right-censored at 3.
    uncensored = [0.4813096, 0.5571880, 0.5132463, 0.3801414,
                  0.5904386, 0.4822340, 0.3478597, 0.7191797,
                  1.5810902, 0.4442299]
    data = CensoredData(uncensored=uncensored, left=[0.15], right=[3])

    # First fit with both loc and scale fixed (shape only).
    mu, loc, scale = invgauss.fit(data, floc=0, fscale=1,
                                  optimizer=optimizer)
    assert_allclose(mu, 0.853469, rtol=5e-5)
    assert loc == 0
    assert scale == 1

    # Now let the scale be estimated as well.
    mu, loc, scale = invgauss.fit(data, floc=0, optimizer=optimizer)
    assert_allclose(mu, 1.066716, rtol=5e-5)
    assert loc == 0
    assert_allclose(scale, 0.8155701, rtol=5e-5)
348
+
349
+
350
def test_invweibull():
    """
    Fit invweibull (floc=0) to censored data.

    Reference values from R: the 'frechet' distribution from the evd
    package matches SciPy's invweibull.  With
    fitdistcens(data, 'frechet', control=list(reltol=1e-14),
    start=list(loc=4, scale=5)) and loc fixed at 0, R reports

        scale 2.7902200
        shape 0.6379845
    """
    # One interval-censored value [0, 1], two right-censored at 10;
    # the values 2, 3 and 9 are exact.
    data = CensoredData(uncensored=[2, 3, 9], right=[10, 10],
                        interval=[[0, 1]])
    c, loc, scale = invweibull.fit(data, floc=0, optimizer=optimizer)
    assert loc == 0
    assert_allclose(c, 0.6379845, rtol=5e-6)
    assert_allclose(scale, 2.7902200, rtol=5e-6)
384
+
385
+
386
def test_laplace():
    """
    Fit the Laplace distribution to left- and right-censored data.

    Reference values from R fitdistrplus::fitdistcens with hand-written
    dlaplace/plaplace functions (control=list(reltol=1e-13),
    start=list(location=10, scale=10)):

        location 14.79870   (sd 0.1758864)
        scale    30.93601   (sd 7.0972125)
    """
    # Observations equal to -50 are left-censored; those equal to 50
    # are right-censored; all others are exact.
    obs = np.array([-50.0, -41.564, 50.0, 15.7384, 50.0, 10.0452, -2.0684,
                    -19.5399, 50.0, 9.0005, 27.1227, 4.3113, -3.7372,
                    25.3111, 14.7987, 34.0887, 50.0, 42.8496, 18.5862,
                    32.8921, 9.0448, -27.4591, -50.0, 19.5083, -9.7199])
    uncensored = obs[(obs != -50.0) & (obs != 50)]
    data = CensoredData(uncensored=uncensored,
                        left=obs[obs == -50.0],
                        right=obs[obs == 50.0])
    loc, scale = laplace.fit(data, loc=10, scale=10, optimizer=optimizer)
    assert_allclose((loc, scale), (14.79870, 30.93601), rtol=5e-6)
436
+
437
+
438
def test_logistic():
    """
    Fit the logistic distribution to left-censored data.

    Reference values from R fitdistrplus::fitdistcens ('logis',
    control=list(reltol=1e-14)), with zeros marking left-censored
    observations:

        location 14.633459   (sd 2.931505)
        scale     9.232736   (sd 1.546879)
    """
    # Zeros mark left-censored observations (true value is below 0).
    x = np.array([13.5401, 37.4235, 11.906, 13.998, 0.0, 0.4023, 0.0, 10.9044,
                  21.0629, 9.6985, 0.0, 12.9016, 39.164, 34.6396, 0.0, 20.3665,
                  16.5889, 18.0952, 45.3818, 35.3306, 8.4949, 3.4041, 0.0,
                  7.2828, 37.1265, 6.5969, 17.6868, 17.4977, 16.3391,
                  36.0541])
    data = CensoredData.left_censored(x, censored=(x == 0))
    loc, scale = logistic.fit(data, optimizer=optimizer)
    # Note the tighter tolerance on the location.
    assert_allclose(loc, 14.633459, rtol=5e-7)
    assert_allclose(scale, 9.232736, rtol=5e-6)
477
+
478
+
479
def test_lognorm():
    """
    Fit lognorm (floc=0) to right-censored failure-time data.

    Ref: https://math.montana.edu/jobo/st528/documents/relc.pdf

    The data is the locomotive control time-to-failure example that
    starts on page 8 of the PDF (shown page number 270).  The document
    includes SAS output for the data; the expected values below are
    from the SAS output on the 17th page (labeled 279).
    """
    # 37 observed failure mileages plus 59 units still running at 135
    # (right-censored).
    miles_to_fail = [22.5, 37.5, 46.0, 48.5, 51.5, 53.0, 54.5, 57.5, 66.5,
                     68.0, 69.5, 76.5, 77.0, 78.5, 80.0, 81.5, 82.0, 83.0,
                     84.0, 91.5, 93.5, 102.5, 107.0, 108.5, 112.5, 113.5,
                     116.0, 117.0, 118.5, 119.0, 120.0, 122.5, 123.0, 127.5,
                     131.0, 132.5, 134.0]

    n_failed = len(miles_to_fail)
    data = CensoredData.right_censored(miles_to_fail + [135] * 59,
                                       [0] * n_failed + [1] * 59)
    sigma, loc, scale = lognorm.fit(data, floc=0)

    assert loc == 0
    # mu of the underlying normal distribution is log(scale).
    assert_allclose(np.log(scale), 5.1169, rtol=5e-4)
    assert_allclose(sigma, 0.7055, rtol=5e-3)
508
+
509
+
510
def test_nct():
    """
    Fit the shape parameters (df, nc) of the noncentral t distribution
    to right-censored data; loc and scale are held fixed.

    Reference values from R fitdistrplus::fitdistcens ('t',
    control=list(reltol=1e-14), start=list(df=1, ncp=2)) on six exact
    values [1, 2, 3, 5, 8, 10] and two right-censored at 25:

        df  0.5432336
        ncp 2.8893565
    """
    data = CensoredData.right_censored([1, 2, 3, 5, 8, 10, 25, 25],
                                       [0] * 6 + [1] * 2)
    # Fit just df and nc; loc and scale are fixed.
    with np.errstate(over='ignore'):  # remove context when gh-14901 is closed
        df, nc, loc, scale = nct.fit(data, floc=0, fscale=1,
                                     optimizer=optimizer)
    assert_allclose(df, 0.5432336, rtol=5e-6)
    assert_allclose(nc, 2.8893565, rtol=5e-6)
    assert (loc, scale) == (0, 1)
539
+
540
+
541
def test_ncx2():
    """
    Fit the shape parameters (df, ncp) of ncx2 to mixed censored data;
    loc and scale are held fixed.

    Reference values from R fitdistrplus::fitdistcens ('chisq',
    control=list(reltol=1e-14), start=list(df=1, ncp=2)) on five exact
    values [2.7, 0.2, 6.5, 0.4, 0.1], one interval-censored value
    [[0.6, 1.0]] and two right-censored values [8, 8]:

        df  1.052871
        ncp 2.362934
    """
    data = CensoredData(uncensored=[2.7, 0.2, 6.5, 0.4, 0.1], right=[8, 8],
                        interval=[[0.6, 1.0]])
    with np.errstate(over='ignore'):  # remove context when gh-14901 is closed
        df, ncp, loc, scale = ncx2.fit(data, floc=0, fscale=1,
                                       optimizer=optimizer)
    assert_allclose(df, 1.052871, rtol=5e-6)
    assert_allclose(ncp, 2.362934, rtol=5e-6)
    assert (loc, scale) == (0, 1)
572
+
573
+
574
def test_norm():
    """
    Fit the normal distribution to interval-censored data.

    Reference values from R fitdistrplus::fitdistcens ('norm',
    control=list(reltol=1e-14)):

        mean 0.5919990   (sd 0.1444432)
        sd   0.2868042   (sd 0.1029451)
    """
    brackets = [[0.10, 0.20],
                [0.50, 0.55],
                [0.75, 0.90],
                [0.80, 0.95]]
    data = CensoredData(interval=brackets)

    loc, scale = norm.fit(data, optimizer=optimizer)

    assert_allclose((loc, scale), (0.5919990, 0.2868042), rtol=5e-6)
604
+
605
+
606
def test_weibull_censored1():
    # Ref: http://www.ams.sunysb.edu/~zhu/ams588/Lecture_3_likelihood.pdf

    # Survival times; '*' indicates right-censored.
    s = "3,5,6*,8,10*,11*,15,20*,22,23,27*,29,32,35,40,26,28,33*,21,24*"

    times = []
    cens = []
    for token in s.split(','):
        fields = token.split('*')
        times.append(float(fields[0]))
        cens.append(len(fields) == 2)
    data = CensoredData.right_censored(times, cens)

    c, loc, scale = weibull_min.fit(data, floc=0)

    # Expected values are from the reference.
    assert_allclose(c, 2.149, rtol=1e-3)
    assert loc == 0
    assert_allclose(scale, 28.99, rtol=1e-3)

    # Negating the data turns the right-censored values into
    # left-censored ones; fitting weibull_max to the flipped data must
    # recover the same parameters.
    data2 = CensoredData.left_censored(-np.array(times), cens)

    c2, loc2, scale2 = weibull_max.fit(data2, floc=0)

    assert_allclose(c2, 2.149, rtol=1e-3)
    assert loc2 == 0
    assert_allclose(scale2, 28.99, rtol=1e-3)
633
+
634
+
635
def test_weibull_min_sas1():
    # Data and SAS results from
    # https://support.sas.com/documentation/cdl/en/qcug/63922/HTML/default/
    # viewer.htm#qcug_reliability_sect004.htm

    # Pairs of (lifetime, censor flag); flag 1 means right-censored.
    text = """
         450 0    460 1   1150 0   1150 0   1560 1
        1600 0   1660 1   1850 1   1850 1   1850 1
        1850 1   1850 1   2030 1   2030 1   2030 1
        2070 0   2070 0   2080 0   2200 1   3000 1
        3000 1   3000 1   3000 1   3100 0   3200 1
        3450 0   3750 1   3750 1   4150 1   4150 1
        4150 1   4150 1   4300 1   4300 1   4300 1
        4300 1   4600 0   4850 1   4850 1   4850 1
        4850 1   5000 1   5000 1   5000 1   6100 1
        6100 0   6100 1   6100 1   6300 1   6450 1
        6450 1   6700 1   7450 1   7800 1   7800 1
        8100 1   8100 1   8200 1   8500 1   8500 1
        8500 1   8750 1   8750 0   8750 1   9400 1
        9900 1  10100 1  10100 1  10100 1  11500 1
    """

    pairs = np.array(text.split(), dtype=int).reshape(-1, 2)
    life = pairs[:, 0] / 1000.0
    cens = pairs[:, 1]

    data = CensoredData.right_censored(life, cens)

    c, loc, scale = weibull_min.fit(data, floc=0, optimizer=optimizer)
    assert_allclose(c, 1.0584, rtol=1e-4)
    assert_allclose(scale, 26.2968, rtol=1e-5)
    assert loc == 0
666
+
667
+
668
def test_weibull_min_sas2():
    # http://support.sas.com/documentation/cdl/en/ormpug/67517/HTML/default/
    # viewer.htm#ormpug_nlpsolver_examples06.htm

    # The final two observations are right-censored.
    days = np.array([143, 164, 188, 188, 190, 192, 206, 209, 213, 216, 220,
                     227, 230, 234, 246, 265, 304, 216, 244])
    flags = [0] * (len(days) - 2) + [1] * 2

    data = CensoredData.right_censored(days, flags)

    # Start the optimization from c=1, loc=100, scale=100.
    c, loc, scale = weibull_min.fit(data, 1, loc=100, scale=100,
                                    optimizer=optimizer)

    assert_allclose((c, loc, scale), (2.7112, 122.03, 108.37), rtol=5e-4)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_crosstab.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import numpy as np
3
+ from numpy.testing import assert_array_equal, assert_equal
4
+ from scipy.stats.contingency import crosstab
5
+
6
+
7
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_basic(sparse):
    # Two aligned input sequences; the contingency table is indexed by
    # the sorted unique values of each sequence.
    a = [0, 0, 9, 9, 0, 0, 9]
    b = [2, 1, 3, 1, 2, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [1, 2, 3]
    expected_count = np.array([[1, 2, 1],
                               [1, 0, 2]])
    (avals, bvals), count = crosstab(a, b, sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # With sparse=True, `count` is a sparse matrix; densify before
        # comparing.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
22
+
23
+
24
def test_crosstab_basic_1d():
    """A single input sequence reduces crosstab to value counting."""
    values = [1, 2, 3, 1, 2, 3, 3]
    (levels,), table = crosstab(values)
    assert_array_equal(levels, [1, 2, 3])
    assert_array_equal(table, np.array([2, 2, 3]))
32
+
33
+
34
def test_crosstab_basic_3d():
    """Three input sequences produce a 3-D contingency table."""
    a = 'a'
    b = 'b'
    x = [0, 0, 9, 9, 0, 0, 9, 9]
    y = [a, a, a, a, b, b, b, a]
    z = [1, 2, 3, 1, 2, 3, 3, 1]
    (xvals, yvals, zvals), table = crosstab(x, y, z)
    assert_array_equal(xvals, [0, 9])
    assert_array_equal(yvals, [a, b])
    assert_array_equal(zvals, [1, 2, 3])
    expected = np.array([[[1, 1, 0],
                          [0, 1, 1]],
                         [[2, 0, 1],
                          [0, 0, 1]]])
    assert_array_equal(table, expected)
53
+
54
+
55
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_levels(sparse):
    # `levels` can pin the values counted per axis; None means "use the
    # unique values found in the data".  Here the second axis includes
    # 0, which never occurs in `b`, so its column is all zeros.
    a = [0, 0, 9, 9, 0, 0, 9]
    b = [1, 2, 3, 1, 2, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [0, 1, 2, 3]
    expected_count = np.array([[0, 1, 2, 1],
                               [0, 1, 0, 2]])
    (avals, bvals), count = crosstab(a, b, levels=[None, [0, 1, 2, 3]],
                                     sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # Densify the sparse result before comparing.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
71
+
72
+
73
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_extra_levels(sparse):
    # The pair of values (-1, 3) will be ignored, because we explicitly
    # request the counted `a` values to be [0, 9].
    a = [0, 0, 9, 9, 0, 0, 9, -1]
    b = [1, 2, 3, 1, 2, 3, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [0, 1, 2, 3]
    expected_count = np.array([[0, 1, 2, 1],
                               [0, 1, 0, 2]])
    (avals, bvals), count = crosstab(a, b, levels=[[0, 9], [0, 1, 2, 3]],
                                     sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # Densify the sparse result before comparing.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
91
+
92
+
93
def test_validation_at_least_one():
    # Calling crosstab with no sequences at all must raise TypeError.
    with pytest.raises(TypeError, match='At least one'):
        crosstab()
96
+
97
+
98
def test_validation_same_lengths():
    # All input sequences must be aligned (equal length).
    with pytest.raises(ValueError, match='must have the same length'):
        crosstab([1, 2], [1, 2, 3, 4])
101
+
102
+
103
def test_validation_sparse_only_two_args():
    # sparse=True produces a 2-D sparse matrix, so it is only valid for
    # exactly two input sequences.
    with pytest.raises(ValueError, match='only two input sequences'):
        crosstab([0, 1, 1], [8, 8, 9], [1, 3, 3], sparse=True)
106
+
107
+
108
def test_validation_len_levels_matches_args():
    # When given, `levels` must supply one entry per input sequence.
    with pytest.raises(ValueError, match='number of input sequences'):
        crosstab([0, 1, 1], [8, 8, 9], levels=([0, 1, 2, 3],))
111
+
112
+
113
def test_result():
    """The result object unpacks as the (elements, count) pair."""
    res = crosstab([0, 1], [1, 2])
    assert_equal(tuple(res), (res.elements, res.count))
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_basic.py ADDED
@@ -0,0 +1,563 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy.testing as npt
2
+ from numpy.testing import assert_allclose
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from scipy import stats
8
+ from .common_tests import (check_normalization, check_moment,
9
+ check_mean_expect,
10
+ check_var_expect, check_skew_expect,
11
+ check_kurt_expect, check_entropy,
12
+ check_private_entropy, check_edge_support,
13
+ check_named_args, check_random_state_property,
14
+ check_pickling, check_rvs_broadcast,
15
+ check_freezing,)
16
+ from scipy.stats._distr_params import distdiscrete, invdistdiscrete
17
+ from scipy.stats._distn_infrastructure import rv_discrete_frozen
18
+
19
# A sample distribution built from explicit (xk, pk) values, appended
# so the generic checks below also exercise an rv_discrete instance
# (the tests' try/except TypeError on getattr handles this entry).
vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4])
distdiscrete += [[stats.rv_discrete(values=vals), ()]]

# For these distributions, test_discrete_basic only runs with test mode full
distslow = {'zipfian', 'nhypergeom'}

# Override number of ULPs adjustment for `check_cdf_ppf`
roundtrip_cdf_ppf_exceptions = {'nbinom': 30}
27
+
28
def cases_test_discrete_basic():
    # Yield (distname, arg, first_case) parameter triples.  Slow
    # distributions are wrapped in pytest.param with the `slow` mark;
    # for the rest, the third element flags the first occurrence of a
    # distribution name so once-per-distribution checks run only once.
    seen = set()
    for distname, arg in distdiscrete:
        if distname in distslow:
            yield pytest.param(distname, arg, distname, marks=pytest.mark.slow)
        else:
            yield distname, arg, distname not in seen
            seen.add(distname)
36
+
37
+
38
@pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic())
def test_discrete_basic(distname, arg, first_case):
    # `distname` may be an rv_discrete instance (the sample
    # distribution appended to distdiscrete) instead of a name string;
    # getattr then raises TypeError and the instance is used directly.
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'sample distribution'
    np.random.seed(9765456)
    # Draw a sample and run the generic cdf/ppf/pmf consistency checks
    # over the observed support.
    rvs = distfn.rvs(size=2000, *arg)
    supp = np.unique(rvs)
    m, v = distfn.stats(*arg)
    check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')

    check_pmf_cdf(distfn, arg, distname)
    check_oth(distfn, arg, supp, distname + ' oth')
    check_edge_support(distfn, arg)

    # Goodness-of-fit of the sample against the distribution.
    alpha = 0.01
    check_discrete_chisquare(distfn, arg, rvs, alpha,
                             distname + ' chisquare')

    # The remaining checks are per-distribution, not per-parameter-set,
    # so they run only for the first case of each distribution.
    if first_case:
        locscale_defaults = (0,)
        meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
                 distfn.logsf]
        # make sure arguments are within support
        # for some distributions, this needs to be overridden
        spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0,
                  'nchypergeom_wallenius': 6}
        k = spec_k.get(distname, 1)
        check_named_args(distfn, k, arg, locscale_defaults, meths)
        if distname != 'sample distribution':
            check_scale_docstring(distfn)
        check_random_state_property(distfn, arg)
        check_pickling(distfn, arg)
        check_freezing(distfn, arg)

        # Entropy
        check_entropy(distfn, arg, distname)
        if distfn.__class__._entropy != stats.rv_discrete._entropy:
            check_private_entropy(distfn, arg, stats.rv_discrete)
79
+
80
+
81
@pytest.mark.parametrize('distname,arg', distdiscrete)
def test_moments(distname, arg):
    # `distname` may be an rv_discrete instance rather than a name;
    # see test_discrete_basic for the same handling.
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'sample distribution'
    m, v, s, k = distfn.stats(*arg, moments='mvsk')
    check_normalization(distfn, arg, distname)

    # compare `stats` and `moment` methods
    check_moment(distfn, arg, m, v, distname)
    check_mean_expect(distfn, arg, m, distname)
    check_var_expect(distfn, arg, m, v, distname)
    check_skew_expect(distfn, arg, m, v, s, distname)
    with np.testing.suppress_warnings() as sup:
        # These distributions emit RuntimeWarnings during the kurtosis
        # expectation check; silence only those.
        if distname in ['zipf', 'betanbinom']:
            sup.filter(RuntimeWarning)
        check_kurt_expect(distfn, arg, m, v, k, distname)

    # frozen distr moments: first raw moment is the mean; second raw
    # moment is var + mean**2.
    check_moment_frozen(distfn, arg, m, 1)
    check_moment_frozen(distfn, arg, v+m*m, 2)
104
+
105
+
106
@pytest.mark.parametrize('dist,shape_args', distdiscrete)
def test_rvs_broadcast(dist, shape_args):
    # If shape_only is True, it means the _rvs method of the
    # distribution uses more than one random number to generate a random
    # variate.  That means the result of using rvs with broadcasting or
    # with a nontrivial size will not necessarily be the same as using the
    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
    # of the results, not the values.
    # Whether or not a distribution is in the following list is an
    # implementation detail of the distribution, not a requirement.  If
    # the implementation the rvs() method of a distribution changes, this
    # test might also have to be changed.
    shape_only = dist in ['betabinom', 'betanbinom', 'skellam', 'yulesimon',
                          'dlaplace', 'nchypergeom_fisher',
                          'nchypergeom_wallenius']

    # `dist` may be an rv_discrete instance rather than a name; build a
    # readable label for it in that case.
    try:
        distfunc = getattr(stats, dist)
    except TypeError:
        distfunc = dist
        dist = f'rv_discrete(values=({dist.xk!r}, {dist.pk!r}))'
    loc = np.zeros(2)
    nargs = distfunc.numargs
    allargs = []
    bshape = []
    # Generate shape parameter arguments with mutually broadcastable
    # shapes: the k-th parameter gets shape (k+3, 1, ..., 1).
    for k in range(nargs):
        shp = (k + 3,) + (1,)*(k + 1)
        param_val = shape_args[k]
        allargs.append(np.full(shp, param_val))
        bshape.insert(0, shp[0])
    allargs.append(loc)
    bshape.append(loc.size)
    # bshape holds the expected shape when loc, scale, and the shape
    # parameters are all broadcast together.
    check_rvs_broadcast(
        distfunc, dist, allargs, bshape, shape_only, [np.dtype(int)]
    )
144
+
145
+
146
@pytest.mark.parametrize('dist,args', distdiscrete)
def test_ppf_with_loc(dist, args):
    """ppf at q=0 and q=1 must land at (support_min - 1 + loc, support_max + loc)."""
    try:
        distfn = getattr(stats, dist)
    except TypeError:
        # `dist` is already an rv_sample instance, not a name string.
        distfn = dist
    np.random.seed(1942349)
    # Check with a negative, a zero, and a positive relocation.
    shifts = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
    lo, hi = distfn.support(*args)
    for shift in shifts:
        got = [distfn.ppf(0.0, *args, loc=shift),
               distfn.ppf(1.0, *args, loc=shift)]
        npt.assert_array_equal([lo - 1 + shift, hi + shift], got)
161
+
162
+
163
@pytest.mark.parametrize('dist, args', distdiscrete)
def test_isf_with_loc(dist, args):
    """isf at q=0 and q=1 must honor `loc`, for scalar and broadcast locations."""
    try:
        distfn = getattr(stats, dist)
    except TypeError:
        distfn = dist
    np.random.seed(1942349)
    # Scalar relocations (negative, none, positive), then (5, 3)-shaped ones
    # to exercise broadcasting of `loc`.  The random draws happen in the same
    # order as before, so the sampled values are unchanged.
    scalar_shifts = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
    array_shifts = [np.random.randint(-10, -1, size=(5, 3)),
                    np.zeros((5, 3)),
                    np.random.randint(1, 10, size=(5, 3))]
    lo, hi = distfn.support(*args)
    for shift in scalar_shifts + array_shifts:
        expected = hi + shift, lo - 1 + shift
        got = (distfn.isf(0., *args, loc=shift),
               distfn.isf(1., *args, loc=shift))
        npt.assert_array_equal(expected, got)
186
+
187
+
188
def check_cdf_ppf(distfn, arg, supp, msg):
    """Check that ppf is the left-inverse of the (step-function) cdf.

    Relies on the module-level ``roundtrip_cdf_ppf_exceptions`` mapping for
    per-distribution ULP tolerances.
    """
    # supp is assumed to be an array of integers in the support of distfn
    # (but not necessarily all the integers in the support).
    # This test assumes that the PMF of any value in the support of the
    # distribution is greater than 1e-8.

    # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
    cdf_supp = distfn.cdf(supp, *arg)
    # In very rare cases, the finite precision calculation of ppf(cdf(supp))
    # can produce an array in which an element is off by one. We nudge the
    # CDF values down by a few ULPs help to avoid this.
    n_ulps = roundtrip_cdf_ppf_exceptions.get(distfn.name, 15)
    cdf_supp0 = cdf_supp - n_ulps*np.spacing(cdf_supp)
    npt.assert_array_equal(distfn.ppf(cdf_supp0, *arg),
                           supp, msg + '-roundtrip')
    # Repeat the same calculation, but with the CDF values decreased by 1e-8.
    npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg),
                           supp, msg + '-roundtrip')

    # For lattice distributions (no explicit `xk`), stepping just past the
    # CDF value must advance the ppf by one increment `distfn.inc`.
    if not hasattr(distfn, 'xk'):
        _a, _b = distfn.support(*arg)
        supp1 = supp[supp < _b]
        npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg),
                               supp1 + distfn.inc, msg + ' ppf-cdf-next')
212
+
213
+
214
def check_pmf_cdf(distfn, arg, distname):
    """Verify that the CDF agrees with the cumulative sum of the PMF.

    Also checks that the PMF vanishes at non-integral points and that a
    frozen distribution with a non-integral ``loc`` behaves consistently.
    """
    if hasattr(distfn, 'xk'):
        index = distfn.xk
    else:
        startind = int(distfn.ppf(0.01, *arg) - 1)
        index = list(range(startind, startind + 10))
    cdf_vals = distfn.cdf(index, *arg)
    pmf_cumsum = distfn.pmf(index, *arg).cumsum()

    # skellam relies on ncx2, which limits the achievable accuracy.
    atol = rtol = 1e-5 if distname == 'skellam' else 1e-10
    npt.assert_allclose(cdf_vals - cdf_vals[0], pmf_cumsum - pmf_cumsum[0],
                        atol=atol, rtol=rtol)

    # The PMF at midpoints between support points must be exactly zero.
    k = np.asarray(index)
    k_shifted = k[:-1] + np.diff(k)/2
    npt.assert_equal(distfn.pmf(k_shifted, *arg), 0)

    # Repeat with a frozen distribution and a non-integral loc.
    loc = 0.5
    frozen = distfn(*arg, loc=loc)
    npt.assert_allclose(frozen.pmf(k[1:] + loc), np.diff(frozen.cdf(k + loc)))
    npt.assert_equal(frozen.pmf(k_shifted + loc), 0)
239
+
240
+
241
def check_moment_frozen(distfn, arg, m, k):
    """The k-th moment of the frozen distribution must equal `m`."""
    frozen = distfn(*arg)
    npt.assert_allclose(frozen.moment(k), m, atol=1e-10, rtol=1e-10)
244
+
245
+
246
def check_oth(distfn, arg, supp, msg):
    """Cross-check sf/isf against cdf/ppf and sanity-check the sf-median."""
    # sf must complement cdf on the support.
    npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg),
                        atol=1e-10, rtol=1e-10)

    # isf(q) and ppf(1 - q) describe the same quantile.
    quantiles = np.linspace(0.01, 0.99, 20)
    npt.assert_allclose(distfn.isf(quantiles, *arg),
                        distfn.ppf(1. - quantiles, *arg),
                        atol=1e-10, rtol=1e-10)

    # The point with sf = 0.5 must have more than half the mass on each side
    # of its immediate neighbors.
    median_sf = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)
258
+
259
+
260
def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
    """Perform chisquare test for random sample of a discrete distribution

    Parameters
    ----------
    distfn : rv_discrete
        the discrete distribution to test against
    arg : sequence
        parameters of distribution
    rvs : array_like
        random sample to test, assumed to be drawn from ``distfn(*arg)``
    alpha : float
        significance level, threshold for p-value
    msg : str
        identifier used in the assertion failure message

    Raises
    ------
    AssertionError
        if the chisquare test p-value is not greater than ``alpha``
    """
    wsupp = 0.05

    # construct intervals with minimum mass `wsupp`.
    # intervals are left-half-open as in a cdf difference
    _a, _b = distfn.support(*arg)
    # clip the enumerated support to a finite range
    lo = int(max(_a, -1000))
    high = int(min(_b, 1000)) + 1
    distsupport = range(lo, high)
    last = 0
    distsupp = [lo]
    distmass = []
    for ii in distsupport:
        current = distfn.cdf(ii, *arg)
        # close an interval once it has accumulated at least `wsupp` mass
        # (with a small tolerance for floating-point round-off)
        if current - last >= wsupp - 1e-14:
            distsupp.append(ii)
            distmass.append(current - last)
            last = current
            # stop once only a tail of mass < wsupp remains
            if current > (1 - wsupp):
                break
    if distsupp[-1] < _b:
        distsupp.append(_b)
        distmass.append(1 - last)
    distsupp = np.array(distsupp)
    distmass = np.array(distmass)

    # convert intervals to right-half-open as required by histogram
    histsupp = distsupp + 1e-8
    histsupp[0] = _a

    # find sample frequencies and perform chisquare test
    freq, hsupp = np.histogram(rvs, histsupp)
    chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)

    npt.assert_(
        pval > alpha,
        f'chisquare - test for {msg} at arg = {str(arg)} with pval = {str(pval)}'
    )
315
+
316
+
317
def check_scale_docstring(distfn):
    """Discrete distributions take no `scale`; their docs must not mention it."""
    doc = distfn.__doc__
    if doc is not None:
        # Docstrings can be stripped if interpreter is run with -OO
        npt.assert_('scale' not in doc)
321
+
322
+
323
@pytest.mark.parametrize('method', ['pmf', 'logpmf', 'cdf', 'logcdf',
                                    'sf', 'logsf', 'ppf', 'isf'])
@pytest.mark.parametrize('distname, args', distdiscrete)
def test_methods_with_lists(method, distname, args):
    # Test that the discrete distributions can accept Python lists
    # as arguments.
    try:
        dist = getattr(stats, distname)
    except TypeError:
        # rv_sample instances in distdiscrete are not names; skip them.
        return
    # ppf/isf take probabilities, the other methods take support values.
    if method in ['ppf', 'isf']:
        z = [0.1, 0.2]
    else:
        z = [0, 1]
    p2 = [[p]*2 for p in args]
    loc = [0, 1]
    # BUG FIX: `dist.pmf` was previously called unconditionally, so the
    # `method` parametrization was ignored and only `pmf` was ever tested.
    dist_method = getattr(dist, method)
    result = dist_method(z, *p2, loc=loc)
    npt.assert_allclose(result,
                        [dist_method(*v) for v in zip(z, *p2, loc)],
                        rtol=1e-15, atol=1e-15)
343
+
344
+
345
@pytest.mark.parametrize('distname, args', invdistdiscrete)
def test_cdf_gh13280_regression(distname, args):
    """gh-13280: invalid shape parameters must produce NaN, not garbage."""
    dist = getattr(stats, distname)
    grid = np.arange(-2, 15)
    npt.assert_equal(dist.cdf(grid, *args), np.nan)
353
+
354
+
355
def cases_test_discrete_integer_shapes():
    """Yield ``(distname, shape_name, shapes)`` for integer-constrained shapes.

    Skips shape parameters that are only required to be integral when
    *fitting* but accept real values as input to the pmf, etc.
    """
    # distributions parameters that are only allowed to be integral when
    # fitting, but are allowed to be real as input to PDF, etc.
    integrality_exceptions = {'nbinom': {'n'}, 'betanbinom': {'n'}}

    seen = set()
    for distname, shapes in distdiscrete:
        if distname in seen:
            continue
        seen.add(distname)

        try:
            dist = getattr(stats, distname)
        except TypeError:
            # rv_sample instances are not names and have no shape parameters.
            continue

        shape_info = dist._shape_info()

        # Idiom fix: the previous `enumerate` index was never used.
        for shape in shape_info:
            if (shape.name in integrality_exceptions.get(distname, set()) or
                    not shape.integrality):
                continue

            yield distname, shape.name, shapes
379
+
380
+
381
@pytest.mark.parametrize('distname, shapename, shapes',
                         cases_test_discrete_integer_shapes())
def test_integer_shapes(distname, shapename, shapes):
    """Non-integral values of integer-constrained shapes must produce NaN."""
    dist = getattr(stats, distname)
    shape_names = [info.name for info in dist._shape_info()]
    # Position of the shape parameter that must be integral.
    i = shape_names.index(shapename)

    good = shapes[i]
    bad = good - 0.5           # arbitrary non-integral value
    also_good = good - 1
    params = list(shapes)
    # Stack valid / invalid / valid values so one pmf call tests all three.
    params[i] = [[good], [bad], [also_good]]

    a, b = dist.support(*shapes)
    x = np.round(np.linspace(a, b, 5))

    pmf = dist.pmf(x, *params)
    assert not np.any(np.isnan(pmf[0, :]))
    assert np.all(np.isnan(pmf[1, :]))
    assert not np.any(np.isnan(pmf[2, :]))
403
+
404
+
405
def test_frozen_attributes():
    # gh-14827 reported that all frozen distributions had both pmf and pdf
    # attributes; continuous should have pdf and discrete should have pmf.
    message = "'rv_discrete_frozen' object has no attribute"
    with pytest.raises(AttributeError, match=message):
        stats.binom(10, 0.5).pdf
    with pytest.raises(AttributeError, match=message):
        stats.binom(10, 0.5).logpdf
    # Even with a `pdf` attribute patched onto the class, freezing must still
    # produce an rv_discrete_frozen instance.
    stats.binom.pdf = "herring"
    try:
        frozen_binom = stats.binom(10, 0.5)
        assert isinstance(frozen_binom, rv_discrete_frozen)
    finally:
        # BUG FIX: previously the monkey-patched `pdf` was only removed on
        # success, leaking the patch into other tests if the assert failed.
        delattr(stats.binom, 'pdf')
417
+
418
+
419
@pytest.mark.parametrize('distname, shapes', distdiscrete)
def test_interval(distname, shapes):
    # gh-11026 reported that `interval` returns "incorrect" values when
    # confidence=1.  The values were correct but unintuitive: the left end
    # lies one unit below the support minimum.  Confirm that ppf, isf and
    # interval all agree on this convention.
    dist = getattr(stats, distname) if isinstance(distname, str) else distname
    a, b = dist.support(*shapes)
    expected = (a - 1, b)
    npt.assert_equal(dist.ppf([0, 1], *shapes), expected)
    npt.assert_equal(dist.isf([1, 0], *shapes), expected)
    npt.assert_equal(dist.interval(1, *shapes), expected)
433
+
434
+
435
@pytest.mark.xfail_on_32bit("Sensible to machine precision")
def test_rv_sample():
    """Exhaustively exercise rv_sample (rv_discrete with explicit values).

    Checks support, pmf/cdf/sf and their logs, ppf/isf round trips,
    interval/median, all four moments, expect(), entropy and rvs against
    direct computations from (xk, pk).  Also checks that gh-3758 is resolved.
    """
    # Thoroughly test rv_sample and check that gh-3758 is resolved

    # Generate a random discrete distribution
    rng = np.random.default_rng(98430143469)
    xk = np.sort(rng.random(10) * 10)
    pk = rng.random(10)
    pk /= np.sum(pk)
    dist = stats.rv_discrete(values=(xk, pk))

    # Generate points to the left and right of xk
    xk_left = (np.array([0] + xk[:-1].tolist()) + xk)/2
    xk_right = (np.array(xk[1:].tolist() + [xk[-1]+1]) + xk)/2

    # Generate points to the left and right of cdf
    cdf2 = np.cumsum(pk)
    cdf2_left = (np.array([0] + cdf2[:-1].tolist()) + cdf2)/2
    cdf2_right = (np.array(cdf2[1:].tolist() + [1]) + cdf2)/2

    # support - leftmost and rightmost xk
    a, b = dist.support()
    assert_allclose(a, xk[0])
    assert_allclose(b, xk[-1])

    # pmf - supported only on the xk
    assert_allclose(dist.pmf(xk), pk)
    assert_allclose(dist.pmf(xk_right), 0)
    assert_allclose(dist.pmf(xk_left), 0)

    # logpmf is log of the pmf; log(0) = -np.inf
    with np.errstate(divide='ignore'):
        assert_allclose(dist.logpmf(xk), np.log(pk))
        assert_allclose(dist.logpmf(xk_right), -np.inf)
        assert_allclose(dist.logpmf(xk_left), -np.inf)

    # cdf - the cumulative sum of the pmf
    assert_allclose(dist.cdf(xk), cdf2)
    assert_allclose(dist.cdf(xk_right), cdf2)
    assert_allclose(dist.cdf(xk_left), [0]+cdf2[:-1].tolist())

    with np.errstate(divide='ignore'):
        assert_allclose(dist.logcdf(xk), np.log(dist.cdf(xk)),
                        atol=1e-15)
        assert_allclose(dist.logcdf(xk_right), np.log(dist.cdf(xk_right)),
                        atol=1e-15)
        assert_allclose(dist.logcdf(xk_left), np.log(dist.cdf(xk_left)),
                        atol=1e-15)

    # sf is 1-cdf
    assert_allclose(dist.sf(xk), 1-dist.cdf(xk))
    assert_allclose(dist.sf(xk_right), 1-dist.cdf(xk_right))
    assert_allclose(dist.sf(xk_left), 1-dist.cdf(xk_left))

    with np.errstate(divide='ignore'):
        assert_allclose(dist.logsf(xk), np.log(dist.sf(xk)),
                        atol=1e-15)
        assert_allclose(dist.logsf(xk_right), np.log(dist.sf(xk_right)),
                        atol=1e-15)
        assert_allclose(dist.logsf(xk_left), np.log(dist.sf(xk_left)),
                        atol=1e-15)

    # ppf
    assert_allclose(dist.ppf(cdf2), xk)
    assert_allclose(dist.ppf(cdf2_left), xk)
    assert_allclose(dist.ppf(cdf2_right)[:-1], xk[1:])
    # ppf(0) is one below the support minimum by convention
    assert_allclose(dist.ppf(0), a - 1)
    assert_allclose(dist.ppf(1), b)

    # isf
    sf2 = dist.sf(xk)
    assert_allclose(dist.isf(sf2), xk)
    assert_allclose(dist.isf(1-cdf2_left), dist.ppf(cdf2_left))
    assert_allclose(dist.isf(1-cdf2_right), dist.ppf(cdf2_right))
    assert_allclose(dist.isf(0), b)
    assert_allclose(dist.isf(1), a - 1)

    # interval is (ppf(alpha/2), isf(alpha/2))
    ps = np.linspace(0.01, 0.99, 10)
    int2 = dist.ppf(ps/2), dist.isf(ps/2)
    assert_allclose(dist.interval(1-ps), int2)
    assert_allclose(dist.interval(0), dist.median())
    assert_allclose(dist.interval(1), (a-1, b))

    # median is simply ppf(0.5)
    med2 = dist.ppf(0.5)
    assert_allclose(dist.median(), med2)

    # all four stats (mean, var, skew, and kurtosis) from the definitions
    mean2 = np.sum(xk*pk)
    var2 = np.sum((xk - mean2)**2 * pk)
    skew2 = np.sum((xk - mean2)**3 * pk) / var2**(3/2)
    kurt2 = np.sum((xk - mean2)**4 * pk) / var2**2 - 3
    assert_allclose(dist.mean(), mean2)
    assert_allclose(dist.std(), np.sqrt(var2))
    assert_allclose(dist.var(), var2)
    assert_allclose(dist.stats(moments='mvsk'), (mean2, var2, skew2, kurt2))

    # noncentral moment against definition
    mom3 = np.sum((xk**3) * pk)
    assert_allclose(dist.moment(3), mom3)

    # expect - check against moments
    assert_allclose(dist.expect(lambda x: 1), 1)
    assert_allclose(dist.expect(), mean2)
    assert_allclose(dist.expect(lambda x: x**3), mom3)

    # entropy is the negative of the expected value of log(p)
    with np.errstate(divide='ignore'):
        assert_allclose(-dist.expect(lambda x: dist.logpmf(x)), dist.entropy())

    # RVS is just ppf of uniform random variates; re-seeding the generator
    # must reproduce the same stream.
    rng = np.random.default_rng(98430143469)
    rvs = dist.rvs(size=100, random_state=rng)
    rng = np.random.default_rng(98430143469)
    rvs0 = dist.ppf(rng.random(size=100))
    assert_allclose(rvs, rvs0)
552
+
553
def test__pmf_float_input():
    # gh-21272
    # test that `rvs()` can be computed when `_pmf` requires float input
    # (with integer input, 3**(1 - i) would be evaluated in integer
    # arithmetic and the sampler could fail)

    class rv_exponential(stats.rv_discrete):
        def _pmf(self, i):
            return (2/3)*3**(1 - i)

    rv = rv_exponential(a=0.0, b=float('inf'))
    rvs = rv.rvs(random_state=42)  # should not crash due to integer input to `_pmf`
    assert_allclose(rvs, 0)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_distns.py ADDED
@@ -0,0 +1,648 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import itertools
3
+
4
+ from scipy.stats import (betabinom, betanbinom, hypergeom, nhypergeom,
5
+ bernoulli, boltzmann, skellam, zipf, zipfian, binom,
6
+ nbinom, nchypergeom_fisher, nchypergeom_wallenius,
7
+ randint)
8
+
9
+ import numpy as np
10
+ from numpy.testing import (
11
+ assert_almost_equal, assert_equal, assert_allclose, suppress_warnings
12
+ )
13
+ from scipy.special import binom as special_binom
14
+ from scipy.optimize import root_scalar
15
+ from scipy.integrate import quad
16
+
17
+
18
+ # The expected values were computed with Wolfram Alpha, using
19
+ # the expression CDF[HypergeometricDistribution[N, n, M], k].
20
@pytest.mark.parametrize('k, M, n, N, expected, rtol',
                         [(3, 10, 4, 5,
                           0.9761904761904762, 1e-15),
                          (107, 10000, 3000, 215,
                           0.9999999997226765, 1e-15),
                          (10, 10000, 3000, 215,
                           2.681682217692179e-21, 5e-11)])
def test_hypergeom_cdf(k, M, n, N, expected, rtol):
    """hypergeom.cdf against Wolfram Alpha reference values."""
    assert_allclose(hypergeom.cdf(k, M, n, N), expected, rtol=rtol)
30
+
31
+
32
+ # The expected values were computed with Wolfram Alpha, using
33
+ # the expression SurvivalFunction[HypergeometricDistribution[N, n, M], k].
34
@pytest.mark.parametrize('k, M, n, N, expected, rtol',
                         [(25, 10000, 3000, 215,
                           0.9999999999052958, 1e-15),
                          (125, 10000, 3000, 215,
                           1.4416781705752128e-18, 5e-11)])
def test_hypergeom_sf(k, M, n, N, expected, rtol):
    """hypergeom.sf against Wolfram Alpha reference values."""
    assert_allclose(hypergeom.sf(k, M, n, N), expected, rtol=rtol)
42
+
43
+
44
def test_hypergeom_logpmf():
    """Check hypergeometric logpmf symmetries and the Bernoulli special case."""
    # symmetries: f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K)
    k, N, K, n = 5, 50, 10, 5
    reference = hypergeom.logpmf(k, N, K, n)
    equivalents = (hypergeom.logpmf(n - k, N, N - K, n),
                   hypergeom.logpmf(K - k, N, K, N - n),
                   hypergeom.logpmf(k, N, n, K))
    for value in equivalents:
        assert_almost_equal(reference, value, decimal=12)

    # related distribution: drawing one ball is a Bernoulli trial with p = K/N
    k, N, K, n = 1, 10, 7, 1
    assert_almost_equal(hypergeom.logpmf(k, N, K, n),
                        bernoulli.logpmf(k, K/N), decimal=12)
68
+
69
+
70
def test_nhypergeom_pmf():
    """Relate the negative hypergeometric pmf to the hypergeometric pmf."""
    M, n, r = 45, 13, 8
    k = 6
    nhg = nhypergeom.pmf(k, M, n, r)
    hg = hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1))
    assert_allclose(hg, nhg, rtol=1e-10)
77
+
78
+
79
def test_nhypergeom_pmfcdf():
    """nhypergeom pmf and cdf against hand-computed fractions."""
    M, n, r = 8, 3, 4
    support = np.arange(n+1)
    assert_allclose(nhypergeom.pmf(support, M, n, r),
                    [1/14, 3/14, 5/14, 5/14], rtol=1e-13)
    assert_allclose(nhypergeom.cdf(support, M, n, r),
                    [1/14, 4/14, 9/14, 1.0], rtol=1e-13)
89
+
90
+
91
def test_nhypergeom_r0():
    """With r = 0 all probability mass sits at k = 0."""
    k = [[0, 1, 2, 0], [1, 2, 0, 3]]
    pmf = nhypergeom.pmf(k, 10, 3, 0)
    assert_allclose(pmf, [[1, 0, 0, 1], [0, 0, 1, 0]], rtol=1e-13)
98
+
99
+
100
def test_nhypergeom_rvs_shape():
    # A `size` with more dimensions than the broadcast parameters must
    # still be honored exactly.
    sample = nhypergeom.rvs(22, [7, 8, 9], [[12], [13]], size=(5, 1, 2, 3))
    assert sample.shape == (5, 1, 2, 3)
106
+
107
+
108
def test_nhypergeom_accuracy():
    # nhypergeom.rvs (post-gh-13431) must match inverse-transform sampling
    # from the same uniform stream.
    np.random.seed(0)
    sample = nhypergeom.rvs(22, 7, 11, size=100)
    np.random.seed(0)
    u = np.random.uniform(size=100)
    assert_equal(sample, nhypergeom.ppf(u, 22, 7, 11))
117
+
118
+
119
def test_boltzmann_upper_bound():
    """Boltzmann pmf/cdf for small truncation bounds N."""
    k = np.arange(-3, 5)

    # N = 1: a point mass at zero, whatever lambda is.
    assert_equal(boltzmann.pmf(k, 0.123, 1), k == 0)

    # lambda = log 2 gives successive mass ratios of 1/2.
    lam = np.log(2)
    N = 3
    assert_allclose(boltzmann.pmf(k, lam, N),
                    [0, 0, 0, 4/7, 2/7, 1/7, 0, 0], rtol=1e-13)
    assert_allclose(boltzmann.cdf(k, lam, N),
                    [0, 0, 0, 4/7, 6/7, 1, 1, 1], rtol=1e-13)
136
+
137
+
138
def test_betabinom_a_and_b_unity():
    # betabinom(n, 1, 1) reduces to a discrete uniform on {0, ..., n}
    n = 20
    k = np.arange(n + 1)
    uniform_pmf = np.full(n + 1, 1 / (n + 1))
    assert_almost_equal(betabinom(n, 1, 1).pmf(k), uniform_pmf)
146
+
147
+
148
@pytest.mark.parametrize('dtypes', itertools.product(*[(int, float)]*3))
def test_betabinom_stats_a_and_b_integers_gh18026(dtypes):
    # gh-18026: kurtosis of betabinom failed when some parameters were
    # integers.  Check every int/float combination of (n, a, b).
    n_type, a_type, b_type = dtypes
    kurtosis = betabinom.stats(n_type(10), a_type(2), b_type(3), moments='k')
    assert_allclose(kurtosis, -0.6904761904761907)
155
+
156
+
157
def test_betabinom_bernoulli():
    # betabinom(1, a, b) reduces to bernoulli(a / (a + b))
    a, b = 2.3, 0.63
    k = np.arange(2)
    assert_almost_equal(betabinom(1, a, b).pmf(k),
                        bernoulli(a / (a + b)).pmf(k))
165
+
166
+
167
def test_issue_10317():
    # nbinom with p=1 is a point mass at zero; its interval must be (0, 0).
    assert_equal(nbinom.interval(confidence=0.9, n=10, p=1), (0, 0))
170
+
171
+
172
def test_issue_11134():
    # binom with p=0 is a point mass at zero; its interval must be (0, 0).
    assert_equal(binom.interval(confidence=0.95, n=10, p=0), (0, 0))
175
+
176
+
177
def test_issue_7406():
    # binom with n=0 is a point mass at zero.
    np.random.seed(0)
    assert_equal(binom.ppf(np.random.rand(10), 0, 0.5), 0)

    # endpoints: ppf(0) is one below the support, ppf(1) is the maximum
    assert_equal(binom.ppf(0, 0, 0.5), -1)
    assert_equal(binom.ppf(1, 0, 0.5), 0)
184
+
185
+
186
def test_issue_5122():
    # binom.ppf with p=0 must return -1 at q=0, 0 for interior q, and n at
    # q=1, for vectorized n.
    p = 0
    n = np.random.randint(100, size=10)

    assert_equal(binom.ppf(0, n, p), -1)
    assert_equal(binom.ppf(np.linspace(0.01, 0.99, 10), n, p), 0)
    assert_equal(binom.ppf(1, n, p), n)
201
+
202
+
203
def test_issue_1603():
    # ppf(0.01) must be 0 for tiny success probabilities.
    probabilities = np.logspace(-3, -100)
    assert_equal(binom(1000, probabilities).ppf(0.01), 0)
205
+
206
+
207
def test_issue_5503():
    # The CDF of binom(2x, 1/2) at x stays near 1/2 even for huge counts.
    x = np.logspace(3, 14, 12)
    assert_allclose(binom.cdf(x, 2*x, 0.5), 0.5, atol=1e-2)
211
+
212
+
213
@pytest.mark.parametrize('x, n, p, cdf_desired', [
    (300, 1000, 3/10, 0.51559351981411995636),
    (3000, 10000, 3/10, 0.50493298381929698016),
    (30000, 100000, 3/10, 0.50156000591726422864),
    (300000, 1000000, 3/10, 0.50049331906666960038),
    (3000000, 10000000, 3/10, 0.50015600124585261196),
    (30000000, 100000000, 3/10, 0.50004933192735230102),
    (30010000, 100000000, 3/10, 0.98545384016570790717),
    (29990000, 100000000, 3/10, 0.01455017177985268670),
    (29950000, 100000000, 3/10, 5.02250963487432024943e-28),
])
def test_issue_5503pt2(x, n, p, cdf_desired):
    """binom.cdf reference values for very large n (Wolfram Alpha)."""
    assert_allclose(binom.cdf(x, n, p), cdf_desired)
226
+
227
+
228
def test_issue_5503pt3():
    # From Wolfram Alpha: CDF[BinomialDistribution[1e12, 1e-12], 2]
    expected = 0.91969860292869777384
    assert_allclose(binom.cdf(2, 10**12, 10**-12), expected)
231
+
232
+
233
def test_issue_6682():
    # Reference value from R:
    # options(digits=16)
    # print(pnbinom(250, 50, 32/63, lower.tail=FALSE))
    reference = 1.460458510976452e-35
    assert_allclose(nbinom.sf(250, 50, 32./63.), reference)
238
+
239
+
240
+ def test_issue_19747():
241
+ # test that negative k does not raise an error in nbinom.logcdf
242
+ result = nbinom.logcdf([5, -1, 1], 5, 0.5)
243
+ reference = [-0.47313352, -np.inf, -2.21297293]
244
+ assert_allclose(result, reference)
245
+
246
+
247
def test_boost_divide_by_zero_issue_15101():
    # gh-15101: the Boost-backed binomial pmf divided by zero in this
    # corner of parameter space; the correct value is exactly 0.
    assert_allclose(binom.pmf(996, 1000, 0.01), 0.0)
252
+
253
+
254
def test_skellam_gh11474():
    # gh-11474: `cdfchn` lost accuracy for large mu.  Reference generated
    # in R:
    #   library(skellam); options(digits = 16)
    #   mu = c(1, 10, 100, 1000, 5000, 5050, 5100, 5250, 6000)
    #   pskellam(0, mu, mu, TRUE)
    mu = [1, 10, 100, 1000, 5000, 5050, 5100, 5250, 6000]
    expected = [0.6542541612768356, 0.5448901559424127, 0.5141135799745580,
                0.5044605891382528, 0.5019947363350450, 0.5019848365953181,
                0.5019750827993392, 0.5019466621805060, 0.5018209330219539]
    assert_allclose(skellam.cdf(0, mu, mu), expected)
267
+
268
+
269
class TestZipfian:
    """Tests for the (bounded) Zipfian distribution.

    NOTE: the class body seeds the global NumPy RNG between method
    definitions to build `naive_tests`; the seeding order is significant.
    """

    def test_zipfian_asymptotic(self):
        # test limiting case that zipfian(a, n) -> zipf(a) as n-> oo
        a = 6.5
        N = 10000000
        k = np.arange(1, 21)
        assert_allclose(zipfian.pmf(k, a, N), zipf.pmf(k, a))
        assert_allclose(zipfian.cdf(k, a, N), zipf.cdf(k, a))
        assert_allclose(zipfian.sf(k, a, N), zipf.sf(k, a))
        assert_allclose(zipfian.stats(a, N, moments='msvk'),
                        zipf.stats(a, moments='msvk'))

    def test_zipfian_continuity(self):
        # test that zipfian(0.999999, n) ~ zipfian(1.000001, n)
        # (a = 1 switches between methods of calculating harmonic sum)
        alt1, agt1 = 0.99999999, 1.00000001
        N = 30
        k = np.arange(1, N + 1)
        assert_allclose(zipfian.pmf(k, alt1, N), zipfian.pmf(k, agt1, N),
                        rtol=5e-7)
        assert_allclose(zipfian.cdf(k, alt1, N), zipfian.cdf(k, agt1, N),
                        rtol=5e-7)
        assert_allclose(zipfian.sf(k, alt1, N), zipfian.sf(k, agt1, N),
                        rtol=5e-7)
        assert_allclose(zipfian.stats(alt1, N, moments='msvk'),
                        zipfian.stats(agt1, N, moments='msvk'), rtol=5e-7)

    def test_zipfian_R(self):
        # test against R VGAM package
        # library(VGAM)
        # k <- c(13, 16, 1, 4, 4, 8, 10, 19, 5, 7)
        # a <- c(1.56712977, 3.72656295, 5.77665117, 9.12168729, 5.79977172,
        # 4.92784796, 9.36078764, 4.3739616 , 7.48171872, 4.6824154)
        # n <- c(70, 80, 48, 65, 83, 89, 50, 30, 20, 20)
        # pmf <- dzipf(k, N = n, shape = a)
        # cdf <- pzipf(k, N = n, shape = a)
        # print(pmf)
        # print(cdf)
        np.random.seed(0)
        k = np.random.randint(1, 20, size=10)
        a = np.random.rand(10)*10 + 1
        n = np.random.randint(1, 100, size=10)
        pmf = [8.076972e-03, 2.950214e-05, 9.799333e-01, 3.216601e-06,
               3.158895e-04, 3.412497e-05, 4.350472e-10, 2.405773e-06,
               5.860662e-06, 1.053948e-04]
        cdf = [0.8964133, 0.9998666, 0.9799333, 0.9999995, 0.9998584,
               0.9999458, 1.0000000, 0.9999920, 0.9999977, 0.9998498]
        # skip the first point; zipUC is not accurate for low a, n
        assert_allclose(zipfian.pmf(k, a, n)[1:], pmf[1:], rtol=1e-6)
        assert_allclose(zipfian.cdf(k, a, n)[1:], cdf[1:], rtol=5e-5)

    # class-level parameter grid for test_zipfian_naive; evaluated at class
    # definition time, so the seed here does not affect the methods above.
    np.random.seed(0)
    naive_tests = np.vstack((np.logspace(-2, 1, 10),
                             np.random.randint(2, 40, 10))).T

    @pytest.mark.parametrize("a, n", naive_tests)
    def test_zipfian_naive(self, a, n):
        # test against bare-bones implementation

        @np.vectorize
        def Hns(n, s):
            """Naive implementation of harmonic sum"""
            return (1/np.arange(1, n+1)**s).sum()

        @np.vectorize
        def pzip(k, a, n):
            """Naive implementation of zipfian pmf"""
            if k < 1 or k > n:
                return 0.
            else:
                return 1 / k**a / Hns(n, a)

        k = np.arange(n+1)
        pmf = pzip(k, a, n)
        cdf = np.cumsum(pmf)
        mean = np.average(k, weights=pmf)
        var = np.average((k - mean)**2, weights=pmf)
        std = var**0.5
        skew = np.average(((k-mean)/std)**3, weights=pmf)
        kurtosis = np.average(((k-mean)/std)**4, weights=pmf) - 3
        assert_allclose(zipfian.pmf(k, a, n), pmf)
        assert_allclose(zipfian.cdf(k, a, n), cdf)
        assert_allclose(zipfian.stats(a, n, moments="mvsk"),
                        [mean, var, skew, kurtosis])

    def test_pmf_integer_k(self):
        # the pmf must not depend on the integer dtype of k
        k = np.arange(0, 1000)
        k_int32 = k.astype(np.int32)
        dist = zipfian(111, 22)
        pmf = dist.pmf(k)
        pmf_k_int32 = dist.pmf(k_int32)
        assert_equal(pmf, pmf_k_int32)
361
+
362
+
363
class TestNCH:
    """Tests for the noncentral hypergeometric distributions.

    The class-level attributes build one shared random parameter grid
    (urn compositions, draw counts and odds ratios) reused by the methods.
    """
    np.random.seed(2)  # seeds 0 and 1 had some xl = xu; randint failed
    shape = (2, 4, 3)
    max_m = 100
    m1 = np.random.randint(1, max_m, size=shape)  # red balls
    m2 = np.random.randint(1, max_m, size=shape)  # white balls
    N = m1 + m2  # total balls
    n = randint.rvs(0, N, size=N.shape)  # number of draws
    xl = np.maximum(0, n-m2)  # lower bound of support
    xu = np.minimum(n, m1)  # upper bound of support
    x = randint.rvs(xl, xu, size=xl.shape)  # observations within the support
    odds = np.random.rand(*x.shape)*2  # odds ratios in (0, 2)
376
+ # test output is more readable when function names (strings) are passed
377
+ @pytest.mark.parametrize('dist_name',
378
+ ['nchypergeom_fisher', 'nchypergeom_wallenius'])
379
+ def test_nch_hypergeom(self, dist_name):
380
+ # Both noncentral hypergeometric distributions reduce to the
381
+ # hypergeometric distribution when odds = 1
382
+ dists = {'nchypergeom_fisher': nchypergeom_fisher,
383
+ 'nchypergeom_wallenius': nchypergeom_wallenius}
384
+ dist = dists[dist_name]
385
+ x, N, m1, n = self.x, self.N, self.m1, self.n
386
+ assert_allclose(dist.pmf(x, N, m1, n, odds=1),
387
+ hypergeom.pmf(x, N, m1, n))
388
+
389
    def test_nchypergeom_fisher_naive(self):
        # test against a very simple implementation
        x, N, m1, n, odds = self.x, self.N, self.m1, self.n, self.odds

        @np.vectorize
        def pmf_mean_var(x, N, m1, n, w):
            # simple implementation of nchypergeom_fisher pmf:
            # normalize the unnormalized weight f over the support, and
            # compute mean/variance from the first two weighted moments.
            m2 = N - m1
            xl = np.maximum(0, n-m2)
            xu = np.minimum(n, m1)

            def f(x):
                # unnormalized pmf weight
                t1 = special_binom(m1, x)
                t2 = special_binom(m2, n - x)
                return t1 * t2 * w**x

            def P(k):
                # k-th (unnormalized) moment over the support
                return sum(f(y)*y**k for y in range(xl, xu + 1))

            P0 = P(0)
            P1 = P(1)
            P2 = P(2)
            pmf = f(x) / P0
            mean = P1 / P0
            var = P2 / P0 - (P1 / P0)**2
            return pmf, mean, var

        pmf, mean, var = pmf_mean_var(x, N, m1, n, odds)
        assert_allclose(nchypergeom_fisher.pmf(x, N, m1, n, odds), pmf)
        assert_allclose(nchypergeom_fisher.stats(N, m1, n, odds, moments='m'),
                        mean)
        assert_allclose(nchypergeom_fisher.stats(N, m1, n, odds, moments='v'),
                        var)
422
+
423
+ def test_nchypergeom_wallenius_naive(self):
424
+ # test against a very simple implementation
425
+
426
+ np.random.seed(2)
427
+ shape = (2, 4, 3)
428
+ max_m = 100
429
+ m1 = np.random.randint(1, max_m, size=shape)
430
+ m2 = np.random.randint(1, max_m, size=shape)
431
+ N = m1 + m2
432
+ n = randint.rvs(0, N, size=N.shape)
433
+ xl = np.maximum(0, n-m2)
434
+ xu = np.minimum(n, m1)
435
+ x = randint.rvs(xl, xu, size=xl.shape)
436
+ w = np.random.rand(*x.shape)*2
437
+
438
+ def support(N, m1, n, w):
439
+ m2 = N - m1
440
+ xl = np.maximum(0, n-m2)
441
+ xu = np.minimum(n, m1)
442
+ return xl, xu
443
+
444
+ @np.vectorize
445
+ def mean(N, m1, n, w):
446
+ m2 = N - m1
447
+ xl, xu = support(N, m1, n, w)
448
+
449
+ def fun(u):
450
+ return u/m1 + (1 - (n-u)/m2)**w - 1
451
+
452
+ return root_scalar(fun, bracket=(xl, xu)).root
453
+
454
+ with suppress_warnings() as sup:
455
+ sup.filter(RuntimeWarning,
456
+ message="invalid value encountered in mean")
457
+ assert_allclose(nchypergeom_wallenius.mean(N, m1, n, w),
458
+ mean(N, m1, n, w), rtol=2e-2)
459
+
460
+ @np.vectorize
461
+ def variance(N, m1, n, w):
462
+ m2 = N - m1
463
+ u = mean(N, m1, n, w)
464
+ a = u * (m1 - u)
465
+ b = (n-u)*(u + m2 - n)
466
+ return N*a*b / ((N-1) * (m1*b + m2*a))
467
+
468
+ with suppress_warnings() as sup:
469
+ sup.filter(RuntimeWarning,
470
+ message="invalid value encountered in mean")
471
+ assert_allclose(
472
+ nchypergeom_wallenius.stats(N, m1, n, w, moments='v'),
473
+ variance(N, m1, n, w),
474
+ rtol=5e-2
475
+ )
476
+
477
+ @np.vectorize
478
+ def pmf(x, N, m1, n, w):
479
+ m2 = N - m1
480
+ xl, xu = support(N, m1, n, w)
481
+
482
+ def integrand(t):
483
+ D = w*(m1 - x) + (m2 - (n-x))
484
+ res = (1-t**(w/D))**x * (1-t**(1/D))**(n-x)
485
+ return res
486
+
487
+ def f(x):
488
+ t1 = special_binom(m1, x)
489
+ t2 = special_binom(m2, n - x)
490
+ the_integral = quad(integrand, 0, 1,
491
+ epsrel=1e-16, epsabs=1e-16)
492
+ return t1 * t2 * the_integral[0]
493
+
494
+ return f(x)
495
+
496
+ pmf0 = pmf(x, N, m1, n, w)
497
+ pmf1 = nchypergeom_wallenius.pmf(x, N, m1, n, w)
498
+
499
+ atol, rtol = 1e-6, 1e-6
500
+ i = np.abs(pmf1 - pmf0) < atol + rtol*np.abs(pmf0)
501
+ assert i.sum() > np.prod(shape) / 2 # works at least half the time
502
+
503
+ # for those that fail, discredit the naive implementation
504
+ for N, m1, n, w in zip(N[~i], m1[~i], n[~i], w[~i]):
505
+ # get the support
506
+ m2 = N - m1
507
+ xl, xu = support(N, m1, n, w)
508
+ x = np.arange(xl, xu + 1)
509
+
510
+ # calculate sum of pmf over the support
511
+ # the naive implementation is very wrong in these cases
512
+ assert pmf(x, N, m1, n, w).sum() < .5
513
+ assert_allclose(nchypergeom_wallenius.pmf(x, N, m1, n, w).sum(), 1)
514
+
515
+ def test_wallenius_against_mpmath(self):
516
+ # precompute data with mpmath since naive implementation above
517
+ # is not reliable. See source code in gh-13330.
518
+ M = 50
519
+ n = 30
520
+ N = 20
521
+ odds = 2.25
522
+ # Expected results, computed with mpmath.
523
+ sup = np.arange(21)
524
+ pmf = np.array([3.699003068656875e-20,
525
+ 5.89398584245431e-17,
526
+ 2.1594437742911123e-14,
527
+ 3.221458044649955e-12,
528
+ 2.4658279241205077e-10,
529
+ 1.0965862603981212e-08,
530
+ 3.057890479665704e-07,
531
+ 5.622818831643761e-06,
532
+ 7.056482841531681e-05,
533
+ 0.000618899425358671,
534
+ 0.003854172932571669,
535
+ 0.01720592676256026,
536
+ 0.05528844897093792,
537
+ 0.12772363313574242,
538
+ 0.21065898367825722,
539
+ 0.24465958845359234,
540
+ 0.1955114898110033,
541
+ 0.10355390084949237,
542
+ 0.03414490375225675,
543
+ 0.006231989845775931,
544
+ 0.0004715577304677075])
545
+ mean = 14.808018384813426
546
+ var = 2.6085975877923717
547
+
548
+ # nchypergeom_wallenius.pmf returns 0 for pmf(0) and pmf(1), and pmf(2)
549
+ # has only three digits of accuracy (~ 2.1511e-14).
550
+ assert_allclose(nchypergeom_wallenius.pmf(sup, M, n, N, odds), pmf,
551
+ rtol=1e-13, atol=1e-13)
552
+ assert_allclose(nchypergeom_wallenius.mean(M, n, N, odds),
553
+ mean, rtol=1e-13)
554
+ assert_allclose(nchypergeom_wallenius.var(M, n, N, odds),
555
+ var, rtol=1e-11)
556
+
557
+ @pytest.mark.parametrize('dist_name',
558
+ ['nchypergeom_fisher', 'nchypergeom_wallenius'])
559
+ def test_rvs_shape(self, dist_name):
560
+ # Check that when given a size with more dimensions than the
561
+ # dimensions of the broadcast parameters, rvs returns an array
562
+ # with the correct shape.
563
+ dists = {'nchypergeom_fisher': nchypergeom_fisher,
564
+ 'nchypergeom_wallenius': nchypergeom_wallenius}
565
+ dist = dists[dist_name]
566
+ x = dist.rvs(50, 30, [[10], [20]], [0.5, 1.0, 2.0], size=(5, 1, 2, 3))
567
+ assert x.shape == (5, 1, 2, 3)
568
+
569
+
570
+ @pytest.mark.parametrize("mu, q, expected",
571
+ [[10, 120, -1.240089881791596e-38],
572
+ [1500, 0, -86.61466680572661]])
573
+ def test_nbinom_11465(mu, q, expected):
574
+ # test nbinom.logcdf at extreme tails
575
+ size = 20
576
+ n, p = size, size/(size+mu)
577
+ # In R:
578
+ # options(digits=16)
579
+ # pnbinom(mu=10, size=20, q=120, log.p=TRUE)
580
+ assert_allclose(nbinom.logcdf(q, n, p), expected)
581
+
582
+
583
+ def test_gh_17146():
584
+ # Check that discrete distributions return PMF of zero at non-integral x.
585
+ # See gh-17146.
586
+ x = np.linspace(0, 1, 11)
587
+ p = 0.8
588
+ pmf = bernoulli(p).pmf(x)
589
+ i = (x % 1 == 0)
590
+ assert_allclose(pmf[-1], p)
591
+ assert_allclose(pmf[0], 1-p)
592
+ assert_equal(pmf[~i], 0)
593
+
594
+
595
+ class TestBetaNBinom:
596
+ @pytest.mark.parametrize('x, n, a, b, ref',
597
+ [[5, 5e6, 5, 20, 1.1520944824139114e-107],
598
+ [100, 50, 5, 20, 0.002855762954310226],
599
+ [10000, 1000, 5, 20, 1.9648515726019154e-05]])
600
+ def test_betanbinom_pmf(self, x, n, a, b, ref):
601
+ # test that PMF stays accurate in the distribution tails
602
+ # reference values computed with mpmath
603
+ # from mpmath import mp
604
+ # mp.dps = 500
605
+ # def betanbinom_pmf(k, n, a, b):
606
+ # k = mp.mpf(k)
607
+ # a = mp.mpf(a)
608
+ # b = mp.mpf(b)
609
+ # n = mp.mpf(n)
610
+ # return float(mp.binomial(n + k - mp.one, k)
611
+ # * mp.beta(a + n, b + k) / mp.beta(a, b))
612
+ assert_allclose(betanbinom.pmf(x, n, a, b), ref, rtol=1e-10)
613
+
614
+
615
+ @pytest.mark.parametrize('n, a, b, ref',
616
+ [[10000, 5000, 50, 0.12841520515722202],
617
+ [10, 9, 9, 7.9224400871459695],
618
+ [100, 1000, 10, 1.5849602176622748]])
619
+ def test_betanbinom_kurtosis(self, n, a, b, ref):
620
+ # reference values were computed via mpmath
621
+ # from mpmath import mp
622
+ # def kurtosis_betanegbinom(n, a, b):
623
+ # n = mp.mpf(n)
624
+ # a = mp.mpf(a)
625
+ # b = mp.mpf(b)
626
+ # four = mp.mpf(4.)
627
+ # mean = n * b / (a - mp.one)
628
+ # var = (n * b * (n + a - 1.) * (a + b - 1.)
629
+ # / ((a - 2.) * (a - 1.)**2.))
630
+ # def f(k):
631
+ # return (mp.binomial(n + k - mp.one, k)
632
+ # * mp.beta(a + n, b + k) / mp.beta(a, b)
633
+ # * (k - mean)**four)
634
+ # fourth_moment = mp.nsum(f, [0, mp.inf])
635
+ # return float(fourth_moment/var**2 - 3.)
636
+ assert_allclose(betanbinom.stats(n, a, b, moments="k"),
637
+ ref, rtol=3e-15)
638
+
639
+
640
+ class TestZipf:
641
+ def test_gh20692(self):
642
+ # test that int32 data for k generates same output as double
643
+ k = np.arange(0, 1000)
644
+ k_int32 = k.astype(np.int32)
645
+ dist = zipf(9)
646
+ pmf = dist.pmf(k)
647
+ pmf_k_int32 = dist.pmf(k_int32)
648
+ assert_equal(pmf, pmf_k_int32)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_distributions.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_entropy.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import pytest
3
+ from pytest import raises as assert_raises
4
+
5
+ import numpy as np
6
+ from numpy.testing import assert_allclose
7
+
8
+ from scipy import stats
9
+ from scipy.conftest import array_api_compatible
10
+ from scipy._lib._array_api import xp_assert_close, xp_assert_equal, xp_assert_less
11
+
12
+ class TestEntropy:
13
+ @array_api_compatible
14
+ def test_entropy_positive(self, xp):
15
+ # See ticket #497
16
+ pk = xp.asarray([0.5, 0.2, 0.3])
17
+ qk = xp.asarray([0.1, 0.25, 0.65])
18
+ eself = stats.entropy(pk, pk)
19
+ edouble = stats.entropy(pk, qk)
20
+ xp_assert_equal(eself, xp.asarray(0.))
21
+ xp_assert_less(-edouble, xp.asarray(0.))
22
+
23
+ @array_api_compatible
24
+ def test_entropy_base(self, xp):
25
+ pk = xp.ones(16)
26
+ S = stats.entropy(pk, base=2.)
27
+ xp_assert_less(xp.abs(S - 4.), xp.asarray(1.e-5))
28
+
29
+ qk = xp.ones(16)
30
+ qk = xp.where(xp.arange(16) < 8, xp.asarray(2.), qk)
31
+ S = stats.entropy(pk, qk)
32
+ S2 = stats.entropy(pk, qk, base=2.)
33
+ xp_assert_less(xp.abs(S/S2 - math.log(2.)), xp.asarray(1.e-5))
34
+
35
+ @array_api_compatible
36
+ def test_entropy_zero(self, xp):
37
+ # Test for PR-479
38
+ x = xp.asarray([0., 1., 2.])
39
+ xp_assert_close(stats.entropy(x),
40
+ xp.asarray(0.63651416829481278))
41
+
42
+ @array_api_compatible
43
+ def test_entropy_2d(self, xp):
44
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
45
+ qk = xp.asarray([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
46
+ xp_assert_close(stats.entropy(pk, qk),
47
+ xp.asarray([0.1933259, 0.18609809]))
48
+
49
+ @array_api_compatible
50
+ def test_entropy_2d_zero(self, xp):
51
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
52
+ qk = xp.asarray([[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]])
53
+ xp_assert_close(stats.entropy(pk, qk),
54
+ xp.asarray([xp.inf, 0.18609809]))
55
+
56
+ pk = xp.asarray([[0.0, 0.2], [0.6, 0.3], [0.3, 0.5]])
57
+ xp_assert_close(stats.entropy(pk, qk),
58
+ xp.asarray([0.17403988, 0.18609809]))
59
+
60
+ @array_api_compatible
61
+ def test_entropy_base_2d_nondefault_axis(self, xp):
62
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
63
+ xp_assert_close(stats.entropy(pk, axis=1),
64
+ xp.asarray([0.63651417, 0.63651417, 0.66156324]))
65
+
66
+ @array_api_compatible
67
+ def test_entropy_2d_nondefault_axis(self, xp):
68
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
69
+ qk = xp.asarray([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
70
+ xp_assert_close(stats.entropy(pk, qk, axis=1),
71
+ xp.asarray([0.23104906, 0.23104906, 0.12770641]))
72
+
73
+ @array_api_compatible
74
+ def test_entropy_raises_value_error(self, xp):
75
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
76
+ qk = xp.asarray([[0.1, 0.2], [0.6, 0.3]])
77
+ message = "Array shapes are incompatible for broadcasting."
78
+ with pytest.raises(ValueError, match=message):
79
+ stats.entropy(pk, qk)
80
+
81
+ @array_api_compatible
82
+ def test_base_entropy_with_axis_0_is_equal_to_default(self, xp):
83
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
84
+ xp_assert_close(stats.entropy(pk, axis=0),
85
+ stats.entropy(pk))
86
+
87
+ @array_api_compatible
88
+ def test_entropy_with_axis_0_is_equal_to_default(self, xp):
89
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
90
+ qk = xp.asarray([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
91
+ xp_assert_close(stats.entropy(pk, qk, axis=0),
92
+ stats.entropy(pk, qk))
93
+
94
+ @array_api_compatible
95
+ def test_base_entropy_transposed(self, xp):
96
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
97
+ xp_assert_close(stats.entropy(pk.T),
98
+ stats.entropy(pk, axis=1))
99
+
100
+ @array_api_compatible
101
+ def test_entropy_transposed(self, xp):
102
+ pk = xp.asarray([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
103
+ qk = xp.asarray([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
104
+ xp_assert_close(stats.entropy(pk.T, qk.T),
105
+ stats.entropy(pk, qk, axis=1))
106
+
107
+ @array_api_compatible
108
+ def test_entropy_broadcasting(self, xp):
109
+ rng = np.random.default_rng(74187315492831452)
110
+ x = xp.asarray(rng.random(3))
111
+ y = xp.asarray(rng.random((2, 1)))
112
+ res = stats.entropy(x, y, axis=-1)
113
+ xp_assert_equal(res[0], stats.entropy(x, y[0, ...]))
114
+ xp_assert_equal(res[1], stats.entropy(x, y[1, ...]))
115
+
116
+ @array_api_compatible
117
+ def test_entropy_shape_mismatch(self, xp):
118
+ x = xp.ones((10, 1, 12))
119
+ y = xp.ones((11, 2))
120
+ message = "Array shapes are incompatible for broadcasting."
121
+ with pytest.raises(ValueError, match=message):
122
+ stats.entropy(x, y)
123
+
124
+ @array_api_compatible
125
+ def test_input_validation(self, xp):
126
+ x = xp.ones(10)
127
+ message = "`base` must be a positive number."
128
+ with pytest.raises(ValueError, match=message):
129
+ stats.entropy(x, base=-2)
130
+
131
+
132
+ class TestDifferentialEntropy:
133
+ """
134
+ Vasicek results are compared with the R package vsgoftest.
135
+
136
+ # library(vsgoftest)
137
+ #
138
+ # samp <- c(<values>)
139
+ # entropy.estimate(x = samp, window = <window_length>)
140
+
141
+ """
142
+
143
+ def test_differential_entropy_vasicek(self):
144
+
145
+ random_state = np.random.RandomState(0)
146
+ values = random_state.standard_normal(100)
147
+
148
+ entropy = stats.differential_entropy(values, method='vasicek')
149
+ assert_allclose(entropy, 1.342551, rtol=1e-6)
150
+
151
+ entropy = stats.differential_entropy(values, window_length=1,
152
+ method='vasicek')
153
+ assert_allclose(entropy, 1.122044, rtol=1e-6)
154
+
155
+ entropy = stats.differential_entropy(values, window_length=8,
156
+ method='vasicek')
157
+ assert_allclose(entropy, 1.349401, rtol=1e-6)
158
+
159
+ def test_differential_entropy_vasicek_2d_nondefault_axis(self):
160
+ random_state = np.random.RandomState(0)
161
+ values = random_state.standard_normal((3, 100))
162
+
163
+ entropy = stats.differential_entropy(values, axis=1, method='vasicek')
164
+ assert_allclose(
165
+ entropy,
166
+ [1.342551, 1.341826, 1.293775],
167
+ rtol=1e-6,
168
+ )
169
+
170
+ entropy = stats.differential_entropy(values, axis=1, window_length=1,
171
+ method='vasicek')
172
+ assert_allclose(
173
+ entropy,
174
+ [1.122044, 1.102944, 1.129616],
175
+ rtol=1e-6,
176
+ )
177
+
178
+ entropy = stats.differential_entropy(values, axis=1, window_length=8,
179
+ method='vasicek')
180
+ assert_allclose(
181
+ entropy,
182
+ [1.349401, 1.338514, 1.292332],
183
+ rtol=1e-6,
184
+ )
185
+
186
+ def test_differential_entropy_raises_value_error(self):
187
+ random_state = np.random.RandomState(0)
188
+ values = random_state.standard_normal((3, 100))
189
+
190
+ error_str = (
191
+ r"Window length \({window_length}\) must be positive and less "
192
+ r"than half the sample size \({sample_size}\)."
193
+ )
194
+
195
+ sample_size = values.shape[1]
196
+
197
+ for window_length in {-1, 0, sample_size//2, sample_size}:
198
+
199
+ formatted_error_str = error_str.format(
200
+ window_length=window_length,
201
+ sample_size=sample_size,
202
+ )
203
+
204
+ with assert_raises(ValueError, match=formatted_error_str):
205
+ stats.differential_entropy(
206
+ values,
207
+ window_length=window_length,
208
+ axis=1,
209
+ )
210
+
211
+ def test_base_differential_entropy_with_axis_0_is_equal_to_default(self):
212
+ random_state = np.random.RandomState(0)
213
+ values = random_state.standard_normal((100, 3))
214
+
215
+ entropy = stats.differential_entropy(values, axis=0)
216
+ default_entropy = stats.differential_entropy(values)
217
+ assert_allclose(entropy, default_entropy)
218
+
219
+ def test_base_differential_entropy_transposed(self):
220
+ random_state = np.random.RandomState(0)
221
+ values = random_state.standard_normal((3, 100))
222
+
223
+ assert_allclose(
224
+ stats.differential_entropy(values.T).T,
225
+ stats.differential_entropy(values, axis=1),
226
+ )
227
+
228
+ def test_input_validation(self):
229
+ x = np.random.rand(10)
230
+
231
+ message = "`base` must be a positive number or `None`."
232
+ with pytest.raises(ValueError, match=message):
233
+ stats.differential_entropy(x, base=-2)
234
+
235
+ message = "`method` must be one of..."
236
+ with pytest.raises(ValueError, match=message):
237
+ stats.differential_entropy(x, method='ekki-ekki')
238
+
239
+ @pytest.mark.parametrize('method', ['vasicek', 'van es',
240
+ 'ebrahimi', 'correa'])
241
+ def test_consistency(self, method):
242
+ # test that method is a consistent estimator
243
+ n = 10000 if method == 'correa' else 1000000
244
+ rvs = stats.norm.rvs(size=n, random_state=0)
245
+ expected = stats.norm.entropy()
246
+ res = stats.differential_entropy(rvs, method=method)
247
+ assert_allclose(res, expected, rtol=0.005)
248
+
249
+ # values from differential_entropy reference [6], table 1, n=50, m=7
250
+ norm_rmse_std_cases = { # method: (RMSE, STD)
251
+ 'vasicek': (0.198, 0.109),
252
+ 'van es': (0.212, 0.110),
253
+ 'correa': (0.135, 0.112),
254
+ 'ebrahimi': (0.128, 0.109)
255
+ }
256
+
257
+ @pytest.mark.parametrize('method, expected',
258
+ list(norm_rmse_std_cases.items()))
259
+ def test_norm_rmse_std(self, method, expected):
260
+ # test that RMSE and standard deviation of estimators matches values
261
+ # given in differential_entropy reference [6]. Incidentally, also
262
+ # tests vectorization.
263
+ reps, n, m = 10000, 50, 7
264
+ rmse_expected, std_expected = expected
265
+ rvs = stats.norm.rvs(size=(reps, n), random_state=0)
266
+ true_entropy = stats.norm.entropy()
267
+ res = stats.differential_entropy(rvs, window_length=m,
268
+ method=method, axis=-1)
269
+ assert_allclose(np.sqrt(np.mean((res - true_entropy)**2)),
270
+ rmse_expected, atol=0.005)
271
+ assert_allclose(np.std(res), std_expected, atol=0.002)
272
+
273
+ # values from differential_entropy reference [6], table 2, n=50, m=7
274
+ expon_rmse_std_cases = { # method: (RMSE, STD)
275
+ 'vasicek': (0.194, 0.148),
276
+ 'van es': (0.179, 0.149),
277
+ 'correa': (0.155, 0.152),
278
+ 'ebrahimi': (0.151, 0.148)
279
+ }
280
+
281
+ @pytest.mark.parametrize('method, expected',
282
+ list(expon_rmse_std_cases.items()))
283
+ def test_expon_rmse_std(self, method, expected):
284
+ # test that RMSE and standard deviation of estimators matches values
285
+ # given in differential_entropy reference [6]. Incidentally, also
286
+ # tests vectorization.
287
+ reps, n, m = 10000, 50, 7
288
+ rmse_expected, std_expected = expected
289
+ rvs = stats.expon.rvs(size=(reps, n), random_state=0)
290
+ true_entropy = stats.expon.entropy()
291
+ res = stats.differential_entropy(rvs, window_length=m,
292
+ method=method, axis=-1)
293
+ assert_allclose(np.sqrt(np.mean((res - true_entropy)**2)),
294
+ rmse_expected, atol=0.005)
295
+ assert_allclose(np.std(res), std_expected, atol=0.002)
296
+
297
+ @pytest.mark.parametrize('n, method', [(8, 'van es'),
298
+ (12, 'ebrahimi'),
299
+ (1001, 'vasicek')])
300
+ def test_method_auto(self, n, method):
301
+ rvs = stats.norm.rvs(size=(n,), random_state=0)
302
+ res1 = stats.differential_entropy(rvs)
303
+ res2 = stats.differential_entropy(rvs, method=method)
304
+ assert res1 == res2
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_fast_gen_inversion.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import warnings
3
+ import numpy as np
4
+ from numpy.testing import (assert_array_equal, assert_allclose,
5
+ suppress_warnings)
6
+ from copy import deepcopy
7
+ from scipy.stats.sampling import FastGeneratorInversion
8
+ from scipy import stats
9
+
10
+
11
+ def test_bad_args():
12
+ # loc and scale must be scalar
13
+ with pytest.raises(ValueError, match="loc must be scalar"):
14
+ FastGeneratorInversion(stats.norm(loc=(1.2, 1.3)))
15
+ with pytest.raises(ValueError, match="scale must be scalar"):
16
+ FastGeneratorInversion(stats.norm(scale=[1.5, 5.7]))
17
+
18
+ with pytest.raises(ValueError, match="'test' cannot be used to seed"):
19
+ FastGeneratorInversion(stats.norm(), random_state="test")
20
+
21
+ msg = "Each of the 1 shape parameters must be a scalar"
22
+ with pytest.raises(ValueError, match=msg):
23
+ FastGeneratorInversion(stats.gamma([1.3, 2.5]))
24
+
25
+ with pytest.raises(ValueError, match="`dist` must be a frozen"):
26
+ FastGeneratorInversion("xy")
27
+
28
+ with pytest.raises(ValueError, match="Distribution 'truncnorm' is not"):
29
+ FastGeneratorInversion(stats.truncnorm(1.3, 4.5))
30
+
31
+
32
+ def test_random_state():
33
+ # fixed seed
34
+ gen = FastGeneratorInversion(stats.norm(), random_state=68734509)
35
+ x1 = gen.rvs(size=10)
36
+ gen.random_state = 68734509
37
+ x2 = gen.rvs(size=10)
38
+ assert_array_equal(x1, x2)
39
+
40
+ # Generator
41
+ urng = np.random.default_rng(20375857)
42
+ gen = FastGeneratorInversion(stats.norm(), random_state=urng)
43
+ x1 = gen.rvs(size=10)
44
+ gen.random_state = np.random.default_rng(20375857)
45
+ x2 = gen.rvs(size=10)
46
+ assert_array_equal(x1, x2)
47
+
48
+ # RandomState
49
+ urng = np.random.RandomState(2364)
50
+ gen = FastGeneratorInversion(stats.norm(), random_state=urng)
51
+ x1 = gen.rvs(size=10)
52
+ gen.random_state = np.random.RandomState(2364)
53
+ x2 = gen.rvs(size=10)
54
+ assert_array_equal(x1, x2)
55
+
56
+ # if evaluate_error is called, it must not interfere with the random_state
57
+ # used by rvs
58
+ gen = FastGeneratorInversion(stats.norm(), random_state=68734509)
59
+ x1 = gen.rvs(size=10)
60
+ _ = gen.evaluate_error(size=5) # this will generate 5 uniform rvs
61
+ x2 = gen.rvs(size=10)
62
+ gen.random_state = 68734509
63
+ x3 = gen.rvs(size=20)
64
+ assert_array_equal(x2, x3[10:])
65
+
66
+
67
+ dists_with_params = [
68
+ ("alpha", (3.5,)),
69
+ ("anglit", ()),
70
+ ("argus", (3.5,)),
71
+ ("argus", (5.1,)),
72
+ ("beta", (1.5, 0.9)),
73
+ ("cosine", ()),
74
+ ("betaprime", (2.5, 3.3)),
75
+ ("bradford", (1.2,)),
76
+ ("burr", (1.3, 2.4)),
77
+ ("burr12", (0.7, 1.2)),
78
+ ("cauchy", ()),
79
+ ("chi2", (3.5,)),
80
+ ("chi", (4.5,)),
81
+ ("crystalball", (0.7, 1.2)),
82
+ ("expon", ()),
83
+ ("gamma", (1.5,)),
84
+ ("gennorm", (2.7,)),
85
+ ("gumbel_l", ()),
86
+ ("gumbel_r", ()),
87
+ ("hypsecant", ()),
88
+ ("invgauss", (3.1,)),
89
+ ("invweibull", (1.5,)),
90
+ ("laplace", ()),
91
+ ("logistic", ()),
92
+ ("maxwell", ()),
93
+ ("moyal", ()),
94
+ ("norm", ()),
95
+ ("pareto", (1.3,)),
96
+ ("powerlaw", (7.6,)),
97
+ ("rayleigh", ()),
98
+ ("semicircular", ()),
99
+ ("t", (5.7,)),
100
+ ("wald", ()),
101
+ ("weibull_max", (2.4,)),
102
+ ("weibull_min", (1.2,)),
103
+ ]
104
+
105
+
106
+ @pytest.mark.parametrize(("distname, args"), dists_with_params)
107
+ def test_rvs_and_ppf(distname, args):
108
+ # check sample against rvs generated by rv_continuous
109
+ urng = np.random.default_rng(9807324628097097)
110
+ rng1 = getattr(stats, distname)(*args)
111
+ rvs1 = rng1.rvs(size=500, random_state=urng)
112
+ rng2 = FastGeneratorInversion(rng1, random_state=urng)
113
+ rvs2 = rng2.rvs(size=500)
114
+ assert stats.cramervonmises_2samp(rvs1, rvs2).pvalue > 0.01
115
+
116
+ # check ppf
117
+ q = [0.001, 0.1, 0.5, 0.9, 0.999]
118
+ assert_allclose(rng1.ppf(q), rng2.ppf(q), atol=1e-10)
119
+
120
+
121
+ @pytest.mark.parametrize(("distname, args"), dists_with_params)
122
+ def test_u_error(distname, args):
123
+ # check sample against rvs generated by rv_continuous
124
+ dist = getattr(stats, distname)(*args)
125
+ with suppress_warnings() as sup:
126
+ # filter the warnings thrown by UNU.RAN
127
+ sup.filter(RuntimeWarning)
128
+ rng = FastGeneratorInversion(dist)
129
+ u_error, x_error = rng.evaluate_error(
130
+ size=10_000, random_state=9807324628097097, x_error=False
131
+ )
132
+ assert u_error <= 1e-10
133
+
134
+
135
+ @pytest.mark.xslow
136
+ @pytest.mark.xfail(reason="geninvgauss CDF is not accurate")
137
+ def test_geninvgauss_uerror():
138
+ dist = stats.geninvgauss(3.2, 1.5)
139
+ rng = FastGeneratorInversion(dist)
140
+ err = rng.evaluate_error(size=10_000, random_state=67982)
141
+ assert err[0] < 1e-10
142
+
143
+
144
+ # TODO: add more distributions
145
+ @pytest.mark.parametrize(("distname, args"), [("beta", (0.11, 0.11))])
146
+ def test_error_extreme_params(distname, args):
147
+ # take extreme parameters where u-error might not be below the tolerance
148
+ # due to limitations of floating point arithmetic
149
+ with suppress_warnings() as sup:
150
+ # filter the warnings thrown by UNU.RAN for such extreme parameters
151
+ sup.filter(RuntimeWarning)
152
+ dist = getattr(stats, distname)(*args)
153
+ rng = FastGeneratorInversion(dist)
154
+ u_error, x_error = rng.evaluate_error(
155
+ size=10_000, random_state=980732462809709732623, x_error=True
156
+ )
157
+ if u_error >= 2.5 * 1e-10:
158
+ assert x_error < 1e-9
159
+
160
+
161
+ def test_evaluate_error_inputs():
162
+ gen = FastGeneratorInversion(stats.norm())
163
+ with pytest.raises(ValueError, match="size must be an integer"):
164
+ gen.evaluate_error(size=3.5)
165
+ with pytest.raises(ValueError, match="size must be an integer"):
166
+ gen.evaluate_error(size=(3, 3))
167
+
168
+
169
+ def test_rvs_ppf_loc_scale():
170
+ loc, scale = 3.5, 2.3
171
+ dist = stats.norm(loc=loc, scale=scale)
172
+ rng = FastGeneratorInversion(dist, random_state=1234)
173
+ r = rng.rvs(size=1000)
174
+ r_rescaled = (r - loc) / scale
175
+ assert stats.cramervonmises(r_rescaled, "norm").pvalue > 0.01
176
+ q = [0.001, 0.1, 0.5, 0.9, 0.999]
177
+ assert_allclose(rng._ppf(q), rng.ppf(q), atol=1e-10)
178
+
179
+
180
+ def test_domain():
181
+ # only a basic check that the domain argument is passed to the
182
+ # UNU.RAN generators
183
+ rng = FastGeneratorInversion(stats.norm(), domain=(-1, 1))
184
+ r = rng.rvs(size=100)
185
+ assert -1 <= r.min() < r.max() <= 1
186
+
187
+ # if loc and scale are used, new domain is loc + scale*domain
188
+ loc, scale = 3.5, 1.3
189
+ dist = stats.norm(loc=loc, scale=scale)
190
+ rng = FastGeneratorInversion(dist, domain=(-1.5, 2))
191
+ r = rng.rvs(size=100)
192
+ lb, ub = loc - scale * 1.5, loc + scale * 2
193
+ assert lb <= r.min() < r.max() <= ub
194
+
195
+
196
+ @pytest.mark.parametrize(("distname, args, expected"),
197
+ [("beta", (3.5, 2.5), (0, 1)),
198
+ ("norm", (), (-np.inf, np.inf))])
199
+ def test_support(distname, args, expected):
200
+ # test that the support is updated if truncation and loc/scale are applied
201
+ # use beta distribution since it is a transformed betaprime distribution,
202
+ # so it is important that the correct support is considered
203
+ # (i.e., the support of beta is (0,1), while betaprime is (0, inf))
204
+ dist = getattr(stats, distname)(*args)
205
+ rng = FastGeneratorInversion(dist)
206
+ assert_array_equal(rng.support(), expected)
207
+ rng.loc = 1
208
+ rng.scale = 2
209
+ assert_array_equal(rng.support(), 1 + 2*np.array(expected))
210
+
211
+
212
+ @pytest.mark.parametrize(("distname, args"),
213
+ [("beta", (3.5, 2.5)), ("norm", ())])
214
+ def test_support_truncation(distname, args):
215
+ # similar test for truncation
216
+ dist = getattr(stats, distname)(*args)
217
+ rng = FastGeneratorInversion(dist, domain=(0.5, 0.7))
218
+ assert_array_equal(rng.support(), (0.5, 0.7))
219
+ rng.loc = 1
220
+ rng.scale = 2
221
+ assert_array_equal(rng.support(), (1 + 2 * 0.5, 1 + 2 * 0.7))
222
+
223
+
224
+ def test_domain_shift_truncation():
225
+ # center of norm is zero, it should be shifted to the left endpoint of
226
+ # domain. if this was not the case, PINV in UNURAN would raise a warning
227
+ # as the center is not inside the domain
228
+ with warnings.catch_warnings():
229
+ warnings.simplefilter("error")
230
+ rng = FastGeneratorInversion(stats.norm(), domain=(1, 2))
231
+ r = rng.rvs(size=100)
232
+ assert 1 <= r.min() < r.max() <= 2
233
+
234
+
235
+ def test_non_rvs_methods_with_domain():
236
+ # as a first step, compare truncated normal against stats.truncnorm
237
+ rng = FastGeneratorInversion(stats.norm(), domain=(2.3, 3.2))
238
+ trunc_norm = stats.truncnorm(2.3, 3.2)
239
+ # take values that are inside and outside the domain
240
+ x = (2.0, 2.4, 3.0, 3.4)
241
+ p = (0.01, 0.5, 0.99)
242
+ assert_allclose(rng._cdf(x), trunc_norm.cdf(x))
243
+ assert_allclose(rng._ppf(p), trunc_norm.ppf(p))
244
+ loc, scale = 2, 3
245
+ rng.loc = 2
246
+ rng.scale = 3
247
+ trunc_norm = stats.truncnorm(2.3, 3.2, loc=loc, scale=scale)
248
+ x = np.array(x) * scale + loc
249
+ assert_allclose(rng._cdf(x), trunc_norm.cdf(x))
250
+ assert_allclose(rng._ppf(p), trunc_norm.ppf(p))
251
+
252
+ # do another sanity check with beta distribution
253
+ # in that case, it is important to use the correct domain since beta
254
+ # is a transformation of betaprime which has a different support
255
+ rng = FastGeneratorInversion(stats.beta(2.5, 3.5), domain=(0.3, 0.7))
256
+ rng.loc = 2
257
+ rng.scale = 2.5
258
+ # the support is 2.75, , 3.75 (2 + 2.5 * 0.3, 2 + 2.5 * 0.7)
259
+ assert_array_equal(rng.support(), (2.75, 3.75))
260
+ x = np.array([2.74, 2.76, 3.74, 3.76])
261
+ # the cdf needs to be zero outside of the domain
262
+ y_cdf = rng._cdf(x)
263
+ assert_array_equal((y_cdf[0], y_cdf[3]), (0, 1))
264
+ assert np.min(y_cdf[1:3]) > 0
265
+ # ppf needs to map 0 and 1 to the boundaries
266
+ assert_allclose(rng._ppf(y_cdf), (2.75, 2.76, 3.74, 3.75))
267
+
268
+
269
+ def test_non_rvs_methods_without_domain():
270
+ norm_dist = stats.norm()
271
+ rng = FastGeneratorInversion(norm_dist)
272
+ x = np.linspace(-3, 3, num=10)
273
+ p = (0.01, 0.5, 0.99)
274
+ assert_allclose(rng._cdf(x), norm_dist.cdf(x))
275
+ assert_allclose(rng._ppf(p), norm_dist.ppf(p))
276
+ loc, scale = 0.5, 1.3
277
+ rng.loc = loc
278
+ rng.scale = scale
279
+ norm_dist = stats.norm(loc=loc, scale=scale)
280
+ assert_allclose(rng._cdf(x), norm_dist.cdf(x))
281
+ assert_allclose(rng._ppf(p), norm_dist.ppf(p))
282
+
283
+ @pytest.mark.parametrize(("domain, x"),
284
+ [(None, 0.5),
285
+ ((0, 1), 0.5),
286
+ ((0, 1), 1.5)])
287
+ def test_scalar_inputs(domain, x):
288
+ """ pdf, cdf etc should map scalar values to scalars. check with and
289
+ w/o domain since domain impacts pdf, cdf etc
290
+ Take x inside and outside of domain """
291
+ rng = FastGeneratorInversion(stats.norm(), domain=domain)
292
+ assert np.isscalar(rng._cdf(x))
293
+ assert np.isscalar(rng._ppf(0.5))
294
+
295
+
296
+ def test_domain_argus_large_chi():
297
+ # for large chi, the Gamma distribution is used and the domain has to be
298
+ # transformed. this is a test to ensure that the transformation works
299
+ chi, lb, ub = 5.5, 0.25, 0.75
300
+ rng = FastGeneratorInversion(stats.argus(chi), domain=(lb, ub))
301
+ rng.random_state = 4574
302
+ r = rng.rvs(size=500)
303
+ assert lb <= r.min() < r.max() <= ub
304
+ # perform goodness of fit test with conditional cdf
305
+ cdf = stats.argus(chi).cdf
306
+ prob = cdf(ub) - cdf(lb)
307
+ assert stats.cramervonmises(r, lambda x: cdf(x) / prob).pvalue > 0.05
308
+
309
+
310
+ def test_setting_loc_scale():
311
+ rng = FastGeneratorInversion(stats.norm(), random_state=765765864)
312
+ r1 = rng.rvs(size=1000)
313
+ rng.loc = 3.0
314
+ rng.scale = 2.5
315
+ r2 = rng.rvs(1000)
316
+ # rescaled r2 should be again standard normal
317
+ assert stats.cramervonmises_2samp(r1, (r2 - 3) / 2.5).pvalue > 0.05
318
+ # reset values to default loc=0, scale=1
319
+ rng.loc = 0
320
+ rng.scale = 1
321
+ r2 = rng.rvs(1000)
322
+ assert stats.cramervonmises_2samp(r1, r2).pvalue > 0.05
323
+
324
+
325
def test_ignore_shape_range():
    msg = "No generator is defined for the shape parameters"
    with pytest.raises(ValueError, match=msg):
        FastGeneratorInversion(stats.t(0.03))
    # The recommended range of shape parameters can be overridden, but the
    # u-error can then be expected to exceed the usual tolerance.
    gen = FastGeneratorInversion(stats.t(0.03), ignore_shape_range=True)
    u_err, _ = gen.evaluate_error(size=1000, random_state=234)
    assert u_err >= 1e-6
334
+
335
@pytest.mark.xfail_on_32bit(
    "NumericalInversePolynomial.qrvs fails for Win 32-bit"
)
class TestQRVS:
    """Tests for quasi-random variate generation via `qrvs`."""

    def test_input_validation(self):
        """Invalid `qmc_engine`/`d` combinations must raise ValueError."""
        gen = FastGeneratorInversion(stats.norm())

        match = "`qmc_engine` must be an instance of..."
        with pytest.raises(ValueError, match=match):
            gen.qrvs(qmc_engine=0)

        match = "`d` must be consistent with dimension of `qmc_engine`."
        with pytest.raises(ValueError, match=match):
            gen.qrvs(d=3, qmc_engine=stats.qmc.Halton(2))

    # QMC engines to test against; None selects the default engine
    qrngs = [None, stats.qmc.Sobol(1, seed=0), stats.qmc.Halton(3, seed=0)]
    # `size=None` should not add anything to the shape, `size=1` should
    sizes = [
        (None, tuple()),
        (1, (1,)),
        (4, (4,)),
        ((4,), (4,)),
        ((2, 4), (2, 4)),
    ]
    # Neither `d=None` nor `d=1` should add anything to the shape
    ds = [(None, tuple()), (1, tuple()), (3, (3,))]

    @pytest.mark.parametrize("qrng", qrngs)
    @pytest.mark.parametrize("size_in, size_out", sizes)
    @pytest.mark.parametrize("d_in, d_out", ds)
    def test_QRVS_shape_consistency(self, qrng, size_in, size_out,
                                    d_in, d_out):
        """Output shape must be `size + d` for every size/d/engine combo."""
        gen = FastGeneratorInversion(stats.norm())

        # If d and qrng.d are inconsistent, an error is raised
        if d_in is not None and qrng is not None and qrng.d != d_in:
            match = "`d` must be consistent with dimension of `qmc_engine`."
            with pytest.raises(ValueError, match=match):
                gen.qrvs(size_in, d=d_in, qmc_engine=qrng)
            return

        # Sometimes d is really determined by qrng
        if d_in is None and qrng is not None and qrng.d != 1:
            d_out = (qrng.d,)

        shape_expected = size_out + d_out

        # copy the engine BEFORE `qrvs` consumes (advances) its state, so the
        # reference sample below starts from the same point in the sequence
        qrng2 = deepcopy(qrng)
        qrvs = gen.qrvs(size=size_in, d=d_in, qmc_engine=qrng)
        if size_in is not None:
            assert qrvs.shape == shape_expected

        if qrng2 is not None:
            # reference: transform the same low-discrepancy points via ppf
            uniform = qrng2.random(np.prod(size_in) or 1)
            qrvs2 = stats.norm.ppf(uniform).reshape(shape_expected)
            assert_allclose(qrvs, qrvs2, atol=1e-12)

    def test_QRVS_size_tuple(self):
        # QMCEngine samples are always of shape (n, d). When `size` is a tuple,
        # we set `n = prod(size)` in the call to qmc_engine.random, transform
        # the sample, and reshape it to the final dimensions. When we reshape,
        # we need to be careful, because the _columns_ of the sample returned
        # by a QMCEngine are "independent"-ish, but the elements within the
        # columns are not. We need to make sure that this doesn't get mixed up
        # by reshaping: qrvs[..., i] should remain "independent"-ish of
        # qrvs[..., i+1], but the elements within qrvs[..., i] should be
        # transformed from the same low-discrepancy sequence.

        gen = FastGeneratorInversion(stats.norm())

        size = (3, 4)
        d = 5
        # two identically-seeded engines: one consumed by qrvs, one by hand
        qrng = stats.qmc.Halton(d, seed=0)
        qrng2 = stats.qmc.Halton(d, seed=0)

        uniform = qrng2.random(np.prod(size))

        qrvs = gen.qrvs(size=size, d=d, qmc_engine=qrng)
        qrvs2 = stats.norm.ppf(uniform)

        # column i of the raw sample must map onto qrvs[..., i]
        for i in range(d):
            sample = qrvs[..., i]
            sample2 = qrvs2[:, i].reshape(size)
            assert_allclose(sample, sample2, atol=1e-12)
419
+
420
+
421
+ def test_burr_overflow():
422
+ # this case leads to an overflow error if math.exp is used
423
+ # in the definition of the burr pdf instead of np.exp
424
+ # a direct implementation of the PDF as x**(-c-1) / (1+x**(-c))**(d+1)
425
+ # also leads to an overflow error in the setup
426
+ args = (1.89128135, 0.30195177)
427
+ with suppress_warnings() as sup:
428
+ # filter potential overflow warning
429
+ sup.filter(RuntimeWarning)
430
+ gen = FastGeneratorInversion(stats.burr(*args))
431
+ u_error, _ = gen.evaluate_error(random_state=4326)
432
+ assert u_error <= 1e-10
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_fit.py ADDED
@@ -0,0 +1,1038 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ import numpy.testing as npt
4
+ from numpy.testing import assert_allclose, assert_equal
5
+ import pytest
6
+ from scipy import stats
7
+ from scipy.optimize import differential_evolution
8
+
9
+ from .test_continuous_basic import distcont
10
+ from scipy.stats._distn_infrastructure import FitError
11
+ from scipy.stats._distr_params import distdiscrete
12
+ from scipy.stats import goodness_of_fit
13
+
14
+
15
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much

fit_sizes = [1000, 5000, 10000]  # sample sizes to try

thresh_percent = 0.25  # percent of true parameters for fail cut-off
thresh_min = 0.75  # minimum difference estimate - true to fail test

# Distributions whose MLE fit is expected to fail the closeness check.
mle_failing_fits = [
    'gausshyper',
    'genexpon',
    'gengamma',
    'irwinhall',
    'kappa4',
    'ksone',
    'kstwo',
    'ncf',
    'ncx2',
    'truncexpon',
    'tukeylambda',
    'vonmises',
    'levy_stable',
    'trapezoid',
    'truncweibull_min',
    'studentized_range',
]

# these pass but are XSLOW (>1s)
mle_Xslow_fits = ['betaprime', 'crystalball', 'exponweib', 'f', 'geninvgauss',
                  'jf_skew_t', 'recipinvgauss', 'rel_breitwigner', 'vonmises_line']

# The MLE fit method of these distributions doesn't perform well when all
# parameters are fit, so test them with the location fixed at 0.
mle_use_floc0 = [
    'burr',
    'chi',
    'chi2',
    'mielke',
    'pearson3',
    'genhalflogistic',
    'rdist',
    'pareto',
    'powerlaw',  # distfn.nnlf(est2, rvs) > distfn.nnlf(est1, rvs) otherwise
    'powerlognorm',
    'wrapcauchy',
    'rel_breitwigner',
]

# Distributions whose method-of-moments fit is expected to fail.
mm_failing_fits = ['alpha', 'betaprime', 'burr', 'burr12', 'cauchy', 'chi',
                   'chi2', 'crystalball', 'dgamma', 'dweibull', 'f',
                   'fatiguelife', 'fisk', 'foldcauchy', 'genextreme',
                   'gengamma', 'genhyperbolic', 'gennorm', 'genpareto',
                   'halfcauchy', 'invgamma', 'invweibull', 'irwinhall', 'jf_skew_t',
                   'johnsonsu', 'kappa3', 'ksone', 'kstwo', 'levy', 'levy_l',
                   'levy_stable', 'loglaplace', 'lomax', 'mielke', 'nakagami',
                   'ncf', 'nct', 'ncx2', 'pareto', 'powerlognorm', 'powernorm',
                   'rel_breitwigner', 'skewcauchy', 't', 'trapezoid', 'triang',
                   'truncpareto', 'truncweibull_min', 'tukeylambda',
                   'studentized_range']

# not sure if these fail, but they caused my patience to fail
mm_XXslow_fits = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
                  'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
                  'kappa4', 'kstwobign', 'recipinvgauss',
                  'truncexpon', 'vonmises', 'vonmises_line']

# these pass but are XSLOW (>1s)
mm_Xslow_fits = ['wrapcauchy']

# Lookup tables keyed by fit method ("MM" or "MLE"); used by test_cont_fit.
failing_fits = {"MM": mm_failing_fits + mm_XXslow_fits, "MLE": mle_failing_fits}
xslow_fits = {"MM": mm_Xslow_fits, "MLE": mle_Xslow_fits}
# MLE fit of interval-censored data is not checked for these distributions.
fail_interval_censored = {"truncpareto"}

# Don't run the fit test on these:
skip_fit = [
    'erlang',  # Subclass of gamma, generates a warning.
    'genhyperbolic', 'norminvgauss',  # too slow
]
+
94
+
95
def cases_test_cont_fit():
    # this tests the closeness of the estimated parameters to the true
    # parameters with fit method of continuous distributions
    # Note: is slow, some distributions don't converge with sample
    # size <= 10000
    yield from ((distname, arg) for distname, arg in distcont
                if distname not in skip_fit)
103
+
104
+
105
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg', cases_test_cont_fit())
@pytest.mark.parametrize('method', ["MLE", "MM"])
def test_cont_fit(distname, arg, method):
    # Check that the fitted parameters of `distname` are close to the true
    # parameters `arg`; known failures / very slow cases are opt-in via the
    # SCIPY_XFAIL / SCIPY_XSLOW environment variables.
    run_xfail = int(os.getenv('SCIPY_XFAIL', default=False))
    run_xslow = int(os.getenv('SCIPY_XSLOW', default=False))

    if distname in failing_fits[method] and not run_xfail:
        # The generic `fit` method can't be expected to work perfectly for all
        # distributions, data, and guesses. Some failures are expected.
        msg = "Failure expected; set environment variable SCIPY_XFAIL=1 to run."
        pytest.xfail(msg)

    if distname in xslow_fits[method] and not run_xslow:
        msg = "Very slow; set environment variable SCIPY_XSLOW=1 to run."
        pytest.skip(msg)

    distfn = getattr(stats, distname)

    # true parameters are the shapes plus loc=0, scale=1
    truearg = np.hstack([arg, [0.0, 1.0]])
    # per-parameter pass threshold: the larger of a relative and absolute bound
    diffthreshold = np.max(np.vstack([truearg*thresh_percent,
                                      np.full(distfn.numargs+2, thresh_min)]),
                           0)

    for fit_size in fit_sizes:
        # Note that if a fit succeeds, the other fit_sizes are skipped
        np.random.seed(1234)

        with np.errstate(all='ignore'):
            rvs = distfn.rvs(size=fit_size, *arg)
            if method == 'MLE' and distfn.name in mle_use_floc0:
                kwds = {'floc': 0}
            else:
                kwds = {}
            # start with default values
            est = distfn.fit(rvs, method=method, **kwds)
            if method == 'MLE':
                # Trivial test of the use of CensoredData. The fit() method
                # will check that data contains no actual censored data, and
                # do a regular uncensored fit.
                data1 = stats.CensoredData(rvs)
                est1 = distfn.fit(data1, **kwds)
                msg = ('Different results fitting uncensored data wrapped as'
                       f' CensoredData: {distfn.name}: est={est} est1={est1}')
                assert_allclose(est1, est, rtol=1e-10, err_msg=msg)
            if method == 'MLE' and distname not in fail_interval_censored:
                # Convert the first `nic` values in rvs to interval-censored
                # values. The interval is small, so est2 should be close to
                # est.
                nic = 15
                interval = np.column_stack((rvs, rvs))
                interval[:nic, 0] *= 0.99
                interval[:nic, 1] *= 1.01
                interval.sort(axis=1)
                data2 = stats.CensoredData(interval=interval)
                est2 = distfn.fit(data2, **kwds)
                msg = ('Different results fitting interval-censored'
                       f' data: {distfn.name}: est={est} est2={est2}')
                assert_allclose(est2, est, rtol=0.05, err_msg=msg)

        diff = est - truearg

        # threshold for location
        diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,
                                    thresh_min])

        if np.any(np.isnan(est)):
            raise AssertionError('nan returned in fit')
        else:
            # stop at the first sample size for which the fit is acceptable
            if np.all(np.abs(diff) <= diffthreshold):
                break
    else:
        # for/else: no sample size produced an acceptable estimate
        txt = 'parameter: %s\n' % str(truearg)
        txt += 'estimated: %s\n' % str(est)
        txt += 'diff : %s\n' % str(diff)
        raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
181
+
182
+
183
+ def _check_loc_scale_mle_fit(name, data, desired, atol=None):
184
+ d = getattr(stats, name)
185
+ actual = d.fit(data)[-2:]
186
+ assert_allclose(actual, desired, atol=atol,
187
+ err_msg='poor mle fit of (loc, scale) in %s' % name)
188
+
189
+
190
def test_non_default_loc_scale_mle_fit():
    """loc/scale MLE should match known values for uniform and expon."""
    data = np.array([1.01, 1.78, 1.78, 1.78, 1.88, 1.88, 1.88, 2.00])
    for name, expected in (('uniform', [1.01, 0.99]),
                           ('expon', [1.01, 0.73875])):
        _check_loc_scale_mle_fit(name, data, expected, 1e-3)
194
+
195
+
196
def test_expon_fit():
    """gh-6167"""
    data = [0, 0, 0, 0, 2, 2, 2, 2]
    estimated = stats.expon.fit(data, floc=0)
    assert_allclose(estimated, [0, 1.0], atol=1e-3)
201
+
202
+
203
def test_fit_error():
    # Fitting a beta distribution to 0/1-only data must raise FitError
    # (the optimizer converges to the parameter-space boundary) and emit a
    # RuntimeWarning along the way.
    data = np.array([0.0] * 29 + [1.0] * 21)
    message = "Optimization converged to parameters that are..."
    with pytest.raises(FitError, match=message), \
            pytest.warns(RuntimeWarning):
        stats.beta.fit(data)
209
+
210
+
211
@pytest.mark.parametrize("dist, params",
                         [(stats.norm, (0.5, 2.5)),  # type: ignore[attr-defined]
                          (stats.binom, (10, 0.3, 2))])  # type: ignore[attr-defined]
def test_nnlf_and_related_methods(dist, params):
    """`nnlf` and `_penalized_nnlf` should equal -sum(log pdf/pmf)."""
    rng = np.random.default_rng(983459824)

    # continuous distributions expose `pdf`, discrete ones `pmf`
    logpxf = dist.logpdf if hasattr(dist, 'pdf') else dist.logpmf

    sample = dist.rvs(*params, size=100, random_state=rng)
    expected = -logpxf(sample, *params).sum()
    assert_allclose(dist.nnlf(params, sample), expected)
    assert_allclose(dist._penalized_nnlf(params, sample), expected)
228
+
229
+
230
def cases_test_fit_mle():
    """Yield distribution names for the basic MLE fit test, marked by speed."""
    # These fail default test or hang
    skip_basic_fit = {'argus', 'irwinhall', 'foldnorm', 'truncpareto',
                      'truncweibull_min', 'ksone', 'levy_stable',
                      'studentized_range', 'kstwo', 'arcsine'}

    # Please keep this list in alphabetical order...
    slow_basic_fit = {'alpha', 'betaprime', 'binom', 'bradford', 'burr12',
                      'chi', 'crystalball', 'dweibull', 'erlang', 'exponnorm',
                      'exponpow', 'f', 'fatiguelife', 'fisk', 'foldcauchy', 'gamma',
                      'genexpon', 'genextreme', 'gennorm', 'genpareto',
                      'gompertz', 'halfgennorm', 'invgamma', 'invgauss', 'invweibull',
                      'jf_skew_t', 'johnsonsb', 'johnsonsu', 'kappa3',
                      'kstwobign', 'loglaplace', 'lognorm', 'lomax', 'mielke',
                      'nakagami', 'nbinom', 'norminvgauss',
                      'pareto', 'pearson3', 'powerlaw', 'powernorm',
                      'randint', 'rdist', 'recipinvgauss', 'rice', 'skewnorm',
                      't', 'uniform', 'weibull_max', 'weibull_min', 'wrapcauchy'}

    # Please keep this list in alphabetical order...
    xslow_basic_fit = {'beta', 'betabinom', 'betanbinom', 'burr', 'exponweib',
                       'gausshyper', 'gengamma', 'genhalflogistic',
                       'genhyperbolic', 'geninvgauss',
                       'hypergeom', 'kappa4', 'loguniform',
                       'ncf', 'nchypergeom_fisher', 'nchypergeom_wallenius',
                       'nct', 'ncx2', 'nhypergeom',
                       'powerlognorm', 'reciprocal', 'rel_breitwigner',
                       'skellam', 'trapezoid', 'triang', 'truncnorm',
                       'tukeylambda', 'vonmises', 'zipfian'}

    # bucket -> (mark factory, reason), checked in priority order
    buckets = ((skip_basic_fit, pytest.mark.skip, "tested separately"),
               (slow_basic_fit, pytest.mark.slow, "too slow (>= 0.25s)"),
               (xslow_basic_fit, pytest.mark.xslow, "too slow (>= 1.0s)"))

    for dist in dict(distdiscrete + distcont):
        if not isinstance(dist, str):
            yield pytest.param(dist,
                               marks=pytest.mark.skip(reason="tested separately"))
            continue
        for names, mark, reason in buckets:
            if dist in names:
                yield pytest.param(dist, marks=mark(reason=reason))
                break
        else:
            yield dist
272
+
273
+
274
def cases_test_fit_mse():
    """Yield distribution names for the basic MSE fit test, marked by speed."""
    # the first four are so slow that I'm not sure whether they would pass
    skip_basic_fit = {'levy_stable', 'studentized_range', 'ksone', 'skewnorm',
                      'irwinhall',  # hangs
                      'norminvgauss',  # super slow (~1 hr) but passes
                      'kstwo',  # very slow (~25 min) but passes
                      'geninvgauss',  # quite slow (~4 minutes) but passes
                      'gausshyper', 'genhyperbolic',  # integration warnings
                      'tukeylambda',  # close, but doesn't meet tolerance
                      'vonmises',  # can have negative CDF; doesn't play nice
                      'argus'}  # doesn't meet tolerance; tested separately

    # Please keep this list in alphabetical order...
    slow_basic_fit = {'alpha', 'anglit', 'arcsine', 'betabinom', 'bradford',
                      'chi', 'chi2', 'crystalball', 'dweibull',
                      'erlang', 'exponnorm', 'exponpow', 'exponweib',
                      'fatiguelife', 'fisk', 'foldcauchy', 'foldnorm',
                      'gamma', 'genexpon', 'genextreme', 'genhalflogistic',
                      'genlogistic', 'genpareto', 'gompertz',
                      'hypergeom', 'invweibull',
                      'johnsonsu', 'kappa3', 'kstwobign',
                      'laplace_asymmetric', 'loggamma', 'loglaplace',
                      'lognorm', 'lomax',
                      'maxwell', 'nhypergeom',
                      'pareto', 'powernorm', 'randint', 'recipinvgauss',
                      'semicircular',
                      't', 'triang', 'truncexpon', 'truncpareto',
                      'uniform',
                      'wald', 'weibull_max', 'weibull_min', 'wrapcauchy'}

    # Please keep this list in alphabetical order...
    xslow_basic_fit = {'argus', 'beta', 'betaprime', 'burr', 'burr12',
                       'dgamma', 'f', 'gengamma', 'gennorm',
                       'halfgennorm', 'invgamma', 'invgauss', 'jf_skew_t',
                       'johnsonsb', 'kappa4', 'loguniform', 'mielke',
                       'nakagami', 'ncf', 'nchypergeom_fisher',
                       'nchypergeom_wallenius', 'nct', 'ncx2',
                       'pearson3', 'powerlaw', 'powerlognorm',
                       'rdist', 'reciprocal', 'rel_breitwigner', 'rice',
                       'trapezoid', 'truncnorm', 'truncweibull_min',
                       'vonmises_line', 'zipfian'}

    warns_basic_fit = {'skellam'}  # can remove mark after gh-14901 is resolved

    # bucket -> (mark factory, reason), checked in priority order
    buckets = ((skip_basic_fit, pytest.mark.skip, "Fails. Oh well."),
               (slow_basic_fit, pytest.mark.slow, "too slow (>= 0.25s)"),
               (xslow_basic_fit, pytest.mark.xslow, "too slow (>= 1.0s)"))

    for dist in dict(distdiscrete + distcont):
        if not isinstance(dist, str):
            yield pytest.param(dist,
                               marks=pytest.mark.skip(reason="Fails. Oh well."))
            continue
        for names, mark, reason in buckets:
            if dist in names:
                yield pytest.param(dist, marks=mark(reason=reason))
                break
        else:
            if dist in warns_basic_fit:
                mark = pytest.mark.filterwarnings('ignore::RuntimeWarning')
                yield pytest.param(dist, marks=mark)
            else:
                yield dist
333
+
334
+
335
def cases_test_fitstart():
    """Yield (name, shapes) for every continuous distribution worth testing."""
    skipped = {'studentized_range', 'recipinvgauss'}  # slow
    for distname, shapes in dict(distcont).items():
        if isinstance(distname, str) and distname not in skipped:
            yield distname, shapes
341
+
342
+
343
@pytest.mark.parametrize('distname, shapes', cases_test_fitstart())
def test_fitstart(distname, shapes):
    """`_fitstart` must always produce a guess that satisfies `_argcheck`."""
    dist = getattr(stats, distname)
    data = np.random.default_rng(216342614).random(10)

    with np.errstate(invalid='ignore', divide='ignore'):  # irrelevant to test
        guess = dist._fitstart(data)

    assert dist._argcheck(*guess[:-2])
353
+
354
+
355
def assert_nlff_less_or_close(dist, data, params1, params0, rtol=1e-7, atol=0,
                              nlff_name='nnlf'):
    """Check `params1` fits `data` at least as well as `params0`.

    The negative log-fit-function named by `nlff_name` evaluated at `params1`
    must be strictly smaller than at `params0`, or numerically close to it.
    """
    nlff = getattr(dist, nlff_name)
    nlff1, nlff0 = nlff(params1, data), nlff(params0, data)
    # `not (a < b)` (rather than `a >= b`) so that NaN values fall through to
    # the allclose check and fail loudly instead of passing silently
    if not (nlff1 < nlff0):
        np.testing.assert_allclose(nlff1, nlff0, rtol=rtol, atol=atol)
362
+
363
+
364
class TestFit:
    # Shared fixtures: a binomial reference sample plus bounds/tolerances
    # used by the validation and fitting tests below.
    dist = stats.binom  # type: ignore[attr-defined]
    seed = 654634816187
    rng = np.random.default_rng(seed)
    data = stats.binom.rvs(5, 0.5, size=100, random_state=rng)  # type: ignore[attr-defined] # noqa: E501
    shape_bounds_a = [(1, 10), (0, 1)]  # bounds given as a sequence
    shape_bounds_d = {'n': (1, 10), 'p': (0, 1)}  # bounds given as a dict
    atol = 5e-2
    rtol = 1e-2
    tols = {'atol': atol, 'rtol': rtol}
374
+
375
+ def opt(self, *args, **kwds):
376
+ return differential_evolution(*args, seed=0, **kwds)
377
+
378
+ def test_dist_iv(self):
379
+ message = "`dist` must be an instance of..."
380
+ with pytest.raises(ValueError, match=message):
381
+ stats.fit(10, self.data, self.shape_bounds_a)
382
+
383
+ def test_data_iv(self):
384
+ message = "`data` must be exactly one-dimensional."
385
+ with pytest.raises(ValueError, match=message):
386
+ stats.fit(self.dist, [[1, 2, 3]], self.shape_bounds_a)
387
+
388
+ message = "All elements of `data` must be finite numbers."
389
+ with pytest.raises(ValueError, match=message):
390
+ stats.fit(self.dist, [1, 2, 3, np.nan], self.shape_bounds_a)
391
+ with pytest.raises(ValueError, match=message):
392
+ stats.fit(self.dist, [1, 2, 3, np.inf], self.shape_bounds_a)
393
+ with pytest.raises(ValueError, match=message):
394
+ stats.fit(self.dist, ['1', '2', '3'], self.shape_bounds_a)
395
+
396
+ def test_bounds_iv(self):
397
+ message = "Bounds provided for the following unrecognized..."
398
+ shape_bounds = {'n': (1, 10), 'p': (0, 1), '1': (0, 10)}
399
+ with pytest.warns(RuntimeWarning, match=message):
400
+ stats.fit(self.dist, self.data, shape_bounds)
401
+
402
+ message = "Each element of a `bounds` sequence must be a tuple..."
403
+ shape_bounds = [(1, 10, 3), (0, 1)]
404
+ with pytest.raises(ValueError, match=message):
405
+ stats.fit(self.dist, self.data, shape_bounds)
406
+
407
+ message = "Each element of `bounds` must be a tuple specifying..."
408
+ shape_bounds = [(1, 10, 3), (0, 1, 0.5)]
409
+ with pytest.raises(ValueError, match=message):
410
+ stats.fit(self.dist, self.data, shape_bounds)
411
+ shape_bounds = [1, 0]
412
+ with pytest.raises(ValueError, match=message):
413
+ stats.fit(self.dist, self.data, shape_bounds)
414
+
415
+ message = "A `bounds` sequence must contain at least 2 elements..."
416
+ shape_bounds = [(1, 10)]
417
+ with pytest.raises(ValueError, match=message):
418
+ stats.fit(self.dist, self.data, shape_bounds)
419
+
420
+ message = "A `bounds` sequence may not contain more than 3 elements..."
421
+ bounds = [(1, 10), (1, 10), (1, 10), (1, 10)]
422
+ with pytest.raises(ValueError, match=message):
423
+ stats.fit(self.dist, self.data, bounds)
424
+
425
+ message = "There are no values for `p` on the interval..."
426
+ shape_bounds = {'n': (1, 10), 'p': (1, 0)}
427
+ with pytest.raises(ValueError, match=message):
428
+ stats.fit(self.dist, self.data, shape_bounds)
429
+
430
+ message = "There are no values for `n` on the interval..."
431
+ shape_bounds = [(10, 1), (0, 1)]
432
+ with pytest.raises(ValueError, match=message):
433
+ stats.fit(self.dist, self.data, shape_bounds)
434
+
435
+ message = "There are no integer values for `n` on the interval..."
436
+ shape_bounds = [(1.4, 1.6), (0, 1)]
437
+ with pytest.raises(ValueError, match=message):
438
+ stats.fit(self.dist, self.data, shape_bounds)
439
+
440
+ message = "The intersection of user-provided bounds for `n`"
441
+ with pytest.raises(ValueError, match=message):
442
+ stats.fit(self.dist, self.data)
443
+ shape_bounds = [(-np.inf, np.inf), (0, 1)]
444
+ with pytest.raises(ValueError, match=message):
445
+ stats.fit(self.dist, self.data, shape_bounds)
446
+
447
+ def test_guess_iv(self):
448
+ message = "Guesses provided for the following unrecognized..."
449
+ guess = {'n': 1, 'p': 0.5, '1': 255}
450
+ with pytest.warns(RuntimeWarning, match=message):
451
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
452
+
453
+ message = "Each element of `guess` must be a scalar..."
454
+ guess = {'n': 1, 'p': 'hi'}
455
+ with pytest.raises(ValueError, match=message):
456
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
457
+ guess = [1, 'f']
458
+ with pytest.raises(ValueError, match=message):
459
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
460
+ guess = [[1, 2]]
461
+ with pytest.raises(ValueError, match=message):
462
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
463
+
464
+ message = "A `guess` sequence must contain at least 2..."
465
+ guess = [1]
466
+ with pytest.raises(ValueError, match=message):
467
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
468
+
469
+ message = "A `guess` sequence may not contain more than 3..."
470
+ guess = [1, 2, 3, 4]
471
+ with pytest.raises(ValueError, match=message):
472
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
473
+
474
+ message = "Guess for parameter `n` rounded.*|Guess for parameter `p` clipped.*"
475
+ guess = {'n': 4.5, 'p': -0.5}
476
+ with pytest.warns(RuntimeWarning, match=message):
477
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
478
+
479
+ message = "Guess for parameter `loc` rounded..."
480
+ guess = [5, 0.5, 0.5]
481
+ with pytest.warns(RuntimeWarning, match=message):
482
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
483
+
484
+ message = "Guess for parameter `p` clipped..."
485
+ guess = {'n': 5, 'p': -0.5}
486
+ with pytest.warns(RuntimeWarning, match=message):
487
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
488
+
489
+ message = "Guess for parameter `loc` clipped..."
490
+ guess = [5, 0.5, 1]
491
+ with pytest.warns(RuntimeWarning, match=message):
492
+ stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
493
+
494
    def basic_fit_test(self, dist_name, method):
        # Generic check: fit `dist_name` with bounds bracketing the reference
        # parameters; the fitted objective must be no worse than (or close to)
        # the objective evaluated at the reference parameters.
        N = 5000
        dist_data = dict(distcont + distdiscrete)
        rng = np.random.default_rng(self.seed)
        dist = getattr(stats, dist_name)
        shapes = np.array(dist_data[dist_name])
        bounds = np.empty((len(shapes) + 2, 2), dtype=np.float64)
        # bracket each reference shape by a factor of 10 on either side
        # (sign exponent keeps the bracket oriented for negative shapes)
        bounds[:-2, 0] = shapes/10.**np.sign(shapes)
        bounds[:-2, 1] = shapes*10.**np.sign(shapes)
        bounds[-2] = (0, 10)
        bounds[-1] = (1e-16, 10)
        loc = rng.uniform(*bounds[-2])
        scale = rng.uniform(*bounds[-1])
        ref = list(dist_data[dist_name]) + [loc, scale]

        if getattr(dist, 'pmf', False):
            # discrete: no scale parameter, and loc must be an integer
            ref = ref[:-1]
            ref[-1] = np.floor(loc)
            data = dist.rvs(*ref, size=N, random_state=rng)
            bounds = bounds[:-1]
        if getattr(dist, 'pdf', False):
            data = dist.rvs(*ref, size=N, random_state=rng)

        with npt.suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "overflow encountered")
            res = stats.fit(dist, data, bounds, method=method,
                            optimizer=self.opt)

        # objective function depends on the fit method
        nlff_names = {'mle': 'nnlf', 'mse': '_penalized_nlpsf'}
        nlff_name = nlff_names[method]
        assert_nlff_less_or_close(dist, data, res.params, ref, **self.tols,
                                  nlff_name=nlff_name)
527
+
528
+ @pytest.mark.parametrize("dist_name", cases_test_fit_mle())
529
+ def test_basic_fit_mle(self, dist_name):
530
+ self.basic_fit_test(dist_name, "mle")
531
+
532
+ @pytest.mark.parametrize("dist_name", cases_test_fit_mse())
533
+ def test_basic_fit_mse(self, dist_name):
534
+ self.basic_fit_test(dist_name, "mse")
535
+
536
+ def test_arcsine(self):
537
+ # Can't guarantee that all distributions will fit all data with
538
+ # arbitrary bounds. This distribution just happens to fail above.
539
+ # Try something slightly different.
540
+ N = 1000
541
+ rng = np.random.default_rng(self.seed)
542
+ dist = stats.arcsine
543
+ shapes = (1., 2.)
544
+ data = dist.rvs(*shapes, size=N, random_state=rng)
545
+ shape_bounds = {'loc': (0.1, 10), 'scale': (0.1, 10)}
546
+ res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
547
+ assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
548
+
549
+ @pytest.mark.parametrize("method", ('mle', 'mse'))
550
+ def test_argus(self, method):
551
+ # Can't guarantee that all distributions will fit all data with
552
+ # arbitrary bounds. This distribution just happens to fail above.
553
+ # Try something slightly different.
554
+ N = 1000
555
+ rng = np.random.default_rng(self.seed)
556
+ dist = stats.argus
557
+ shapes = (1., 2., 3.)
558
+ data = dist.rvs(*shapes, size=N, random_state=rng)
559
+ shape_bounds = {'chi': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)}
560
+ res = stats.fit(dist, data, shape_bounds, optimizer=self.opt, method=method)
561
+
562
+ assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
563
+
564
+ def test_foldnorm(self):
565
+ # Can't guarantee that all distributions will fit all data with
566
+ # arbitrary bounds. This distribution just happens to fail above.
567
+ # Try something slightly different.
568
+ N = 1000
569
+ rng = np.random.default_rng(self.seed)
570
+ dist = stats.foldnorm
571
+ shapes = (1.952125337355587, 2., 3.)
572
+ data = dist.rvs(*shapes, size=N, random_state=rng)
573
+ shape_bounds = {'c': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)}
574
+ res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
575
+
576
+ assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
577
+
578
+ def test_truncpareto(self):
579
+ # Can't guarantee that all distributions will fit all data with
580
+ # arbitrary bounds. This distribution just happens to fail above.
581
+ # Try something slightly different.
582
+ N = 1000
583
+ rng = np.random.default_rng(self.seed)
584
+ dist = stats.truncpareto
585
+ shapes = (1.8, 5.3, 2.3, 4.1)
586
+ data = dist.rvs(*shapes, size=N, random_state=rng)
587
+ shape_bounds = [(0.1, 10)]*4
588
+ res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
589
+
590
+ assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
591
+
592
+ def test_truncweibull_min(self):
593
+ # Can't guarantee that all distributions will fit all data with
594
+ # arbitrary bounds. This distribution just happens to fail above.
595
+ # Try something slightly different.
596
+ N = 1000
597
+ rng = np.random.default_rng(self.seed)
598
+ dist = stats.truncweibull_min
599
+ shapes = (2.5, 0.25, 1.75, 2., 3.)
600
+ data = dist.rvs(*shapes, size=N, random_state=rng)
601
+ shape_bounds = [(0.1, 10)]*5
602
+ res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
603
+
604
+ assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
605
+
606
+ def test_missing_shape_bounds(self):
607
+ # some distributions have a small domain w.r.t. a parameter, e.g.
608
+ # $p \in [0, 1]$ for binomial distribution
609
+ # User does not need to provide these because the intersection of the
610
+ # user's bounds (none) and the distribution's domain is finite
611
+ N = 1000
612
+ rng = np.random.default_rng(self.seed)
613
+
614
+ dist = stats.binom
615
+ n, p, loc = 10, 0.65, 0
616
+ data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
617
+ shape_bounds = {'n': np.array([0, 20])} # check arrays are OK, too
618
+ res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
619
+ assert_allclose(res.params, (n, p, loc), **self.tols)
620
+
621
+ dist = stats.bernoulli
622
+ p, loc = 0.314159, 0
623
+ data = dist.rvs(p, loc=loc, size=N, random_state=rng)
624
+ res = stats.fit(dist, data, optimizer=self.opt)
625
+ assert_allclose(res.params, (p, loc), **self.tols)
626
+
627
+ def test_fit_only_loc_scale(self):
628
+ # fit only loc
629
+ N = 5000
630
+ rng = np.random.default_rng(self.seed)
631
+
632
+ dist = stats.norm
633
+ loc, scale = 1.5, 1
634
+ data = dist.rvs(loc=loc, size=N, random_state=rng)
635
+ loc_bounds = (0, 5)
636
+ bounds = {'loc': loc_bounds}
637
+ res = stats.fit(dist, data, bounds, optimizer=self.opt)
638
+ assert_allclose(res.params, (loc, scale), **self.tols)
639
+
640
+ # fit only scale
641
+ loc, scale = 0, 2.5
642
+ data = dist.rvs(scale=scale, size=N, random_state=rng)
643
+ scale_bounds = (0.01, 5)
644
+ bounds = {'scale': scale_bounds}
645
+ res = stats.fit(dist, data, bounds, optimizer=self.opt)
646
+ assert_allclose(res.params, (loc, scale), **self.tols)
647
+
648
+ # fit only loc and scale
649
+ dist = stats.norm
650
+ loc, scale = 1.5, 2.5
651
+ data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
652
+ bounds = {'loc': loc_bounds, 'scale': scale_bounds}
653
+ res = stats.fit(dist, data, bounds, optimizer=self.opt)
654
+ assert_allclose(res.params, (loc, scale), **self.tols)
655
+
656
+ def test_everything_fixed(self):
657
+ N = 5000
658
+ rng = np.random.default_rng(self.seed)
659
+
660
+ dist = stats.norm
661
+ loc, scale = 1.5, 2.5
662
+ data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
663
+
664
+ # loc, scale fixed to 0, 1 by default
665
+ res = stats.fit(dist, data)
666
+ assert_allclose(res.params, (0, 1), **self.tols)
667
+
668
+ # loc, scale explicitly fixed
669
+ bounds = {'loc': (loc, loc), 'scale': (scale, scale)}
670
+ res = stats.fit(dist, data, bounds)
671
+ assert_allclose(res.params, (loc, scale), **self.tols)
672
+
673
+ # `n` gets fixed during polishing
674
+ dist = stats.binom
675
+ n, p, loc = 10, 0.65, 0
676
+ data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
677
+ shape_bounds = {'n': (0, 20), 'p': (0.65, 0.65)}
678
+ res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
679
+ assert_allclose(res.params, (n, p, loc), **self.tols)
680
+
681
+ def test_failure(self):
682
+ N = 5000
683
+ rng = np.random.default_rng(self.seed)
684
+
685
+ dist = stats.nbinom
686
+ shapes = (5, 0.5)
687
+ data = dist.rvs(*shapes, size=N, random_state=rng)
688
+
689
+ assert data.min() == 0
690
+ # With lower bounds on location at 0.5, likelihood is zero
691
+ bounds = [(0, 30), (0, 1), (0.5, 10)]
692
+ res = stats.fit(dist, data, bounds)
693
+ message = "Optimization converged to parameter values that are"
694
+ assert res.message.startswith(message)
695
+ assert res.success is False
696
+
697
+ @pytest.mark.xslow
698
+ def test_guess(self):
699
+ # Test that guess helps DE find the desired solution
700
+ N = 2000
701
+ # With some seeds, `fit` doesn't need a guess
702
+ rng = np.random.default_rng(196390444561)
703
+ dist = stats.nhypergeom
704
+ params = (20, 7, 12, 0)
705
+ bounds = [(2, 200), (0.7, 70), (1.2, 120), (0, 10)]
706
+
707
+ data = dist.rvs(*params, size=N, random_state=rng)
708
+
709
+ res = stats.fit(dist, data, bounds, optimizer=self.opt)
710
+ assert not np.allclose(res.params, params, **self.tols)
711
+
712
+ res = stats.fit(dist, data, bounds, guess=params, optimizer=self.opt)
713
+ assert_allclose(res.params, params, **self.tols)
714
+
715
+ def test_mse_accuracy_1(self):
716
+ # Test maximum spacing estimation against example from Wikipedia
717
+ # https://en.wikipedia.org/wiki/Maximum_spacing_estimation#Examples
718
+ data = [2, 4]
719
+ dist = stats.expon
720
+ bounds = {'loc': (0, 0), 'scale': (1e-8, 10)}
721
+ res_mle = stats.fit(dist, data, bounds=bounds, method='mle')
722
+ assert_allclose(res_mle.params.scale, 3, atol=1e-3)
723
+ res_mse = stats.fit(dist, data, bounds=bounds, method='mse')
724
+ assert_allclose(res_mse.params.scale, 3.915, atol=1e-3)
725
+
726
+ def test_mse_accuracy_2(self):
727
+ # Test maximum spacing estimation against example from Wikipedia
728
+ # https://en.wikipedia.org/wiki/Maximum_spacing_estimation#Examples
729
+ rng = np.random.default_rng(9843212616816518964)
730
+
731
+ dist = stats.uniform
732
+ n = 10
733
+ data = dist(3, 6).rvs(size=n, random_state=rng)
734
+ bounds = {'loc': (0, 10), 'scale': (1e-8, 10)}
735
+ res = stats.fit(dist, data, bounds=bounds, method='mse')
736
+ # (loc=3.608118420015416, scale=5.509323262055043)
737
+
738
+ x = np.sort(data)
739
+ a = (n*x[0] - x[-1])/(n - 1)
740
+ b = (n*x[-1] - x[0])/(n - 1)
741
+ ref = a, b-a # (3.6081133632151503, 5.509328130317254)
742
+ assert_allclose(res.params, ref, rtol=1e-4)
743
+
744
+
745
+ # Data from Matlab: https://www.mathworks.com/help/stats/lillietest.html
746
+ examgrades = [65, 61, 81, 88, 69, 89, 55, 84, 86, 84, 71, 81, 84, 81, 78, 67,
747
+ 96, 66, 73, 75, 59, 71, 69, 63, 79, 76, 63, 85, 87, 88, 80, 71,
748
+ 65, 84, 71, 75, 81, 79, 64, 65, 84, 77, 70, 75, 84, 75, 73, 92,
749
+ 90, 79, 80, 71, 73, 71, 58, 79, 73, 64, 77, 82, 81, 59, 54, 82,
750
+ 57, 79, 79, 73, 74, 82, 63, 64, 73, 69, 87, 68, 81, 73, 83, 73,
751
+ 80, 73, 73, 71, 66, 78, 64, 74, 68, 67, 75, 75, 80, 85, 74, 76,
752
+ 80, 77, 93, 70, 86, 80, 81, 83, 68, 60, 85, 64, 74, 82, 81, 77,
753
+ 66, 85, 75, 81, 69, 60, 83, 72]
754
+
755
+
756
+ class TestGoodnessOfFit:
757
+
758
+ def test_gof_iv(self):
759
+ dist = stats.norm
760
+ x = [1, 2, 3]
761
+
762
+ message = r"`dist` must be a \(non-frozen\) instance of..."
763
+ with pytest.raises(TypeError, match=message):
764
+ goodness_of_fit(stats.norm(), x)
765
+
766
+ message = "`data` must be a one-dimensional array of numbers."
767
+ with pytest.raises(ValueError, match=message):
768
+ goodness_of_fit(dist, [[1, 2, 3]])
769
+
770
+ message = "`statistic` must be one of..."
771
+ with pytest.raises(ValueError, match=message):
772
+ goodness_of_fit(dist, x, statistic='mm')
773
+
774
+ message = "`n_mc_samples` must be an integer."
775
+ with pytest.raises(TypeError, match=message):
776
+ goodness_of_fit(dist, x, n_mc_samples=1000.5)
777
+
778
+ message = "'herring' cannot be used to seed a"
779
+ with pytest.raises(ValueError, match=message):
780
+ goodness_of_fit(dist, x, random_state='herring')
781
+
782
+ def test_against_ks(self):
783
+ rng = np.random.default_rng(8517426291317196949)
784
+ x = examgrades
785
+ known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
786
+ res = goodness_of_fit(stats.norm, x, known_params=known_params,
787
+ statistic='ks', random_state=rng)
788
+ ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
789
+ assert_allclose(res.statistic, ref.statistic) # ~0.0848
790
+ assert_allclose(res.pvalue, ref.pvalue, atol=5e-3) # ~0.335
791
+
792
+ def test_against_lilliefors(self):
793
+ rng = np.random.default_rng(2291803665717442724)
794
+ x = examgrades
795
+ res = goodness_of_fit(stats.norm, x, statistic='ks', random_state=rng)
796
+ known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
797
+ ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
798
+ assert_allclose(res.statistic, ref.statistic) # ~0.0848
799
+ assert_allclose(res.pvalue, 0.0348, atol=5e-3)
800
+
801
+ def test_against_cvm(self):
802
+ rng = np.random.default_rng(8674330857509546614)
803
+ x = examgrades
804
+ known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
805
+ res = goodness_of_fit(stats.norm, x, known_params=known_params,
806
+ statistic='cvm', random_state=rng)
807
+ ref = stats.cramervonmises(x, stats.norm(**known_params).cdf)
808
+ assert_allclose(res.statistic, ref.statistic) # ~0.090
809
+ assert_allclose(res.pvalue, ref.pvalue, atol=5e-3) # ~0.636
810
+
811
+ def test_against_anderson_case_0(self):
812
+ # "Case 0" is where loc and scale are known [1]
813
+ rng = np.random.default_rng(7384539336846690410)
814
+ x = np.arange(1, 101)
815
+ # loc that produced critical value of statistic found w/ root_scalar
816
+ known_params = {'loc': 45.01575354024957, 'scale': 30}
817
+ res = goodness_of_fit(stats.norm, x, known_params=known_params,
818
+ statistic='ad', random_state=rng)
819
+ assert_allclose(res.statistic, 2.492) # See [1] Table 1A 1.0
820
+ assert_allclose(res.pvalue, 0.05, atol=5e-3)
821
+
822
+ def test_against_anderson_case_1(self):
823
+ # "Case 1" is where scale is known and loc is fit [1]
824
+ rng = np.random.default_rng(5040212485680146248)
825
+ x = np.arange(1, 101)
826
+ # scale that produced critical value of statistic found w/ root_scalar
827
+ known_params = {'scale': 29.957112639101933}
828
+ res = goodness_of_fit(stats.norm, x, known_params=known_params,
829
+ statistic='ad', random_state=rng)
830
+ assert_allclose(res.statistic, 0.908) # See [1] Table 1B 1.1
831
+ assert_allclose(res.pvalue, 0.1, atol=5e-3)
832
+
833
+ def test_against_anderson_case_2(self):
834
+ # "Case 2" is where loc is known and scale is fit [1]
835
+ rng = np.random.default_rng(726693985720914083)
836
+ x = np.arange(1, 101)
837
+ # loc that produced critical value of statistic found w/ root_scalar
838
+ known_params = {'loc': 44.5680212261933}
839
+ res = goodness_of_fit(stats.norm, x, known_params=known_params,
840
+ statistic='ad', random_state=rng)
841
+ assert_allclose(res.statistic, 2.904) # See [1] Table 1B 1.2
842
+ assert_allclose(res.pvalue, 0.025, atol=5e-3)
843
+
844
+ def test_against_anderson_case_3(self):
845
+ # "Case 3" is where both loc and scale are fit [1]
846
+ rng = np.random.default_rng(6763691329830218206)
847
+ # c that produced critical value of statistic found w/ root_scalar
848
+ x = stats.skewnorm.rvs(1.4477847789132101, loc=1, scale=2, size=100,
849
+ random_state=rng)
850
+ res = goodness_of_fit(stats.norm, x, statistic='ad', random_state=rng)
851
+ assert_allclose(res.statistic, 0.559) # See [1] Table 1B 1.2
852
+ assert_allclose(res.pvalue, 0.15, atol=5e-3)
853
+
854
+ @pytest.mark.xslow
855
+ def test_against_anderson_gumbel_r(self):
856
+ rng = np.random.default_rng(7302761058217743)
857
+ # c that produced critical value of statistic found w/ root_scalar
858
+ x = stats.genextreme(0.051896837188595134, loc=0.5,
859
+ scale=1.5).rvs(size=1000, random_state=rng)
860
+ res = goodness_of_fit(stats.gumbel_r, x, statistic='ad',
861
+ random_state=rng)
862
+ ref = stats.anderson(x, dist='gumbel_r')
863
+ assert_allclose(res.statistic, ref.critical_values[0])
864
+ assert_allclose(res.pvalue, ref.significance_level[0]/100, atol=5e-3)
865
+
866
+ def test_against_filliben_norm(self):
867
+ # Test against `stats.fit` ref. [7] Section 8 "Example"
868
+ rng = np.random.default_rng(8024266430745011915)
869
+ y = [6, 1, -4, 8, -2, 5, 0]
870
+ known_params = {'loc': 0, 'scale': 1}
871
+ res = stats.goodness_of_fit(stats.norm, y, known_params=known_params,
872
+ statistic="filliben", random_state=rng)
873
+ # Slight discrepancy presumably due to roundoff in Filliben's
874
+ # calculation. Using exact order statistic medians instead of
875
+ # Filliben's approximation doesn't account for it.
876
+ assert_allclose(res.statistic, 0.98538, atol=1e-4)
877
+ assert 0.75 < res.pvalue < 0.9
878
+
879
+ # Using R's ppcc library:
880
+ # library(ppcc)
881
+ # options(digits=16)
882
+ # x < - c(6, 1, -4, 8, -2, 5, 0)
883
+ # set.seed(100)
884
+ # ppccTest(x, "qnorm", ppos="Filliben")
885
+ # Discrepancy with
886
+ assert_allclose(res.statistic, 0.98540957187084, rtol=2e-5)
887
+ assert_allclose(res.pvalue, 0.8875, rtol=2e-3)
888
+
889
+ def test_filliben_property(self):
890
+ # Filliben's statistic should be independent of data location and scale
891
+ rng = np.random.default_rng(8535677809395478813)
892
+ x = rng.normal(loc=10, scale=0.5, size=100)
893
+ res = stats.goodness_of_fit(stats.norm, x,
894
+ statistic="filliben", random_state=rng)
895
+ known_params = {'loc': 0, 'scale': 1}
896
+ ref = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
897
+ statistic="filliben", random_state=rng)
898
+ assert_allclose(res.statistic, ref.statistic, rtol=1e-15)
899
+
900
+ @pytest.mark.parametrize('case', [(25, [.928, .937, .950, .958, .966]),
901
+ (50, [.959, .965, .972, .977, .981]),
902
+ (95, [.977, .979, .983, .986, .989])])
903
+ def test_against_filliben_norm_table(self, case):
904
+ # Test against `stats.fit` ref. [7] Table 1
905
+ rng = np.random.default_rng(504569995557928957)
906
+ n, ref = case
907
+ x = rng.random(n)
908
+ known_params = {'loc': 0, 'scale': 1}
909
+ res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
910
+ statistic="filliben", random_state=rng)
911
+ percentiles = np.array([0.005, 0.01, 0.025, 0.05, 0.1])
912
+ res = stats.scoreatpercentile(res.null_distribution, percentiles*100)
913
+ assert_allclose(res, ref, atol=2e-3)
914
+
915
+ @pytest.mark.xslow
916
+ @pytest.mark.parametrize('case', [(5, 0.95772790260469, 0.4755),
917
+ (6, 0.95398832257958, 0.3848),
918
+ (7, 0.9432692889277, 0.2328)])
919
+ def test_against_ppcc(self, case):
920
+ # Test against R ppcc, e.g.
921
+ # library(ppcc)
922
+ # options(digits=16)
923
+ # x < - c(0.52325412, 1.06907699, -0.36084066, 0.15305959, 0.99093194)
924
+ # set.seed(100)
925
+ # ppccTest(x, "qrayleigh", ppos="Filliben")
926
+ n, ref_statistic, ref_pvalue = case
927
+ rng = np.random.default_rng(7777775561439803116)
928
+ x = rng.normal(size=n)
929
+ res = stats.goodness_of_fit(stats.rayleigh, x, statistic="filliben",
930
+ random_state=rng)
931
+ assert_allclose(res.statistic, ref_statistic, rtol=1e-4)
932
+ assert_allclose(res.pvalue, ref_pvalue, atol=1.5e-2)
933
+
934
+ def test_params_effects(self):
935
+ # Ensure that `guessed_params`, `fit_params`, and `known_params` have
936
+ # the intended effects.
937
+ rng = np.random.default_rng(9121950977643805391)
938
+ x = stats.skewnorm.rvs(-5.044559778383153, loc=1, scale=2, size=50,
939
+ random_state=rng)
940
+
941
+ # Show that `guessed_params` don't fit to the guess,
942
+ # but `fit_params` and `known_params` respect the provided fit
943
+ guessed_params = {'c': 13.4}
944
+ fit_params = {'scale': 13.73}
945
+ known_params = {'loc': -13.85}
946
+ rng = np.random.default_rng(9121950977643805391)
947
+ res1 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
948
+ guessed_params=guessed_params,
949
+ fit_params=fit_params,
950
+ known_params=known_params, random_state=rng)
951
+ assert not np.allclose(res1.fit_result.params.c, 13.4)
952
+ assert_equal(res1.fit_result.params.scale, 13.73)
953
+ assert_equal(res1.fit_result.params.loc, -13.85)
954
+
955
+ # Show that changing the guess changes the parameter that gets fit,
956
+ # and it changes the null distribution
957
+ guessed_params = {'c': 2}
958
+ rng = np.random.default_rng(9121950977643805391)
959
+ res2 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
960
+ guessed_params=guessed_params,
961
+ fit_params=fit_params,
962
+ known_params=known_params, random_state=rng)
963
+ assert not np.allclose(res2.fit_result.params.c,
964
+ res1.fit_result.params.c, rtol=1e-8)
965
+ assert not np.allclose(res2.null_distribution,
966
+ res1.null_distribution, rtol=1e-8)
967
+ assert_equal(res2.fit_result.params.scale, 13.73)
968
+ assert_equal(res2.fit_result.params.loc, -13.85)
969
+
970
+ # If we set all parameters as fit_params and known_params,
971
+ # they're all fixed to those values, but the null distribution
972
+ # varies.
973
+ fit_params = {'c': 13.4, 'scale': 13.73}
974
+ rng = np.random.default_rng(9121950977643805391)
975
+ res3 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
976
+ guessed_params=guessed_params,
977
+ fit_params=fit_params,
978
+ known_params=known_params, random_state=rng)
979
+ assert_equal(res3.fit_result.params.c, 13.4)
980
+ assert_equal(res3.fit_result.params.scale, 13.73)
981
+ assert_equal(res3.fit_result.params.loc, -13.85)
982
+ assert not np.allclose(res3.null_distribution, res1.null_distribution)
983
+
984
+ def test_custom_statistic(self):
985
+ # Test support for custom statistic function.
986
+
987
+ # References:
988
+ # [1] Pyke, R. (1965). "Spacings". Journal of the Royal Statistical
989
+ # Society: Series B (Methodological), 27(3): 395-436.
990
+ # [2] Burrows, P. M. (1979). "Selected Percentage Points of
991
+ # Greenwood's Statistics". Journal of the Royal Statistical
992
+ # Society. Series A (General), 142(2): 256-258.
993
+
994
+ # Use the Greenwood statistic for illustration; see [1, p.402].
995
+ def greenwood(dist, data, *, axis):
996
+ x = np.sort(data, axis=axis)
997
+ y = dist.cdf(x)
998
+ d = np.diff(y, axis=axis, prepend=0, append=1)
999
+ return np.sum(d ** 2, axis=axis)
1000
+
1001
+ # Run the Monte Carlo test with sample size = 5 on a fully specified
1002
+ # null distribution, and compare the simulated quantiles to the exact
1003
+ # ones given in [2, Table 1, column (n = 5)].
1004
+ rng = np.random.default_rng(9121950977643805391)
1005
+ data = stats.expon.rvs(size=5, random_state=rng)
1006
+ result = goodness_of_fit(stats.expon, data,
1007
+ known_params={'loc': 0, 'scale': 1},
1008
+ statistic=greenwood, random_state=rng)
1009
+ p = [.01, .05, .1, .2, .3, .4, .5, .6, .7, .8, .9, .95, .99]
1010
+ exact_quantiles = [
1011
+ .183863, .199403, .210088, .226040, .239947, .253677, .268422,
1012
+ .285293, .306002, .334447, .382972, .432049, .547468]
1013
+ simulated_quantiles = np.quantile(result.null_distribution, p)
1014
+ assert_allclose(simulated_quantiles, exact_quantiles, atol=0.005)
1015
+
1016
+ class TestFitResult:
1017
+ def test_plot_iv(self):
1018
+ rng = np.random.default_rng(1769658657308472721)
1019
+ data = stats.norm.rvs(0, 1, size=100, random_state=rng)
1020
+
1021
+ def optimizer(*args, **kwargs):
1022
+ return differential_evolution(*args, **kwargs, seed=rng)
1023
+
1024
+ bounds = [(0, 30), (0, 1)]
1025
+ res = stats.fit(stats.norm, data, bounds, optimizer=optimizer)
1026
+ try:
1027
+ import matplotlib # noqa: F401
1028
+ message = r"`plot_type` must be one of \{'..."
1029
+ with pytest.raises(ValueError, match=message):
1030
+ res.plot(plot_type='llama')
1031
+ except (ModuleNotFoundError, ImportError):
1032
+ # Avoid trying to call MPL with numpy 2.0-dev, because that fails
1033
+ # too often due to ABI mismatches and is hard to avoid. This test
1034
+ # will work fine again once MPL has done a 2.0-compatible release.
1035
+ if not np.__version__.startswith('2.0.0.dev0'):
1036
+ message = r"matplotlib must be installed to use method `plot`."
1037
+ with pytest.raises(ModuleNotFoundError, match=message):
1038
+ res.plot(plot_type='llama')
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_hypotests.py ADDED
@@ -0,0 +1,1857 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import product
2
+
3
+ import numpy as np
4
+ import random
5
+ import functools
6
+ import pytest
7
+ from numpy.testing import (assert_, assert_equal, assert_allclose,
8
+ assert_almost_equal) # avoid new uses
9
+ from pytest import raises as assert_raises
10
+
11
+ import scipy.stats as stats
12
+ from scipy.stats import distributions
13
+ from scipy.stats._hypotests import (epps_singleton_2samp, cramervonmises,
14
+ _cdf_cvm, cramervonmises_2samp,
15
+ _pval_cvm_2samp_exact, barnard_exact,
16
+ boschloo_exact)
17
+ from scipy.stats._mannwhitneyu import mannwhitneyu, _mwu_state
18
+ from .common_tests import check_named_results
19
+ from scipy._lib._testutils import _TestPythranFunc
20
+ from scipy.stats._axis_nan_policy import SmallSampleWarning, too_small_1d_not_omit
21
+
22
+
23
+ class TestEppsSingleton:
24
+ def test_statistic_1(self):
25
+ # first example in Goerg & Kaiser, also in original paper of
26
+ # Epps & Singleton. Note: values do not match exactly, the
27
+ # value of the interquartile range varies depending on how
28
+ # quantiles are computed
29
+ x = np.array([-0.35, 2.55, 1.73, 0.73, 0.35,
30
+ 2.69, 0.46, -0.94, -0.37, 12.07])
31
+ y = np.array([-1.15, -0.15, 2.48, 3.25, 3.71,
32
+ 4.29, 5.00, 7.74, 8.38, 8.60])
33
+ w, p = epps_singleton_2samp(x, y)
34
+ assert_almost_equal(w, 15.14, decimal=1)
35
+ assert_almost_equal(p, 0.00442, decimal=3)
36
+
37
+ def test_statistic_2(self):
38
+ # second example in Goerg & Kaiser, again not a perfect match
39
+ x = np.array((0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 10,
40
+ 10, 10, 10))
41
+ y = np.array((10, 4, 0, 5, 10, 10, 0, 5, 6, 7, 10, 3, 1, 7, 0, 8, 1,
42
+ 5, 8, 10))
43
+ w, p = epps_singleton_2samp(x, y)
44
+ assert_allclose(w, 8.900, atol=0.001)
45
+ assert_almost_equal(p, 0.06364, decimal=3)
46
+
47
+ def test_epps_singleton_array_like(self):
48
+ np.random.seed(1234)
49
+ x, y = np.arange(30), np.arange(28)
50
+
51
+ w1, p1 = epps_singleton_2samp(list(x), list(y))
52
+ w2, p2 = epps_singleton_2samp(tuple(x), tuple(y))
53
+ w3, p3 = epps_singleton_2samp(x, y)
54
+
55
+ assert_(w1 == w2 == w3)
56
+ assert_(p1 == p2 == p3)
57
+
58
+ def test_epps_singleton_size(self):
59
+ # warns if sample contains fewer than 5 elements
60
+ x, y = (1, 2, 3, 4), np.arange(10)
61
+ with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
62
+ res = epps_singleton_2samp(x, y)
63
+ assert_equal(res.statistic, np.nan)
64
+ assert_equal(res.pvalue, np.nan)
65
+
66
+ def test_epps_singleton_nonfinite(self):
67
+ # raise error if there are non-finite values
68
+ x, y = (1, 2, 3, 4, 5, np.inf), np.arange(10)
69
+ assert_raises(ValueError, epps_singleton_2samp, x, y)
70
+
71
+ def test_names(self):
72
+ x, y = np.arange(20), np.arange(30)
73
+ res = epps_singleton_2samp(x, y)
74
+ attributes = ('statistic', 'pvalue')
75
+ check_named_results(res, attributes)
76
+
77
+
78
+ class TestCvm:
79
+ # the expected values of the cdfs are taken from Table 1 in
80
+ # Csorgo / Faraway: The Exact and Asymptotic Distribution of
81
+ # Cramér-von Mises Statistics, 1996.
82
+ def test_cdf_4(self):
83
+ assert_allclose(
84
+ _cdf_cvm([0.02983, 0.04111, 0.12331, 0.94251], 4),
85
+ [0.01, 0.05, 0.5, 0.999],
86
+ atol=1e-4)
87
+
88
+ def test_cdf_10(self):
89
+ assert_allclose(
90
+ _cdf_cvm([0.02657, 0.03830, 0.12068, 0.56643], 10),
91
+ [0.01, 0.05, 0.5, 0.975],
92
+ atol=1e-4)
93
+
94
+ def test_cdf_1000(self):
95
+ assert_allclose(
96
+ _cdf_cvm([0.02481, 0.03658, 0.11889, 1.16120], 1000),
97
+ [0.01, 0.05, 0.5, 0.999],
98
+ atol=1e-4)
99
+
100
+ def test_cdf_inf(self):
101
+ assert_allclose(
102
+ _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204]),
103
+ [0.01, 0.05, 0.5, 0.999],
104
+ atol=1e-4)
105
+
106
+ def test_cdf_support(self):
107
+ # cdf has support on [1/(12*n), n/3]
108
+ assert_equal(_cdf_cvm([1/(12*533), 533/3], 533), [0, 1])
109
+ assert_equal(_cdf_cvm([1/(12*(27 + 1)), (27 + 1)/3], 27), [0, 1])
110
+
111
+ def test_cdf_large_n(self):
112
+ # test that asymptotic cdf and cdf for large samples are close
113
+ assert_allclose(
114
+ _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100], 10000),
115
+ _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100]),
116
+ atol=1e-4)
117
+
118
+ def test_large_x(self):
119
+ # for large values of x and n, the series used to compute the cdf
120
+ # converges slowly.
121
+ # this leads to bug in R package goftest and MAPLE code that is
122
+ # the basis of the implementation in scipy
123
+ # note: cdf = 1 for x >= 1000/3 and n = 1000
124
+ assert_(0.99999 < _cdf_cvm(333.3, 1000) < 1.0)
125
+ assert_(0.99999 < _cdf_cvm(333.3) < 1.0)
126
+
127
+ def test_low_p(self):
128
+ # _cdf_cvm can return values larger than 1. In that case, we just
129
+ # return a p-value of zero.
130
+ n = 12
131
+ res = cramervonmises(np.ones(n)*0.8, 'norm')
132
+ assert_(_cdf_cvm(res.statistic, n) > 1.0)
133
+ assert_equal(res.pvalue, 0)
134
+
135
+ @pytest.mark.parametrize('x', [(), [1.5]])
136
+ def test_invalid_input(self, x):
137
+ with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
138
+ res = cramervonmises(x, "norm")
139
+ assert_equal(res.statistic, np.nan)
140
+ assert_equal(res.pvalue, np.nan)
141
+
142
+ def test_values_R(self):
143
+ # compared against R package goftest, version 1.1.1
144
+ # goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6), "pnorm")
145
+ res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm")
146
+ assert_allclose(res.statistic, 0.288156, atol=1e-6)
147
+ assert_allclose(res.pvalue, 0.1453465, atol=1e-6)
148
+
149
+ # goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6),
150
+ # "pnorm", mean = 3, sd = 1.5)
151
+ res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm", (3, 1.5))
152
+ assert_allclose(res.statistic, 0.9426685, atol=1e-6)
153
+ assert_allclose(res.pvalue, 0.002026417, atol=1e-6)
154
+
155
+ # goftest::cvm.test(c(1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5), "pexp")
156
+ res = cramervonmises([1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5], "expon")
157
+ assert_allclose(res.statistic, 0.8421854, atol=1e-6)
158
+ assert_allclose(res.pvalue, 0.004433406, atol=1e-6)
159
+
160
+ def test_callable_cdf(self):
161
+ x, args = np.arange(5), (1.4, 0.7)
162
+ r1 = cramervonmises(x, distributions.expon.cdf)
163
+ r2 = cramervonmises(x, "expon")
164
+ assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
165
+
166
+ r1 = cramervonmises(x, distributions.beta.cdf, args)
167
+ r2 = cramervonmises(x, "beta", args)
168
+ assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
169
+
170
+
171
+ class TestMannWhitneyU:
172
+
173
+ # All magic numbers are from R wilcox.test unless otherwise specified
174
+ # https://rdrr.io/r/stats/wilcox.test.html
175
+
176
+ # --- Test Input Validation ---
177
+
178
+ @pytest.mark.parametrize('kwargs_update', [{'x': []}, {'y': []},
179
+ {'x': [], 'y': []}])
180
+ def test_empty(self, kwargs_update):
181
+ x = np.array([1, 2]) # generic, valid inputs
182
+ y = np.array([3, 4])
183
+ kwargs = dict(x=x, y=y)
184
+ kwargs.update(kwargs_update)
185
+ with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
186
+ res = mannwhitneyu(**kwargs)
187
+ assert_equal(res.statistic, np.nan)
188
+ assert_equal(res.pvalue, np.nan)
189
+
190
+ def test_input_validation(self):
191
+ x = np.array([1, 2]) # generic, valid inputs
192
+ y = np.array([3, 4])
193
+ with assert_raises(ValueError, match="`use_continuity` must be one"):
194
+ mannwhitneyu(x, y, use_continuity='ekki')
195
+ with assert_raises(ValueError, match="`alternative` must be one of"):
196
+ mannwhitneyu(x, y, alternative='ekki')
197
+ with assert_raises(ValueError, match="`axis` must be an integer"):
198
+ mannwhitneyu(x, y, axis=1.5)
199
+ with assert_raises(ValueError, match="`method` must be one of"):
200
+ mannwhitneyu(x, y, method='ekki')
201
+
202
+ def test_auto(self):
203
+ # Test that default method ('auto') chooses intended method
204
+
205
+ np.random.seed(1)
206
+ n = 8 # threshold to switch from exact to asymptotic
207
+
208
+ # both inputs are smaller than threshold; should use exact
209
+ x = np.random.rand(n-1)
210
+ y = np.random.rand(n-1)
211
+ auto = mannwhitneyu(x, y)
212
+ asymptotic = mannwhitneyu(x, y, method='asymptotic')
213
+ exact = mannwhitneyu(x, y, method='exact')
214
+ assert auto.pvalue == exact.pvalue
215
+ assert auto.pvalue != asymptotic.pvalue
216
+
217
+ # one input is smaller than threshold; should use exact
218
+ x = np.random.rand(n-1)
219
+ y = np.random.rand(n+1)
220
+ auto = mannwhitneyu(x, y)
221
+ asymptotic = mannwhitneyu(x, y, method='asymptotic')
222
+ exact = mannwhitneyu(x, y, method='exact')
223
+ assert auto.pvalue == exact.pvalue
224
+ assert auto.pvalue != asymptotic.pvalue
225
+
226
+ # other input is smaller than threshold; should use exact
227
+ auto = mannwhitneyu(y, x)
228
+ asymptotic = mannwhitneyu(x, y, method='asymptotic')
229
+ exact = mannwhitneyu(x, y, method='exact')
230
+ assert auto.pvalue == exact.pvalue
231
+ assert auto.pvalue != asymptotic.pvalue
232
+
233
+ # both inputs are larger than threshold; should use asymptotic
234
+ x = np.random.rand(n+1)
235
+ y = np.random.rand(n+1)
236
+ auto = mannwhitneyu(x, y)
237
+ asymptotic = mannwhitneyu(x, y, method='asymptotic')
238
+ exact = mannwhitneyu(x, y, method='exact')
239
+ assert auto.pvalue != exact.pvalue
240
+ assert auto.pvalue == asymptotic.pvalue
241
+
242
+ # both inputs are smaller than threshold, but there is a tie
243
+ # should use asymptotic
244
+ x = np.random.rand(n-1)
245
+ y = np.random.rand(n-1)
246
+ y[3] = x[3]
247
+ auto = mannwhitneyu(x, y)
248
+ asymptotic = mannwhitneyu(x, y, method='asymptotic')
249
+ exact = mannwhitneyu(x, y, method='exact')
250
+ assert auto.pvalue != exact.pvalue
251
+ assert auto.pvalue == asymptotic.pvalue
252
+
253
# --- Test Basic Functionality ---

# Fixed samples shared by test_basic and test_continuity below
# (originally from gh-4933); referenced as self.x / self.y.
x = [210.052110, 110.190630, 307.918612]
y = [436.08811482466416, 416.37397329768191, 179.96975939463582,
     197.8118754228619, 34.038757281225756, 138.54220550921517,
     128.7769351470246, 265.92721427951852, 275.6617533155341,
     592.34083395416258, 448.73177590617018, 300.61495185038905,
     187.97508449019588]
262
# This test was written for mann_whitney_u in gh-4933.
# Originally, the p-values for alternatives were swapped;
# this has been corrected and the tests have been refactored for
# compactness, but otherwise the tests are unchanged.
# R code for comparison, e.g.:
# options(digits = 16)
# x = c(210.052110, 110.190630, 307.918612)
# y = c(436.08811482466416, 416.37397329768191, 179.96975939463582,
#       197.8118754228619, 34.038757281225756, 138.54220550921517,
#       128.7769351470246, 265.92721427951852, 275.6617533155341,
#       592.34083395416258, 448.73177590617018, 300.61495185038905,
#       187.97508449019588)
# wilcox.test(x, y, alternative="g", exact=TRUE)
# Each case: (kwargs for mannwhitneyu, expected (statistic, pvalue)).
cases_basic = [[{"alternative": 'two-sided', "method": "asymptotic"},
                (16, 0.6865041817876)],
               [{"alternative": 'less', "method": "asymptotic"},
                (16, 0.3432520908938)],
               [{"alternative": 'greater', "method": "asymptotic"},
                (16, 0.7047591913255)],
               [{"alternative": 'two-sided', "method": "exact"},
                (16, 0.7035714285714)],
               [{"alternative": 'less', "method": "exact"},
                (16, 0.3517857142857)],
               [{"alternative": 'greater', "method": "exact"},
                (16, 0.6946428571429)]]

@pytest.mark.parametrize(("kwds", "expected"), cases_basic)
def test_basic(self, kwds, expected):
    """Compare statistic and p-value against R's wilcox.test output."""
    res = mannwhitneyu(self.x, self.y, **kwds)
    assert_allclose(res, expected)
293
# Each case: (kwargs, expected (statistic, pvalue)); note test_continuity
# calls mannwhitneyu with the sample order swapped (y, x).
cases_continuity = [[{"alternative": 'two-sided', "use_continuity": True},
                     (23, 0.6865041817876)],
                    [{"alternative": 'less', "use_continuity": True},
                     (23, 0.7047591913255)],
                    [{"alternative": 'greater', "use_continuity": True},
                     (23, 0.3432520908938)],
                    [{"alternative": 'two-sided', "use_continuity": False},
                     (23, 0.6377328900502)],
                    [{"alternative": 'less', "use_continuity": False},
                     (23, 0.6811335549749)],
                    [{"alternative": 'greater', "use_continuity": False},
                     (23, 0.3188664450251)]]

@pytest.mark.parametrize(("kwds", "expected"), cases_continuity)
def test_continuity(self, kwds, expected):
    """Check the direction of the continuity correction."""
    # When x and y are interchanged, less and greater p-values should
    # swap (compare to above). This wouldn't happen if the continuity
    # correction were applied in the wrong direction. Note that less and
    # greater p-values do not sum to 1 when continuity correction is on,
    # which is what we'd expect. Also check that results match R when
    # continuity correction is turned off.
    # Note that method='asymptotic' -> exact=FALSE
    # and use_continuity=False -> correct=FALSE, e.g.:
    # wilcox.test(x, y, alternative="t", exact=FALSE, correct=FALSE)
    res = mannwhitneyu(self.y, self.x, method='asymptotic', **kwds)
    assert_allclose(res, expected)
320
+ def test_tie_correct(self):
321
+ # Test tie correction against R's wilcox.test
322
+ # options(digits = 16)
323
+ # x = c(1, 2, 3, 4)
324
+ # y = c(1, 2, 3, 4, 5)
325
+ # wilcox.test(x, y, exact=FALSE)
326
+ x = [1, 2, 3, 4]
327
+ y0 = np.array([1, 2, 3, 4, 5])
328
+ dy = np.array([0, 1, 0, 1, 0])*0.01
329
+ dy2 = np.array([0, 0, 1, 0, 0])*0.01
330
+ y = [y0-0.01, y0-dy, y0-dy2, y0, y0+dy2, y0+dy, y0+0.01]
331
+ res = mannwhitneyu(x, y, axis=-1, method="asymptotic")
332
+ U_expected = [10, 9, 8.5, 8, 7.5, 7, 6]
333
+ p_expected = [1, 0.9017048037317, 0.804080657472, 0.7086240584439,
334
+ 0.6197963884941, 0.5368784563079, 0.3912672792826]
335
+ assert_equal(res.statistic, U_expected)
336
+ assert_allclose(res.pvalue, p_expected)
337
+
338
# --- Test Exact Distribution of U ---

# These are tabulated values of the CDF of the exact distribution of
# the test statistic from pg 52 of reference [1] (Mann-Whitney Original)
# Each dict is keyed by the size of one sample (m); the other sample size
# (n = 3, 4, 5, 6) is encoded in the attribute name and used as the key
# in test_exact_distribution's p_tables mapping.
pn3 = {1: [0.25, 0.5, 0.75], 2: [0.1, 0.2, 0.4, 0.6],
       3: [0.05, .1, 0.2, 0.35, 0.5, 0.65]}
pn4 = {1: [0.2, 0.4, 0.6], 2: [0.067, 0.133, 0.267, 0.4, 0.6],
       3: [0.028, 0.057, 0.114, 0.2, .314, 0.429, 0.571],
       4: [0.014, 0.029, 0.057, 0.1, 0.171, 0.243, 0.343, 0.443, 0.557]}
pm5 = {1: [0.167, 0.333, 0.5, 0.667],
       2: [0.047, 0.095, 0.19, 0.286, 0.429, 0.571],
       3: [0.018, 0.036, 0.071, 0.125, 0.196, 0.286, 0.393, 0.5, 0.607],
       4: [0.008, 0.016, 0.032, 0.056, 0.095, 0.143,
           0.206, 0.278, 0.365, 0.452, 0.548],
       5: [0.004, 0.008, 0.016, 0.028, 0.048, 0.075, 0.111,
           0.155, 0.21, 0.274, 0.345, .421, 0.5, 0.579]}
pm6 = {1: [0.143, 0.286, 0.428, 0.571],
       2: [0.036, 0.071, 0.143, 0.214, 0.321, 0.429, 0.571],
       3: [0.012, 0.024, 0.048, 0.083, 0.131,
           0.19, 0.274, 0.357, 0.452, 0.548],
       4: [0.005, 0.01, 0.019, 0.033, 0.057, 0.086, 0.129,
           0.176, 0.238, 0.305, 0.381, 0.457, 0.543],  # the last element
       # of the previous list, 0.543, has been modified from 0.545;
       # I assume it was a typo
       5: [0.002, 0.004, 0.009, 0.015, 0.026, 0.041, 0.063, 0.089,
           0.123, 0.165, 0.214, 0.268, 0.331, 0.396, 0.465, 0.535],
       6: [0.001, 0.002, 0.004, 0.008, 0.013, 0.021, 0.032, 0.047,
           0.066, 0.09, 0.12, 0.155, 0.197, 0.242, 0.294, 0.350,
           0.409, 0.469, 0.531]}
368
def test_exact_distribution(self):
    """Validate the exact null distribution of U held by _mwu_state.

    Checks CDF values against the tabulated pn3/pn4/pm5/pm6 tables and
    verifies internal consistency: the CDF + SF - PMF = 1 identity,
    symmetry of the PMF about m*n/2, and symmetry under interchanging
    the two sample sizes.
    """
    # I considered parametrize. I decided against it.
    p_tables = {3: self.pn3, 4: self.pn4, 5: self.pm5, 6: self.pm6}
    for n, table in p_tables.items():
        for m, p in table.items():
            # check p-value against table
            u = np.arange(0, len(p))
            _mwu_state.set_shapes(m, n)
            assert_allclose(_mwu_state.cdf(k=u), p, atol=1e-3)

            # check identity CDF + SF - PMF = 1
            # ( In this implementation, SF(U) includes PMF(U) )
            u2 = np.arange(0, m*n+1)
            assert_allclose(_mwu_state.cdf(k=u2)
                            + _mwu_state.sf(k=u2)
                            - _mwu_state.pmf(k=u2), 1)

            # check symmetry about mean of U, i.e. pmf(U) = pmf(m*n-U)
            pmf = _mwu_state.pmf(k=u2)
            assert_allclose(pmf, pmf[::-1])

            # check symmetry w.r.t. interchange of m, n
            _mwu_state.set_shapes(n, m)
            pmf2 = _mwu_state.pmf(k=u2)
            assert_allclose(pmf, pmf2)
394
+ def test_asymptotic_behavior(self):
395
+ np.random.seed(0)
396
+
397
+ # for small samples, the asymptotic test is not very accurate
398
+ x = np.random.rand(5)
399
+ y = np.random.rand(5)
400
+ res1 = mannwhitneyu(x, y, method="exact")
401
+ res2 = mannwhitneyu(x, y, method="asymptotic")
402
+ assert res1.statistic == res2.statistic
403
+ assert np.abs(res1.pvalue - res2.pvalue) > 1e-2
404
+
405
+ # for large samples, they agree reasonably well
406
+ x = np.random.rand(40)
407
+ y = np.random.rand(40)
408
+ res1 = mannwhitneyu(x, y, method="exact")
409
+ res2 = mannwhitneyu(x, y, method="asymptotic")
410
+ assert res1.statistic == res2.statistic
411
+ assert np.abs(res1.pvalue - res2.pvalue) < 1e-3
412
+
413
# --- Test Corner Cases ---

def test_exact_U_equals_mean(self):
    """Exact method when U equals its null mean m*n/2."""
    # Test U == m*n/2 with exact method
    # Without special treatment, two-sided p-value > 1 because both
    # one-sided p-values are > 0.5
    res_l = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="less",
                         method="exact")
    res_g = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="greater",
                         method="exact")
    assert_equal(res_l.pvalue, res_g.pvalue)
    assert res_l.pvalue > 0.5

    # two-sided p-value must be clipped to 1, not 2 * one-sided > 1
    res = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="two-sided",
                       method="exact")
    assert_equal(res, (3, 1))
    # U == m*n/2 for asymptotic case tested in test_gh_2118
    # The reason it's tricky for the asymptotic test has to do with
    # continuity correction.
433
# Each case: (kwargs, expected (statistic, pvalue)) for scalar inputs
# x=1, y=2.
cases_scalar = [[{"alternative": 'two-sided', "method": "asymptotic"},
                 (0, 1)],
                [{"alternative": 'less', "method": "asymptotic"},
                 (0, 0.5)],
                [{"alternative": 'greater', "method": "asymptotic"},
                 (0, 0.977249868052)],
                [{"alternative": 'two-sided', "method": "exact"}, (0, 1)],
                [{"alternative": 'less', "method": "exact"}, (0, 0.5)],
                [{"alternative": 'greater', "method": "exact"}, (0, 1)]]

@pytest.mark.parametrize(("kwds", "result"), cases_scalar)
def test_scalar_data(self, kwds, result):
    # just making sure scalars work
    assert_allclose(mannwhitneyu(1, 2, **kwds), result)
448
+ def test_equal_scalar_data(self):
449
+ # when two scalars are equal, there is an -0.5/0 in the asymptotic
450
+ # approximation. R gives pvalue=1.0 for alternatives 'less' and
451
+ # 'greater' but NA for 'two-sided'. I don't see why, so I don't
452
+ # see a need for a special case to match that behavior.
453
+ assert_equal(mannwhitneyu(1, 1, method="exact"), (0.5, 1))
454
+ assert_equal(mannwhitneyu(1, 1, method="asymptotic"), (0.5, 1))
455
+
456
+ # without continuity correction, this becomes 0/0, which really
457
+ # is undefined
458
+ assert_equal(mannwhitneyu(1, 1, method="asymptotic",
459
+ use_continuity=False), (0.5, np.nan))
460
+
461
# --- Test Enhancements / Bug Reports ---

@pytest.mark.parametrize("method", ["asymptotic", "exact"])
def test_gh_12837_11113(self, method):
    """Vectorized (n-d, broadcastable) input matches pairwise results."""
    # Test that behavior for broadcastable nd arrays is appropriate:
    # output shape is correct and all values are equal to when the test
    # is performed on one pair of samples at a time.
    # Tests that gh-12837 and gh-11113 (requests for n-d input)
    # are resolved
    np.random.seed(0)

    # arrays are broadcastable except for axis = -3
    axis = -3
    m, n = 7, 10  # sample sizes
    x = np.random.rand(m, 3, 8)
    y = np.random.rand(6, n, 1, 8) + 0.1
    res = mannwhitneyu(x, y, method=method, axis=axis)

    shape = (6, 3, 8)  # appropriate shape of outputs, given inputs
    assert res.pvalue.shape == shape
    assert res.statistic.shape == shape

    # move axis of test to end for simplicity
    x, y = np.moveaxis(x, axis, -1), np.moveaxis(y, axis, -1)

    x = x[None, ...]  # give x a zeroth dimension
    assert x.ndim == y.ndim

    x = np.broadcast_to(x, shape + (m,))
    y = np.broadcast_to(y, shape + (n,))
    assert x.shape[:-1] == shape
    assert y.shape[:-1] == shape

    # loop over pairs of samples and compare with the vectorized result
    statistics = np.zeros(shape)
    pvalues = np.zeros(shape)
    for indices in product(*[range(i) for i in shape]):
        xi = x[indices]
        yi = y[indices]
        temp = mannwhitneyu(xi, yi, method=method)
        statistics[indices] = temp.statistic
        pvalues[indices] = temp.pvalue

    np.testing.assert_equal(res.pvalue, pvalues)
    np.testing.assert_equal(res.statistic, statistics)
507
+ def test_gh_11355(self):
508
+ # Test for correct behavior with NaN/Inf in input
509
+ x = [1, 2, 3, 4]
510
+ y = [3, 6, 7, 8, 9, 3, 2, 1, 4, 4, 5]
511
+ res1 = mannwhitneyu(x, y)
512
+
513
+ # Inf is not a problem. This is a rank test, and it's the largest value
514
+ y[4] = np.inf
515
+ res2 = mannwhitneyu(x, y)
516
+
517
+ assert_equal(res1.statistic, res2.statistic)
518
+ assert_equal(res1.pvalue, res2.pvalue)
519
+
520
+ # NaNs should propagate by default.
521
+ y[4] = np.nan
522
+ res3 = mannwhitneyu(x, y)
523
+ assert_equal(res3.statistic, np.nan)
524
+ assert_equal(res3.pvalue, np.nan)
525
+
526
# Each case: (x, y, expected statistic, expected asymptotic p-value),
# with inf values appearing in one or both samples.
cases_11355 = [([1, 2, 3, 4],
                [3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
                10, 0.1297704873477),
               ([1, 2, 3, 4],
                [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
                8.5, 0.08735617507695),
               ([1, 2, np.inf, 4],
                [3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
                17.5, 0.5988856695752),
               ([1, 2, np.inf, 4],
                [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
                16, 0.4687165824462),
               ([1, np.inf, np.inf, 4],
                [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
                24.5, 0.7912517950119)]

@pytest.mark.parametrize(("x", "y", "statistic", "pvalue"), cases_11355)
def test_gh_11355b(self, x, y, statistic, pvalue):
    # Test for correct behavior with NaN/Inf in input
    res = mannwhitneyu(x, y, method='asymptotic')
    assert_allclose(res.statistic, statistic, atol=1e-12)
    assert_allclose(res.pvalue, pvalue, atol=1e-12)
549
# Each case: (use_continuity, alternative, method, expected p-value);
# expected values come from the R commands listed in test_gh_9184.
cases_9184 = [[True, "less", "asymptotic", 0.900775348204],
              [True, "greater", "asymptotic", 0.1223118025635],
              [True, "two-sided", "asymptotic", 0.244623605127],
              [False, "less", "asymptotic", 0.8896643190401],
              [False, "greater", "asymptotic", 0.1103356809599],
              [False, "two-sided", "asymptotic", 0.2206713619198],
              [True, "less", "exact", 0.8967698967699],
              [True, "greater", "exact", 0.1272061272061],
              [True, "two-sided", "exact", 0.2544122544123]]

@pytest.mark.parametrize(("use_continuity", "alternative",
                          "method", "pvalue_exp"), cases_9184)
def test_gh_9184(self, use_continuity, alternative, method, pvalue_exp):
    """Check the gh-9184 example against R's wilcox.test."""
    # gh-9184 might be considered a doc-only bug. Please see the
    # documentation to confirm that mannwhitneyu correctly notes
    # that the output statistic is that of the first sample (x). In any
    # case, check the case provided there against output from R.
    # R code:
    # options(digits=16)
    # x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
    # y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
    # wilcox.test(x, y, alternative = "less", exact = FALSE)
    # wilcox.test(x, y, alternative = "greater", exact = FALSE)
    # wilcox.test(x, y, alternative = "two.sided", exact = FALSE)
    # wilcox.test(x, y, alternative = "less", exact = FALSE,
    #             correct=FALSE)
    # wilcox.test(x, y, alternative = "greater", exact = FALSE,
    #             correct=FALSE)
    # wilcox.test(x, y, alternative = "two.sided", exact = FALSE,
    #             correct=FALSE)
    # wilcox.test(x, y, alternative = "less", exact = TRUE)
    # wilcox.test(x, y, alternative = "greater", exact = TRUE)
    # wilcox.test(x, y, alternative = "two.sided", exact = TRUE)
    statistic_exp = 35
    x = (0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
    y = (1.15, 0.88, 0.90, 0.74, 1.21)
    res = mannwhitneyu(x, y, use_continuity=use_continuity,
                       alternative=alternative, method=method)
    assert_equal(res.statistic, statistic_exp)
    assert_allclose(res.pvalue, pvalue_exp)
590
+ def test_gh_4067(self):
591
+ # Test for correct behavior with all NaN input - default is propagate
592
+ a = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
593
+ b = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
594
+ res = mannwhitneyu(a, b)
595
+ assert_equal(res.statistic, np.nan)
596
+ assert_equal(res.pvalue, np.nan)
597
+
598
# All cases checked against R wilcox.test, e.g.
# options(digits=16)
# x = c(1, 2, 3)
# y = c(1.5, 2.5)
# wilcox.test(x, y, exact=FALSE, alternative='less')

# Each case: (x, y, alternative, expected (statistic, pvalue)).
cases_2118 = [[[1, 2, 3], [1.5, 2.5], "greater", (3, 0.6135850036578)],
              [[1, 2, 3], [1.5, 2.5], "less", (3, 0.6135850036578)],
              [[1, 2, 3], [1.5, 2.5], "two-sided", (3, 1.0)],
              [[1, 2, 3], [2], "greater", (1.5, 0.681324055883)],
              [[1, 2, 3], [2], "less", (1.5, 0.681324055883)],
              [[1, 2, 3], [2], "two-sided", (1.5, 1)],
              [[1, 2], [1, 2], "greater", (2, 0.667497228949)],
              [[1, 2], [1, 2], "less", (2, 0.667497228949)],
              [[1, 2], [1, 2], "two-sided", (2, 1)]]

@pytest.mark.parametrize(["x", "y", "alternative", "expected"], cases_2118)
def test_gh_2118(self, x, y, alternative, expected):
    # test cases in which U == m*n/2 when method is asymptotic
    # applying continuity correction could result in p-value > 1
    res = mannwhitneyu(x, y, use_continuity=True, alternative=alternative,
                       method="asymptotic")
    assert_allclose(res, expected, rtol=1e-12)
622
def test_gh19692_smaller_table(self):
    """Exact-method cache should not depend on input order or grow
    beyond what the symmetry of the null distribution requires."""
    # In gh-19692, we noted that the shape of the cache used in calculating
    # p-values was dependent on the order of the inputs because the sample
    # sizes n1 and n2 changed. This was indicative of unnecessary cache
    # growth and redundant calculation. Check that this is resolved.
    rng = np.random.default_rng(7600451795963068007)
    m, n = 5, 11
    x = rng.random(size=m)
    y = rng.random(size=n)
    _mwu_state.reset()  # reset cache
    res = stats.mannwhitneyu(x, y, method='exact')
    shape = _mwu_state.configurations.shape
    assert shape[-1] == min(res.statistic, m*n - res.statistic) + 1
    stats.mannwhitneyu(y, x, method='exact')
    assert shape == _mwu_state.configurations.shape  # same when sizes are reversed

    # Also, we weren't exploiting the symmetry of the null distribution
    # to its full potential. Ensure that the null distribution is not
    # evaluated explicitly for `k > m*n/2`.
    _mwu_state.reset()  # reset cache
    stats.mannwhitneyu(x, 0*y, method='exact', alternative='greater')
    shape = _mwu_state.configurations.shape
    assert shape[-1] == 1  # k is smallest possible
    stats.mannwhitneyu(0*x, y, method='exact', alternative='greater')
    assert shape == _mwu_state.configurations.shape
648
@pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
def test_permutation_method(self, alternative):
    """PermutationMethod results should match method='exact' here."""
    rng = np.random.default_rng(7600451795963068007)
    x = rng.random(size=(2, 5))
    y = rng.random(size=(2, 6))
    res = stats.mannwhitneyu(x, y, method=stats.PermutationMethod(),
                             alternative=alternative, axis=1)
    res2 = stats.mannwhitneyu(x, y, method='exact',
                              alternative=alternative, axis=1)
    assert_allclose(res.statistic, res2.statistic, rtol=1e-15)
    assert_allclose(res.pvalue, res2.pvalue, rtol=1e-15)
660
+
661
class TestSomersD(_TestPythranFunc):
    """Tests for scipy.stats.somersd.

    Expected values are cross-checked against SAS FREQ, R, Somers'
    original paper, and (for the large-input case) sklearn, as noted
    in the individual tests.
    """

    def setup_method(self):
        # Configure the dtype-sweep machinery from _TestPythranFunc:
        # both positional arguments accept all integer and float dtypes.
        self.dtypes = self.ALL_INTEGER + self.ALL_FLOAT
        self.arguments = {0: (np.arange(10),
                              self.ALL_INTEGER + self.ALL_FLOAT),
                          1: (np.arange(10),
                              self.ALL_INTEGER + self.ALL_FLOAT)}
        input_array = [self.arguments[idx][0] for idx in self.arguments]
        # In this case, self.partialfunc can simply be stats.somersd,
        # since `alternative` is an optional argument. If it is required,
        # we can use functools.partial to freeze the value, because
        # we only mainly test various array inputs, not str, etc.
        self.partialfunc = functools.partial(stats.somersd,
                                             alternative='two-sided')
        self.expected = self.partialfunc(*input_array)

    def pythranfunc(self, *args):
        # Called by _TestPythranFunc with dtype-converted inputs; result
        # must match the reference computed in setup_method.
        res = self.partialfunc(*args)
        assert_allclose(res.statistic, self.expected.statistic, atol=1e-15)
        assert_allclose(res.pvalue, self.expected.pvalue, atol=1e-15)

    def test_pythranfunc_keywords(self):
        # Not specifying the optional keyword args
        table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
        res1 = stats.somersd(table)
        # Specifying the optional keyword args with default value
        optional_args = self.get_optional_args(stats.somersd)
        res2 = stats.somersd(table, **optional_args)
        # Check if the results are the same in two cases
        assert_allclose(res1.statistic, res2.statistic, atol=1e-15)
        assert_allclose(res1.pvalue, res2.pvalue, atol=1e-15)

    def test_like_kendalltau(self):
        """Mirror test_stats.py's test_kendalltau cases for somersd."""
        # All tests correspond with one in test_stats.py `test_kendalltau`

        # case without ties, con-dis equal zero
        x = [5, 2, 1, 3, 6, 4, 7, 8]
        y = [5, 2, 6, 3, 1, 8, 7, 4]
        # Cross-check with result from SAS FREQ:
        expected = (0.000000000000000, 1.000000000000000)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # case without ties, con-dis equal zero
        x = [0, 5, 2, 1, 3, 6, 4, 7, 8]
        y = [5, 2, 0, 6, 3, 1, 8, 7, 4]
        # Cross-check with result from SAS FREQ:
        expected = (0.000000000000000, 1.000000000000000)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # case without ties, con-dis close to zero
        x = [5, 2, 1, 3, 6, 4, 7]
        y = [5, 2, 6, 3, 1, 7, 4]
        # Cross-check with result from SAS FREQ:
        expected = (-0.142857142857140, 0.630326953157670)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # simple case without ties
        x = np.arange(10)
        y = np.arange(10)
        # Cross-check with result from SAS FREQ:
        # SAS p value is not provided.
        expected = (1.000000000000000, 0)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # swap a couple values and a couple more
        x = np.arange(10)
        y = np.array([0, 2, 1, 3, 4, 6, 5, 7, 8, 9])
        # Cross-check with result from SAS FREQ:
        expected = (0.911111111111110, 0.000000000000000)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # same in opposite direction
        x = np.arange(10)
        y = np.arange(10)[::-1]
        # Cross-check with result from SAS FREQ:
        # SAS p value is not provided.
        expected = (-1.000000000000000, 0)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # swap a couple values and a couple more
        x = np.arange(10)
        y = np.array([9, 7, 8, 6, 5, 3, 4, 2, 1, 0])
        # Cross-check with result from SAS FREQ:
        expected = (-0.9111111111111111, 0.000000000000000)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # with some ties
        x1 = [12, 2, 1, 12, 2]
        x2 = [1, 4, 7, 1, 0]
        # Cross-check with result from SAS FREQ:
        expected = (-0.500000000000000, 0.304901788178780)
        res = stats.somersd(x1, x2)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # with only ties in one or both inputs
        # SAS will not produce an output for these:
        # NOTE: No statistics are computed for x * y because x has fewer
        # than 2 nonmissing levels.
        # WARNING: No OUTPUT data set is produced for this table because a
        # row or column variable has fewer than 2 nonmissing levels and no
        # statistics are computed.

        res = stats.somersd([2, 2, 2], [2, 2, 2])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        res = stats.somersd([2, 0, 2], [2, 2, 2])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        res = stats.somersd([2, 2, 2], [2, 0, 2])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        res = stats.somersd([0], [0])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        # empty arrays provided as input
        res = stats.somersd([], [])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        # test unequal length inputs
        x = np.arange(10.)
        y = np.arange(20.)
        assert_raises(ValueError, stats.somersd, x, y)

    def test_asymmetry(self):
        # test that somersd is asymmetric w.r.t. input order and that
        # convention is as described: first input is row variable & independent
        # data is from Wikipedia:
        # https://en.wikipedia.org/wiki/Somers%27_D
        # but currently that example contradicts itself - it says X is
        # independent yet take D_XY

        x = [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 1, 2,
             2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
        y = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
        # Cross-check with result from SAS FREQ:
        d_cr = 0.272727272727270
        d_rc = 0.342857142857140
        p = 0.092891940883700  # same p-value for either direction
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, d_cr, atol=1e-15)
        assert_allclose(res.pvalue, p, atol=1e-4)
        assert_equal(res.table.shape, (3, 2))
        res = stats.somersd(y, x)
        assert_allclose(res.statistic, d_rc, atol=1e-15)
        assert_allclose(res.pvalue, p, atol=1e-15)
        assert_equal(res.table.shape, (2, 3))

    def test_somers_original(self):
        # test against Somers' original paper [1]

        # Table 5A
        # Somers' convention was column IV
        table = np.array([[8, 2], [6, 5], [3, 4], [1, 3], [2, 3]])
        # Our convention (and that of SAS FREQ) is row IV
        table = table.T
        dyx = 129/340
        assert_allclose(stats.somersd(table).statistic, dyx)

        # table 7A - d_yx = 1
        table = np.array([[25, 0], [85, 0], [0, 30]])
        dxy, dyx = 3300/5425, 3300/3300
        assert_allclose(stats.somersd(table).statistic, dxy)
        assert_allclose(stats.somersd(table.T).statistic, dyx)

        # table 7B - d_yx < 0
        table = np.array([[25, 0], [0, 30], [85, 0]])
        dyx = -1800/3300
        assert_allclose(stats.somersd(table.T).statistic, dyx)

    def test_contingency_table_with_zero_rows_cols(self):
        # test that zero rows/cols in contingency table don't affect result

        N = 100
        shape = 4, 6
        size = np.prod(shape)

        np.random.seed(0)
        s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)
        res = stats.somersd(s)

        # insert a zero row, a zero column, and both
        s2 = np.insert(s, 2, np.zeros(shape[1]), axis=0)
        res2 = stats.somersd(s2)

        s3 = np.insert(s, 2, np.zeros(shape[0]), axis=1)
        res3 = stats.somersd(s3)

        s4 = np.insert(s2, 2, np.zeros(shape[0]+1), axis=1)
        res4 = stats.somersd(s4)

        # Cross-check with result from SAS FREQ:
        assert_allclose(res.statistic, -0.116981132075470, atol=1e-15)
        assert_allclose(res.statistic, res2.statistic)
        assert_allclose(res.statistic, res3.statistic)
        assert_allclose(res.statistic, res4.statistic)

        assert_allclose(res.pvalue, 0.156376448188150, atol=1e-15)
        assert_allclose(res.pvalue, res2.pvalue)
        assert_allclose(res.pvalue, res3.pvalue)
        assert_allclose(res.pvalue, res4.pvalue)

    def test_invalid_contingency_tables(self):
        """Invalid tables raise ValueError with informative messages."""
        N = 100
        shape = 4, 6
        size = np.prod(shape)

        np.random.seed(0)
        # start with a valid contingency table
        s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)

        s5 = s - 2  # negative entries
        message = "All elements of the contingency table must be non-negative"
        with assert_raises(ValueError, match=message):
            stats.somersd(s5)

        s6 = s + 0.01  # non-integer entries
        message = "All elements of the contingency table must be integer"
        with assert_raises(ValueError, match=message):
            stats.somersd(s6)

        message = ("At least two elements of the contingency "
                   "table must be nonzero.")
        with assert_raises(ValueError, match=message):
            stats.somersd([[]])

        with assert_raises(ValueError, match=message):
            stats.somersd([[1]])

        s7 = np.zeros((3, 3))
        with assert_raises(ValueError, match=message):
            stats.somersd(s7)

        s7[0, 1] = 1
        with assert_raises(ValueError, match=message):
            stats.somersd(s7)

    def test_only_ranks_matter(self):
        # only ranks of input data should matter
        x = [1, 2, 3]
        x2 = [-1, 2.1, np.inf]
        y = [3, 2, 1]
        y2 = [0, -0.5, -np.inf]
        res = stats.somersd(x, y)
        res2 = stats.somersd(x2, y2)
        assert_equal(res.statistic, res2.statistic)
        assert_equal(res.pvalue, res2.pvalue)

    def test_contingency_table_return(self):
        # check that contingency table is returned
        x = np.arange(10)
        y = np.arange(10)
        res = stats.somersd(x, y)
        assert_equal(res.table, np.eye(10))

    def test_somersd_alternative(self):
        # Test alternative parameter, asymptotic method (due to tie)

        # Based on scipy.stats.test_stats.TestCorrSpearman2::test_alternative
        x1 = [1, 2, 3, 4, 5]
        x2 = [5, 6, 7, 8, 7]

        # strong positive correlation
        expected = stats.somersd(x1, x2, alternative="two-sided")
        assert expected.statistic > 0

        # rank correlation > 0 -> large "less" p-value
        res = stats.somersd(x1, x2, alternative="less")
        assert_equal(res.statistic, expected.statistic)
        assert_allclose(res.pvalue, 1 - (expected.pvalue / 2))

        # rank correlation > 0 -> small "greater" p-value
        res = stats.somersd(x1, x2, alternative="greater")
        assert_equal(res.statistic, expected.statistic)
        assert_allclose(res.pvalue, expected.pvalue / 2)

        # reverse the direction of rank correlation
        x2.reverse()

        # strong negative correlation
        expected = stats.somersd(x1, x2, alternative="two-sided")
        assert expected.statistic < 0

        # rank correlation < 0 -> large "greater" p-value
        res = stats.somersd(x1, x2, alternative="greater")
        assert_equal(res.statistic, expected.statistic)
        assert_allclose(res.pvalue, 1 - (expected.pvalue / 2))

        # rank correlation < 0 -> small "less" p-value
        res = stats.somersd(x1, x2, alternative="less")
        assert_equal(res.statistic, expected.statistic)
        assert_allclose(res.pvalue, expected.pvalue / 2)

        with pytest.raises(ValueError, match="`alternative` must be..."):
            stats.somersd(x1, x2, alternative="ekki-ekki")

    @pytest.mark.parametrize("positive_correlation", (False, True))
    def test_somersd_perfect_correlation(self, positive_correlation):
        # Before the addition of `alternative`, perfect correlation was
        # treated as a special case. Now it is treated like any other case, but
        # make sure there are no divide by zero warnings or associated errors

        x1 = np.arange(10)
        x2 = x1 if positive_correlation else np.flip(x1)
        expected_statistic = 1 if positive_correlation else -1

        # perfect correlation -> small "two-sided" p-value (0)
        res = stats.somersd(x1, x2, alternative="two-sided")
        assert res.statistic == expected_statistic
        assert res.pvalue == 0

        # rank correlation > 0 -> large "less" p-value (1)
        res = stats.somersd(x1, x2, alternative="less")
        assert res.statistic == expected_statistic
        assert res.pvalue == (1 if positive_correlation else 0)

        # rank correlation > 0 -> small "greater" p-value (0)
        res = stats.somersd(x1, x2, alternative="greater")
        assert res.statistic == expected_statistic
        assert res.pvalue == (0 if positive_correlation else 1)

    def test_somersd_large_inputs_gh18132(self):
        # Test that large inputs where potential overflows could occur give
        # the expected output. This is tested in the case of binary inputs.
        # See gh-18126.

        # generate lists of random classes 1-2 (binary)
        classes = [1, 2]
        n_samples = 10 ** 6
        random.seed(6272161)
        x = random.choices(classes, k=n_samples)
        y = random.choices(classes, k=n_samples)

        # get value to compare with: sklearn output
        # from sklearn import metrics
        # val_auc_sklearn = metrics.roc_auc_score(x, y)
        # # convert to the Gini coefficient (Gini = (AUC*2)-1)
        # val_sklearn = 2 * val_auc_sklearn - 1
        val_sklearn = -0.001528138777036947

        # calculate the Somers' D statistic, which should be equal to the
        # result of val_sklearn until approximately machine precision
        val_scipy = stats.somersd(x, y).statistic
        assert_allclose(val_sklearn, val_scipy, atol=1e-15)
1025
+
1026
+ class TestBarnardExact:
1027
+ """Some tests to show that barnard_exact() works correctly."""
1028
+
1029
+ @pytest.mark.parametrize(
1030
+ "input_sample,expected",
1031
+ [
1032
+ ([[43, 40], [10, 39]], (3.555406779643, 0.000362832367)),
1033
+ ([[100, 2], [1000, 5]], (-1.776382925679, 0.135126970878)),
1034
+ ([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
1035
+ ([[5, 1], [10, 10]], (1.449486150679, 0.156277546306)),
1036
+ ([[5, 15], [20, 20]], (-1.851640199545, 0.066363501421)),
1037
+ ([[5, 16], [20, 25]], (-1.609639949352, 0.116984852192)),
1038
+ ([[10, 5], [10, 1]], (-1.449486150679, 0.177536588915)),
1039
+ ([[5, 0], [1, 4]], (2.581988897472, 0.013671875000)),
1040
+ ([[0, 1], [3, 2]], (-1.095445115010, 0.509667991877)),
1041
+ ([[0, 2], [6, 4]], (-1.549193338483, 0.197019618792)),
1042
+ ([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
1043
+ ],
1044
+ )
1045
+ def test_precise(self, input_sample, expected):
1046
+ """The expected values have been generated by R, using a resolution
1047
+ for the nuisance parameter of 1e-6 :
1048
+ ```R
1049
+ library(Barnard)
1050
+ options(digits=10)
1051
+ barnard.test(43, 40, 10, 39, dp=1e-6, pooled=TRUE)
1052
+ ```
1053
+ """
1054
+ res = barnard_exact(input_sample)
1055
+ statistic, pvalue = res.statistic, res.pvalue
1056
+ assert_allclose([statistic, pvalue], expected)
1057
+
1058
+ @pytest.mark.parametrize(
1059
+ "input_sample,expected",
1060
+ [
1061
+ ([[43, 40], [10, 39]], (3.920362887717, 0.000289470662)),
1062
+ ([[100, 2], [1000, 5]], (-1.139432816087, 0.950272080594)),
1063
+ ([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
1064
+ ([[5, 1], [10, 10]], (1.622375939458, 0.150599922226)),
1065
+ ([[5, 15], [20, 20]], (-1.974771239528, 0.063038448651)),
1066
+ ([[5, 16], [20, 25]], (-1.722122973346, 0.133329494287)),
1067
+ ([[10, 5], [10, 1]], (-1.765469659009, 0.250566655215)),
1068
+ ([[5, 0], [1, 4]], (5.477225575052, 0.007812500000)),
1069
+ ([[0, 1], [3, 2]], (-1.224744871392, 0.509667991877)),
1070
+ ([[0, 2], [6, 4]], (-1.732050807569, 0.197019618792)),
1071
+ ([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
1072
+ ],
1073
+ )
1074
+ def test_pooled_param(self, input_sample, expected):
1075
+ """The expected values have been generated by R, using a resolution
1076
+ for the nuisance parameter of 1e-6 :
1077
+ ```R
1078
+ library(Barnard)
1079
+ options(digits=10)
1080
+ barnard.test(43, 40, 10, 39, dp=1e-6, pooled=FALSE)
1081
+ ```
1082
+ """
1083
+ res = barnard_exact(input_sample, pooled=False)
1084
+ statistic, pvalue = res.statistic, res.pvalue
1085
+ assert_allclose([statistic, pvalue], expected)
1086
+
1087
+ def test_raises(self):
1088
+ # test we raise an error for wrong input number of nuisances.
1089
+ error_msg = (
1090
+ "Number of points `n` must be strictly positive, found 0"
1091
+ )
1092
+ with assert_raises(ValueError, match=error_msg):
1093
+ barnard_exact([[1, 2], [3, 4]], n=0)
1094
+
1095
+ # test we raise an error for wrong shape of input.
1096
+ error_msg = "The input `table` must be of shape \\(2, 2\\)."
1097
+ with assert_raises(ValueError, match=error_msg):
1098
+ barnard_exact(np.arange(6).reshape(2, 3))
1099
+
1100
+ # Test all values must be positives
1101
+ error_msg = "All values in `table` must be nonnegative."
1102
+ with assert_raises(ValueError, match=error_msg):
1103
+ barnard_exact([[-1, 2], [3, 4]])
1104
+
1105
+ # Test value error on wrong alternative param
1106
+ error_msg = (
1107
+ "`alternative` should be one of {'two-sided', 'less', 'greater'},"
1108
+ " found .*"
1109
+ )
1110
+ with assert_raises(ValueError, match=error_msg):
1111
+ barnard_exact([[1, 2], [3, 4]], "not-correct")
1112
+
1113
+ @pytest.mark.parametrize(
1114
+ "input_sample,expected",
1115
+ [
1116
+ ([[0, 0], [4, 3]], (1.0, 0)),
1117
+ ],
1118
+ )
1119
+ def test_edge_cases(self, input_sample, expected):
1120
+ res = barnard_exact(input_sample)
1121
+ statistic, pvalue = res.statistic, res.pvalue
1122
+ assert_equal(pvalue, expected[0])
1123
+ assert_equal(statistic, expected[1])
1124
+
1125
+ @pytest.mark.parametrize(
1126
+ "input_sample,expected",
1127
+ [
1128
+ ([[0, 5], [0, 10]], (1.0, np.nan)),
1129
+ ([[5, 0], [10, 0]], (1.0, np.nan)),
1130
+ ],
1131
+ )
1132
+ def test_row_or_col_zero(self, input_sample, expected):
1133
+ res = barnard_exact(input_sample)
1134
+ statistic, pvalue = res.statistic, res.pvalue
1135
+ assert_equal(pvalue, expected[0])
1136
+ assert_equal(statistic, expected[1])
1137
+
1138
+ @pytest.mark.parametrize(
1139
+ "input_sample,expected",
1140
+ [
1141
+ ([[2, 7], [8, 2]], (-2.518474945157, 0.009886140845)),
1142
+ ([[7, 200], [300, 8]], (-21.320036698460, 0.0)),
1143
+ ([[21, 28], [1957, 6]], (-30.489638143953, 0.0)),
1144
+ ],
1145
+ )
1146
+ @pytest.mark.parametrize("alternative", ["greater", "less"])
1147
+ def test_less_greater(self, input_sample, expected, alternative):
1148
+ """
1149
+ "The expected values have been generated by R, using a resolution
1150
+ for the nuisance parameter of 1e-6 :
1151
+ ```R
1152
+ library(Barnard)
1153
+ options(digits=10)
1154
+ a = barnard.test(2, 7, 8, 2, dp=1e-6, pooled=TRUE)
1155
+ a$p.value[1]
1156
+ ```
1157
+ In this test, we are using the "one-sided" return value `a$p.value[1]`
1158
+ to test our pvalue.
1159
+ """
1160
+ expected_stat, less_pvalue_expect = expected
1161
+
1162
+ if alternative == "greater":
1163
+ input_sample = np.array(input_sample)[:, ::-1]
1164
+ expected_stat = -expected_stat
1165
+
1166
+ res = barnard_exact(input_sample, alternative=alternative)
1167
+ statistic, pvalue = res.statistic, res.pvalue
1168
+ assert_allclose(
1169
+ [statistic, pvalue], [expected_stat, less_pvalue_expect], atol=1e-7
1170
+ )
1171
+
1172
+
1173
+ class TestBoschlooExact:
1174
+ """Some tests to show that boschloo_exact() works correctly."""
1175
+
1176
+ ATOL = 1e-7
1177
+
1178
+ @pytest.mark.parametrize(
1179
+ "input_sample,expected",
1180
+ [
1181
+ ([[2, 7], [8, 2]], (0.01852173, 0.009886142)),
1182
+ ([[5, 1], [10, 10]], (0.9782609, 0.9450994)),
1183
+ ([[5, 16], [20, 25]], (0.08913823, 0.05827348)),
1184
+ ([[10, 5], [10, 1]], (0.1652174, 0.08565611)),
1185
+ ([[5, 0], [1, 4]], (1, 1)),
1186
+ ([[0, 1], [3, 2]], (0.5, 0.34375)),
1187
+ ([[2, 7], [8, 2]], (0.01852173, 0.009886142)),
1188
+ ([[7, 12], [8, 3]], (0.06406797, 0.03410916)),
1189
+ ([[10, 24], [25, 37]], (0.2009359, 0.1512882)),
1190
+ ],
1191
+ )
1192
+ def test_less(self, input_sample, expected):
1193
+ """The expected values have been generated by R, using a resolution
1194
+ for the nuisance parameter of 1e-8 :
1195
+ ```R
1196
+ library(Exact)
1197
+ options(digits=10)
1198
+ data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
1199
+ a = exact.test(data, method="Boschloo", alternative="less",
1200
+ tsmethod="central", np.interval=TRUE, beta=1e-8)
1201
+ ```
1202
+ """
1203
+ res = boschloo_exact(input_sample, alternative="less")
1204
+ statistic, pvalue = res.statistic, res.pvalue
1205
+ assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
1206
+
1207
+ @pytest.mark.parametrize(
1208
+ "input_sample,expected",
1209
+ [
1210
+ ([[43, 40], [10, 39]], (0.0002875544, 0.0001615562)),
1211
+ ([[2, 7], [8, 2]], (0.9990149, 0.9918327)),
1212
+ ([[5, 1], [10, 10]], (0.1652174, 0.09008534)),
1213
+ ([[5, 15], [20, 20]], (0.9849087, 0.9706997)),
1214
+ ([[5, 16], [20, 25]], (0.972349, 0.9524124)),
1215
+ ([[5, 0], [1, 4]], (0.02380952, 0.006865367)),
1216
+ ([[0, 1], [3, 2]], (1, 1)),
1217
+ ([[0, 2], [6, 4]], (1, 1)),
1218
+ ([[2, 7], [8, 2]], (0.9990149, 0.9918327)),
1219
+ ([[7, 12], [8, 3]], (0.9895302, 0.9771215)),
1220
+ ([[10, 24], [25, 37]], (0.9012936, 0.8633275)),
1221
+ ],
1222
+ )
1223
+ def test_greater(self, input_sample, expected):
1224
+ """The expected values have been generated by R, using a resolution
1225
+ for the nuisance parameter of 1e-8 :
1226
+ ```R
1227
+ library(Exact)
1228
+ options(digits=10)
1229
+ data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
1230
+ a = exact.test(data, method="Boschloo", alternative="greater",
1231
+ tsmethod="central", np.interval=TRUE, beta=1e-8)
1232
+ ```
1233
+ """
1234
+ res = boschloo_exact(input_sample, alternative="greater")
1235
+ statistic, pvalue = res.statistic, res.pvalue
1236
+ assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
1237
+
1238
+ @pytest.mark.parametrize(
1239
+ "input_sample,expected",
1240
+ [
1241
+ ([[43, 40], [10, 39]], (0.0002875544, 0.0003231115)),
1242
+ ([[2, 7], [8, 2]], (0.01852173, 0.01977228)),
1243
+ ([[5, 1], [10, 10]], (0.1652174, 0.1801707)),
1244
+ ([[5, 16], [20, 25]], (0.08913823, 0.116547)),
1245
+ ([[5, 0], [1, 4]], (0.02380952, 0.01373073)),
1246
+ ([[0, 1], [3, 2]], (0.5, 0.6875)),
1247
+ ([[2, 7], [8, 2]], (0.01852173, 0.01977228)),
1248
+ ([[7, 12], [8, 3]], (0.06406797, 0.06821831)),
1249
+ ],
1250
+ )
1251
+ def test_two_sided(self, input_sample, expected):
1252
+ """The expected values have been generated by R, using a resolution
1253
+ for the nuisance parameter of 1e-8 :
1254
+ ```R
1255
+ library(Exact)
1256
+ options(digits=10)
1257
+ data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
1258
+ a = exact.test(data, method="Boschloo", alternative="two.sided",
1259
+ tsmethod="central", np.interval=TRUE, beta=1e-8)
1260
+ ```
1261
+ """
1262
+ res = boschloo_exact(input_sample, alternative="two-sided", n=64)
1263
+ # Need n = 64 for python 32-bit
1264
+ statistic, pvalue = res.statistic, res.pvalue
1265
+ assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
1266
+
1267
+ def test_raises(self):
1268
+ # test we raise an error for wrong input number of nuisances.
1269
+ error_msg = (
1270
+ "Number of points `n` must be strictly positive, found 0"
1271
+ )
1272
+ with assert_raises(ValueError, match=error_msg):
1273
+ boschloo_exact([[1, 2], [3, 4]], n=0)
1274
+
1275
+ # test we raise an error for wrong shape of input.
1276
+ error_msg = "The input `table` must be of shape \\(2, 2\\)."
1277
+ with assert_raises(ValueError, match=error_msg):
1278
+ boschloo_exact(np.arange(6).reshape(2, 3))
1279
+
1280
+ # Test all values must be positives
1281
+ error_msg = "All values in `table` must be nonnegative."
1282
+ with assert_raises(ValueError, match=error_msg):
1283
+ boschloo_exact([[-1, 2], [3, 4]])
1284
+
1285
+ # Test value error on wrong alternative param
1286
+ error_msg = (
1287
+ r"`alternative` should be one of \('two-sided', 'less', "
1288
+ r"'greater'\), found .*"
1289
+ )
1290
+ with assert_raises(ValueError, match=error_msg):
1291
+ boschloo_exact([[1, 2], [3, 4]], "not-correct")
1292
+
1293
+ @pytest.mark.parametrize(
1294
+ "input_sample,expected",
1295
+ [
1296
+ ([[0, 5], [0, 10]], (np.nan, np.nan)),
1297
+ ([[5, 0], [10, 0]], (np.nan, np.nan)),
1298
+ ],
1299
+ )
1300
+ def test_row_or_col_zero(self, input_sample, expected):
1301
+ res = boschloo_exact(input_sample)
1302
+ statistic, pvalue = res.statistic, res.pvalue
1303
+ assert_equal(pvalue, expected[0])
1304
+ assert_equal(statistic, expected[1])
1305
+
1306
+ def test_two_sided_gt_1(self):
1307
+ # Check that returned p-value does not exceed 1 even when twice
1308
+ # the minimum of the one-sided p-values does. See gh-15345.
1309
+ tbl = [[1, 1], [13, 12]]
1310
+ pl = boschloo_exact(tbl, alternative='less').pvalue
1311
+ pg = boschloo_exact(tbl, alternative='greater').pvalue
1312
+ assert 2*min(pl, pg) > 1
1313
+ pt = boschloo_exact(tbl, alternative='two-sided').pvalue
1314
+ assert pt == 1.0
1315
+
1316
+ @pytest.mark.parametrize("alternative", ("less", "greater"))
1317
+ def test_against_fisher_exact(self, alternative):
1318
+ # Check that the statistic of `boschloo_exact` is the same as the
1319
+ # p-value of `fisher_exact` (for one-sided tests). See gh-15345.
1320
+ tbl = [[2, 7], [8, 2]]
1321
+ boschloo_stat = boschloo_exact(tbl, alternative=alternative).statistic
1322
+ fisher_p = stats.fisher_exact(tbl, alternative=alternative)[1]
1323
+ assert_allclose(boschloo_stat, fisher_p)
1324
+
1325
+
1326
+ class TestCvm_2samp:
1327
+ @pytest.mark.parametrize('args', [([], np.arange(5)),
1328
+ (np.arange(5), [1])])
1329
+ def test_too_small_input(self, args):
1330
+ with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
1331
+ res = cramervonmises_2samp(*args)
1332
+ assert_equal(res.statistic, np.nan)
1333
+ assert_equal(res.pvalue, np.nan)
1334
+
1335
+ def test_invalid_input(self):
1336
+ y = np.arange(5)
1337
+ msg = 'method must be either auto, exact or asymptotic'
1338
+ with pytest.raises(ValueError, match=msg):
1339
+ cramervonmises_2samp(y, y, 'xyz')
1340
+
1341
+ def test_list_input(self):
1342
+ x = [2, 3, 4, 7, 6]
1343
+ y = [0.2, 0.7, 12, 18]
1344
+ r1 = cramervonmises_2samp(x, y)
1345
+ r2 = cramervonmises_2samp(np.array(x), np.array(y))
1346
+ assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
1347
+
1348
+ def test_example_conover(self):
1349
+ # Example 2 in Section 6.2 of W.J. Conover: Practical Nonparametric
1350
+ # Statistics, 1971.
1351
+ x = [7.6, 8.4, 8.6, 8.7, 9.3, 9.9, 10.1, 10.6, 11.2]
1352
+ y = [5.2, 5.7, 5.9, 6.5, 6.8, 8.2, 9.1, 9.8, 10.8, 11.3, 11.5, 12.3,
1353
+ 12.5, 13.4, 14.6]
1354
+ r = cramervonmises_2samp(x, y)
1355
+ assert_allclose(r.statistic, 0.262, atol=1e-3)
1356
+ assert_allclose(r.pvalue, 0.18, atol=1e-2)
1357
+
1358
+ @pytest.mark.parametrize('statistic, m, n, pval',
1359
+ [(710, 5, 6, 48./462),
1360
+ (1897, 7, 7, 117./1716),
1361
+ (576, 4, 6, 2./210),
1362
+ (1764, 6, 7, 2./1716)])
1363
+ def test_exact_pvalue(self, statistic, m, n, pval):
1364
+ # the exact values are taken from Anderson: On the distribution of the
1365
+ # two-sample Cramer-von-Mises criterion, 1962.
1366
+ # The values are taken from Table 2, 3, 4 and 5
1367
+ assert_equal(_pval_cvm_2samp_exact(statistic, m, n), pval)
1368
+
1369
+ @pytest.mark.xslow
1370
+ def test_large_sample(self):
1371
+ # for large samples, the statistic U gets very large
1372
+ # do a sanity check that p-value is not 0, 1 or nan
1373
+ np.random.seed(4367)
1374
+ x = distributions.norm.rvs(size=1000000)
1375
+ y = distributions.norm.rvs(size=900000)
1376
+ r = cramervonmises_2samp(x, y)
1377
+ assert_(0 < r.pvalue < 1)
1378
+ r = cramervonmises_2samp(x, y+0.1)
1379
+ assert_(0 < r.pvalue < 1)
1380
+
1381
+ def test_exact_vs_asymptotic(self):
1382
+ np.random.seed(0)
1383
+ x = np.random.rand(7)
1384
+ y = np.random.rand(8)
1385
+ r1 = cramervonmises_2samp(x, y, method='exact')
1386
+ r2 = cramervonmises_2samp(x, y, method='asymptotic')
1387
+ assert_equal(r1.statistic, r2.statistic)
1388
+ assert_allclose(r1.pvalue, r2.pvalue, atol=1e-2)
1389
+
1390
+ def test_method_auto(self):
1391
+ x = np.arange(20)
1392
+ y = [0.5, 4.7, 13.1]
1393
+ r1 = cramervonmises_2samp(x, y, method='exact')
1394
+ r2 = cramervonmises_2samp(x, y, method='auto')
1395
+ assert_equal(r1.pvalue, r2.pvalue)
1396
+ # switch to asymptotic if one sample has more than 20 observations
1397
+ x = np.arange(21)
1398
+ r1 = cramervonmises_2samp(x, y, method='asymptotic')
1399
+ r2 = cramervonmises_2samp(x, y, method='auto')
1400
+ assert_equal(r1.pvalue, r2.pvalue)
1401
+
1402
+ def test_same_input(self):
1403
+ # make sure trivial edge case can be handled
1404
+ # note that _cdf_cvm_inf(0) = nan. implementation avoids nan by
1405
+ # returning pvalue=1 for very small values of the statistic
1406
+ x = np.arange(15)
1407
+ res = cramervonmises_2samp(x, x)
1408
+ assert_equal((res.statistic, res.pvalue), (0.0, 1.0))
1409
+ # check exact p-value
1410
+ res = cramervonmises_2samp(x[:4], x[:4])
1411
+ assert_equal((res.statistic, res.pvalue), (0.0, 1.0))
1412
+
1413
+
1414
+ class TestTukeyHSD:
1415
+
1416
+ data_same_size = ([24.5, 23.5, 26.4, 27.1, 29.9],
1417
+ [28.4, 34.2, 29.5, 32.2, 30.1],
1418
+ [26.1, 28.3, 24.3, 26.2, 27.8])
1419
+ data_diff_size = ([24.5, 23.5, 26.28, 26.4, 27.1, 29.9, 30.1, 30.1],
1420
+ [28.4, 34.2, 29.5, 32.2, 30.1],
1421
+ [26.1, 28.3, 24.3, 26.2, 27.8])
1422
+ extreme_size = ([24.5, 23.5, 26.4],
1423
+ [28.4, 34.2, 29.5, 32.2, 30.1, 28.4, 34.2, 29.5, 32.2,
1424
+ 30.1],
1425
+ [26.1, 28.3, 24.3, 26.2, 27.8])
1426
+
1427
+ sas_same_size = """
1428
+ Comparison LowerCL Difference UpperCL Significance
1429
+ 2 - 3 0.6908830568 4.34 7.989116943 1
1430
+ 2 - 1 0.9508830568 4.6 8.249116943 1
1431
+ 3 - 2 -7.989116943 -4.34 -0.6908830568 1
1432
+ 3 - 1 -3.389116943 0.26 3.909116943 0
1433
+ 1 - 2 -8.249116943 -4.6 -0.9508830568 1
1434
+ 1 - 3 -3.909116943 -0.26 3.389116943 0
1435
+ """
1436
+
1437
+ sas_diff_size = """
1438
+ Comparison LowerCL Difference UpperCL Significance
1439
+ 2 - 1 0.2679292645 3.645 7.022070736 1
1440
+ 2 - 3 0.5934764007 4.34 8.086523599 1
1441
+ 1 - 2 -7.022070736 -3.645 -0.2679292645 1
1442
+ 1 - 3 -2.682070736 0.695 4.072070736 0
1443
+ 3 - 2 -8.086523599 -4.34 -0.5934764007 1
1444
+ 3 - 1 -4.072070736 -0.695 2.682070736 0
1445
+ """
1446
+
1447
+ sas_extreme = """
1448
+ Comparison LowerCL Difference UpperCL Significance
1449
+ 2 - 3 1.561605075 4.34 7.118394925 1
1450
+ 2 - 1 2.740784879 6.08 9.419215121 1
1451
+ 3 - 2 -7.118394925 -4.34 -1.561605075 1
1452
+ 3 - 1 -1.964526566 1.74 5.444526566 0
1453
+ 1 - 2 -9.419215121 -6.08 -2.740784879 1
1454
+ 1 - 3 -5.444526566 -1.74 1.964526566 0
1455
+ """
1456
+
1457
+ @pytest.mark.parametrize("data,res_expect_str,atol",
1458
+ ((data_same_size, sas_same_size, 1e-4),
1459
+ (data_diff_size, sas_diff_size, 1e-4),
1460
+ (extreme_size, sas_extreme, 1e-10),
1461
+ ),
1462
+ ids=["equal size sample",
1463
+ "unequal sample size",
1464
+ "extreme sample size differences"])
1465
+ def test_compare_sas(self, data, res_expect_str, atol):
1466
+ '''
1467
+ SAS code used to generate results for each sample:
1468
+ DATA ACHE;
1469
+ INPUT BRAND RELIEF;
1470
+ CARDS;
1471
+ 1 24.5
1472
+ ...
1473
+ 3 27.8
1474
+ ;
1475
+ ods graphics on; ODS RTF;ODS LISTING CLOSE;
1476
+ PROC ANOVA DATA=ACHE;
1477
+ CLASS BRAND;
1478
+ MODEL RELIEF=BRAND;
1479
+ MEANS BRAND/TUKEY CLDIFF;
1480
+ TITLE 'COMPARE RELIEF ACROSS MEDICINES - ANOVA EXAMPLE';
1481
+ ods output CLDiffs =tc;
1482
+ proc print data=tc;
1483
+ format LowerCL 17.16 UpperCL 17.16 Difference 17.16;
1484
+ title "Output with many digits";
1485
+ RUN;
1486
+ QUIT;
1487
+ ODS RTF close;
1488
+ ODS LISTING;
1489
+ '''
1490
+ res_expect = np.asarray(res_expect_str.replace(" - ", " ").split()[5:],
1491
+ dtype=float).reshape((6, 6))
1492
+ res_tukey = stats.tukey_hsd(*data)
1493
+ conf = res_tukey.confidence_interval()
1494
+ # loop over the comparisons
1495
+ for i, j, l, s, h, sig in res_expect:
1496
+ i, j = int(i) - 1, int(j) - 1
1497
+ assert_allclose(conf.low[i, j], l, atol=atol)
1498
+ assert_allclose(res_tukey.statistic[i, j], s, atol=atol)
1499
+ assert_allclose(conf.high[i, j], h, atol=atol)
1500
+ assert_allclose((res_tukey.pvalue[i, j] <= .05), sig == 1)
1501
+
1502
+ matlab_sm_siz = """
1503
+ 1 2 -8.2491590248597 -4.6 -0.9508409751403 0.0144483269098
1504
+ 1 3 -3.9091590248597 -0.26 3.3891590248597 0.9803107240900
1505
+ 2 3 0.6908409751403 4.34 7.9891590248597 0.0203311368795
1506
+ """
1507
+
1508
+ matlab_diff_sz = """
1509
+ 1 2 -7.02207069748501 -3.645 -0.26792930251500 0.03371498443080
1510
+ 1 3 -2.68207069748500 0.695 4.07207069748500 0.85572267328807
1511
+ 2 3 0.59347644287720 4.34 8.08652355712281 0.02259047020620
1512
+ """
1513
+
1514
+ @pytest.mark.parametrize("data,res_expect_str,atol",
1515
+ ((data_same_size, matlab_sm_siz, 1e-12),
1516
+ (data_diff_size, matlab_diff_sz, 1e-7)),
1517
+ ids=["equal size sample",
1518
+ "unequal size sample"])
1519
+ def test_compare_matlab(self, data, res_expect_str, atol):
1520
+ """
1521
+ vals = [24.5, 23.5, 26.4, 27.1, 29.9, 28.4, 34.2, 29.5, 32.2, 30.1,
1522
+ 26.1, 28.3, 24.3, 26.2, 27.8]
1523
+ names = {'zero', 'zero', 'zero', 'zero', 'zero', 'one', 'one', 'one',
1524
+ 'one', 'one', 'two', 'two', 'two', 'two', 'two'}
1525
+ [p,t,stats] = anova1(vals,names,"off");
1526
+ [c,m,h,nms] = multcompare(stats, "CType","hsd");
1527
+ """
1528
+ res_expect = np.asarray(res_expect_str.split(),
1529
+ dtype=float).reshape((3, 6))
1530
+ res_tukey = stats.tukey_hsd(*data)
1531
+ conf = res_tukey.confidence_interval()
1532
+ # loop over the comparisons
1533
+ for i, j, l, s, h, p in res_expect:
1534
+ i, j = int(i) - 1, int(j) - 1
1535
+ assert_allclose(conf.low[i, j], l, atol=atol)
1536
+ assert_allclose(res_tukey.statistic[i, j], s, atol=atol)
1537
+ assert_allclose(conf.high[i, j], h, atol=atol)
1538
+ assert_allclose(res_tukey.pvalue[i, j], p, atol=atol)
1539
+
1540
+ def test_compare_r(self):
1541
+ """
1542
+ Testing against results and p-values from R:
1543
+ from: https://www.rdocumentation.org/packages/stats/versions/3.6.2/
1544
+ topics/TukeyHSD
1545
+ > require(graphics)
1546
+ > summary(fm1 <- aov(breaks ~ tension, data = warpbreaks))
1547
+ > TukeyHSD(fm1, "tension", ordered = TRUE)
1548
+ > plot(TukeyHSD(fm1, "tension"))
1549
+ Tukey multiple comparisons of means
1550
+ 95% family-wise confidence level
1551
+ factor levels have been ordered
1552
+ Fit: aov(formula = breaks ~ tension, data = warpbreaks)
1553
+ $tension
1554
+ """
1555
+ str_res = """
1556
+ diff lwr upr p adj
1557
+ 2 - 3 4.722222 -4.8376022 14.28205 0.4630831
1558
+ 1 - 3 14.722222 5.1623978 24.28205 0.0014315
1559
+ 1 - 2 10.000000 0.4401756 19.55982 0.0384598
1560
+ """
1561
+ res_expect = np.asarray(str_res.replace(" - ", " ").split()[5:],
1562
+ dtype=float).reshape((3, 6))
1563
+ data = ([26, 30, 54, 25, 70, 52, 51, 26, 67,
1564
+ 27, 14, 29, 19, 29, 31, 41, 20, 44],
1565
+ [18, 21, 29, 17, 12, 18, 35, 30, 36,
1566
+ 42, 26, 19, 16, 39, 28, 21, 39, 29],
1567
+ [36, 21, 24, 18, 10, 43, 28, 15, 26,
1568
+ 20, 21, 24, 17, 13, 15, 15, 16, 28])
1569
+
1570
+ res_tukey = stats.tukey_hsd(*data)
1571
+ conf = res_tukey.confidence_interval()
1572
+ # loop over the comparisons
1573
+ for i, j, s, l, h, p in res_expect:
1574
+ i, j = int(i) - 1, int(j) - 1
1575
+ # atols are set to the number of digits present in the r result.
1576
+ assert_allclose(conf.low[i, j], l, atol=1e-7)
1577
+ assert_allclose(res_tukey.statistic[i, j], s, atol=1e-6)
1578
+ assert_allclose(conf.high[i, j], h, atol=1e-5)
1579
+ assert_allclose(res_tukey.pvalue[i, j], p, atol=1e-7)
1580
+
1581
+ def test_engineering_stat_handbook(self):
1582
+ '''
1583
+ Example sourced from:
1584
+ https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm
1585
+ '''
1586
+ group1 = [6.9, 5.4, 5.8, 4.6, 4.0]
1587
+ group2 = [8.3, 6.8, 7.8, 9.2, 6.5]
1588
+ group3 = [8.0, 10.5, 8.1, 6.9, 9.3]
1589
+ group4 = [5.8, 3.8, 6.1, 5.6, 6.2]
1590
+ res = stats.tukey_hsd(group1, group2, group3, group4)
1591
+ conf = res.confidence_interval()
1592
+ lower = np.asarray([
1593
+ [0, 0, 0, -2.25],
1594
+ [.29, 0, -2.93, .13],
1595
+ [1.13, 0, 0, .97],
1596
+ [0, 0, 0, 0]])
1597
+ upper = np.asarray([
1598
+ [0, 0, 0, 1.93],
1599
+ [4.47, 0, 1.25, 4.31],
1600
+ [5.31, 0, 0, 5.15],
1601
+ [0, 0, 0, 0]])
1602
+
1603
+ for (i, j) in [(1, 0), (2, 0), (0, 3), (1, 2), (2, 3)]:
1604
+ assert_allclose(conf.low[i, j], lower[i, j], atol=1e-2)
1605
+ assert_allclose(conf.high[i, j], upper[i, j], atol=1e-2)
1606
+
1607
+ def test_rand_symm(self):
1608
+ # test some expected identities of the results
1609
+ np.random.seed(1234)
1610
+ data = np.random.rand(3, 100)
1611
+ res = stats.tukey_hsd(*data)
1612
+ conf = res.confidence_interval()
1613
+ # the confidence intervals should be negated symmetric of each other
1614
+ assert_equal(conf.low, -conf.high.T)
1615
+ # the `high` and `low` center diagonals should be the same since the
1616
+ # mean difference in a self comparison is 0.
1617
+ assert_equal(np.diagonal(conf.high), conf.high[0, 0])
1618
+ assert_equal(np.diagonal(conf.low), conf.low[0, 0])
1619
+ # statistic array should be antisymmetric with zeros on the diagonal
1620
+ assert_equal(res.statistic, -res.statistic.T)
1621
+ assert_equal(np.diagonal(res.statistic), 0)
1622
+ # p-values should be symmetric and 1 when compared to itself
1623
+ assert_equal(res.pvalue, res.pvalue.T)
1624
+ assert_equal(np.diagonal(res.pvalue), 1)
1625
+
1626
+ def test_no_inf(self):
1627
+ with assert_raises(ValueError, match="...must be finite."):
1628
+ stats.tukey_hsd([1, 2, 3], [2, np.inf], [6, 7, 3])
1629
+
1630
+ def test_is_1d(self):
1631
+ with assert_raises(ValueError, match="...must be one-dimensional"):
1632
+ stats.tukey_hsd([[1, 2], [2, 3]], [2, 5], [5, 23, 6])
1633
+
1634
+ def test_no_empty(self):
1635
+ with assert_raises(ValueError, match="...must be greater than one"):
1636
+ stats.tukey_hsd([], [2, 5], [4, 5, 6])
1637
+
1638
+ @pytest.mark.parametrize("nargs", (0, 1))
1639
+ def test_not_enough_treatments(self, nargs):
1640
+ with assert_raises(ValueError, match="...more than 1 treatment."):
1641
+ stats.tukey_hsd(*([[23, 7, 3]] * nargs))
1642
+
1643
+ @pytest.mark.parametrize("cl", [-.5, 0, 1, 2])
1644
+ def test_conf_level_invalid(self, cl):
1645
+ with assert_raises(ValueError, match="must be between 0 and 1"):
1646
+ r = stats.tukey_hsd([23, 7, 3], [3, 4], [9, 4])
1647
+ r.confidence_interval(cl)
1648
+
1649
+ def test_2_args_ttest(self):
1650
+ # that with 2 treatments the `pvalue` is equal to that of `ttest_ind`
1651
+ res_tukey = stats.tukey_hsd(*self.data_diff_size[:2])
1652
+ res_ttest = stats.ttest_ind(*self.data_diff_size[:2])
1653
+ assert_allclose(res_ttest.pvalue, res_tukey.pvalue[0, 1])
1654
+ assert_allclose(res_ttest.pvalue, res_tukey.pvalue[1, 0])
1655
+
1656
+
1657
+ class TestPoissonMeansTest:
1658
+ @pytest.mark.parametrize("c1, n1, c2, n2, p_expect", (
1659
+ # example from [1], 6. Illustrative examples: Example 1
1660
+ [0, 100, 3, 100, 0.0884],
1661
+ [2, 100, 6, 100, 0.1749]
1662
+ ))
1663
+ def test_paper_examples(self, c1, n1, c2, n2, p_expect):
1664
+ res = stats.poisson_means_test(c1, n1, c2, n2)
1665
+ assert_allclose(res.pvalue, p_expect, atol=1e-4)
1666
+
1667
+ @pytest.mark.parametrize("c1, n1, c2, n2, p_expect, alt, d", (
1668
+ # These test cases are produced by the wrapped fortran code from the
1669
+ # original authors. Using a slightly modified version of this fortran,
1670
+ # found here, https://github.com/nolanbconaway/poisson-etest,
1671
+ # additional tests were created.
1672
+ [20, 10, 20, 10, 0.9999997568929630, 'two-sided', 0],
1673
+ [10, 10, 10, 10, 0.9999998403241203, 'two-sided', 0],
1674
+ [50, 15, 1, 1, 0.09920321053409643, 'two-sided', .05],
1675
+ [3, 100, 20, 300, 0.12202725450896404, 'two-sided', 0],
1676
+ [3, 12, 4, 20, 0.40416087318539173, 'greater', 0],
1677
+ [4, 20, 3, 100, 0.008053640402974236, 'greater', 0],
1678
+ # publishing paper does not include a `less` alternative,
1679
+ # so it was calculated with switched argument order and
1680
+ # alternative="greater"
1681
+ [4, 20, 3, 10, 0.3083216325432898, 'less', 0],
1682
+ [1, 1, 50, 15, 0.09322998607245102, 'less', 0]
1683
+ ))
1684
+ def test_fortran_authors(self, c1, n1, c2, n2, p_expect, alt, d):
1685
+ res = stats.poisson_means_test(c1, n1, c2, n2, alternative=alt, diff=d)
1686
+ assert_allclose(res.pvalue, p_expect, atol=2e-6, rtol=1e-16)
1687
+
1688
+ def test_different_results(self):
1689
+ # The implementation in Fortran is known to break down at higher
1690
+ # counts and observations, so we expect different results. By
1691
+ # inspection we can infer the p-value to be near one.
1692
+ count1, count2 = 10000, 10000
1693
+ nobs1, nobs2 = 10000, 10000
1694
+ res = stats.poisson_means_test(count1, nobs1, count2, nobs2)
1695
+ assert_allclose(res.pvalue, 1)
1696
+
1697
+ def test_less_than_zero_lambda_hat2(self):
1698
+ # demonstrates behavior that fixes a known fault from original Fortran.
1699
+ # p-value should clearly be near one.
1700
+ count1, count2 = 0, 0
1701
+ nobs1, nobs2 = 1, 1
1702
+ res = stats.poisson_means_test(count1, nobs1, count2, nobs2)
1703
+ assert_allclose(res.pvalue, 1)
1704
+
1705
+ def test_input_validation(self):
1706
+ count1, count2 = 0, 0
1707
+ nobs1, nobs2 = 1, 1
1708
+
1709
+ # test non-integral events
1710
+ message = '`k1` and `k2` must be integers.'
1711
+ with assert_raises(TypeError, match=message):
1712
+ stats.poisson_means_test(.7, nobs1, count2, nobs2)
1713
+ with assert_raises(TypeError, match=message):
1714
+ stats.poisson_means_test(count1, nobs1, .7, nobs2)
1715
+
1716
+ # test negative events
1717
+ message = '`k1` and `k2` must be greater than or equal to 0.'
1718
+ with assert_raises(ValueError, match=message):
1719
+ stats.poisson_means_test(-1, nobs1, count2, nobs2)
1720
+ with assert_raises(ValueError, match=message):
1721
+ stats.poisson_means_test(count1, nobs1, -1, nobs2)
1722
+
1723
+ # test negative sample size
1724
+ message = '`n1` and `n2` must be greater than 0.'
1725
+ with assert_raises(ValueError, match=message):
1726
+ stats.poisson_means_test(count1, -1, count2, nobs2)
1727
+ with assert_raises(ValueError, match=message):
1728
+ stats.poisson_means_test(count1, nobs1, count2, -1)
1729
+
1730
+ # test negative difference
1731
+ message = 'diff must be greater than or equal to 0.'
1732
+ with assert_raises(ValueError, match=message):
1733
+ stats.poisson_means_test(count1, nobs1, count2, nobs2, diff=-1)
1734
+
1735
+ # test invalid alternatvie
1736
+ message = 'Alternative must be one of ...'
1737
+ with assert_raises(ValueError, match=message):
1738
+ stats.poisson_means_test(1, 2, 1, 2, alternative='error')
1739
+
1740
+
1741
+ class TestBWSTest:
1742
+
1743
+ def test_bws_input_validation(self):
1744
+ rng = np.random.default_rng(4571775098104213308)
1745
+
1746
+ x, y = rng.random(size=(2, 7))
1747
+
1748
+ message = '`x` and `y` must be exactly one-dimensional.'
1749
+ with pytest.raises(ValueError, match=message):
1750
+ stats.bws_test([x, x], [y, y])
1751
+
1752
+ message = '`x` and `y` must not contain NaNs.'
1753
+ with pytest.raises(ValueError, match=message):
1754
+ stats.bws_test([np.nan], y)
1755
+
1756
+ message = '`x` and `y` must be of nonzero size.'
1757
+ with pytest.raises(ValueError, match=message):
1758
+ stats.bws_test(x, [])
1759
+
1760
+ message = 'alternative` must be one of...'
1761
+ with pytest.raises(ValueError, match=message):
1762
+ stats.bws_test(x, y, alternative='ekki-ekki')
1763
+
1764
+ message = 'method` must be an instance of...'
1765
+ with pytest.raises(ValueError, match=message):
1766
+ stats.bws_test(x, y, method=42)
1767
+
1768
+
1769
+ def test_against_published_reference(self):
1770
+ # Test against Example 2 in bws_test Reference [1], pg 9
1771
+ # https://link.springer.com/content/pdf/10.1007/BF02762032.pdf
1772
+ x = [1, 2, 3, 4, 6, 7, 8]
1773
+ y = [5, 9, 10, 11, 12, 13, 14]
1774
+ res = stats.bws_test(x, y, alternative='two-sided')
1775
+ assert_allclose(res.statistic, 5.132, atol=1e-3)
1776
+ assert_equal(res.pvalue, 10/3432)
1777
+
1778
+
1779
+ @pytest.mark.parametrize(('alternative', 'statistic', 'pvalue'),
1780
+ [('two-sided', 1.7510204081633, 0.1264422777777),
1781
+ ('less', -1.7510204081633, 0.05754662004662),
1782
+ ('greater', -1.7510204081633, 0.9424533799534)])
1783
+ def test_against_R(self, alternative, statistic, pvalue):
1784
+ # Test against R library BWStest function bws_test
1785
+ # library(BWStest)
1786
+ # options(digits=16)
1787
+ # x = c(...)
1788
+ # y = c(...)
1789
+ # bws_test(x, y, alternative='two.sided')
1790
+ rng = np.random.default_rng(4571775098104213308)
1791
+ x, y = rng.random(size=(2, 7))
1792
+ res = stats.bws_test(x, y, alternative=alternative)
1793
+ assert_allclose(res.statistic, statistic, rtol=1e-13)
1794
+ assert_allclose(res.pvalue, pvalue, atol=1e-2, rtol=1e-1)
1795
+
1796
+ @pytest.mark.parametrize(('alternative', 'statistic', 'pvalue'),
1797
+ [('two-sided', 1.142629265891, 0.2903950180801),
1798
+ ('less', 0.99629665877411, 0.8545660222131),
1799
+ ('greater', 0.99629665877411, 0.1454339777869)])
1800
+ def test_against_R_imbalanced(self, alternative, statistic, pvalue):
1801
+ # Test against R library BWStest function bws_test
1802
+ # library(BWStest)
1803
+ # options(digits=16)
1804
+ # x = c(...)
1805
+ # y = c(...)
1806
+ # bws_test(x, y, alternative='two.sided')
1807
+ rng = np.random.default_rng(5429015622386364034)
1808
+ x = rng.random(size=9)
1809
+ y = rng.random(size=8)
1810
+ res = stats.bws_test(x, y, alternative=alternative)
1811
+ assert_allclose(res.statistic, statistic, rtol=1e-13)
1812
+ assert_allclose(res.pvalue, pvalue, atol=1e-2, rtol=1e-1)
1813
+
1814
+ def test_method(self):
1815
+ # Test that `method` parameter has the desired effect
1816
+ rng = np.random.default_rng(1520514347193347862)
1817
+ x, y = rng.random(size=(2, 10))
1818
+
1819
+ rng = np.random.default_rng(1520514347193347862)
1820
+ method = stats.PermutationMethod(n_resamples=10, random_state=rng)
1821
+ res1 = stats.bws_test(x, y, method=method)
1822
+
1823
+ assert len(res1.null_distribution) == 10
1824
+
1825
+ rng = np.random.default_rng(1520514347193347862)
1826
+ method = stats.PermutationMethod(n_resamples=10, random_state=rng)
1827
+ res2 = stats.bws_test(x, y, method=method)
1828
+
1829
+ assert_allclose(res1.null_distribution, res2.null_distribution)
1830
+
1831
+ rng = np.random.default_rng(5205143471933478621)
1832
+ method = stats.PermutationMethod(n_resamples=10, random_state=rng)
1833
+ res3 = stats.bws_test(x, y, method=method)
1834
+
1835
+ assert not np.allclose(res3.null_distribution, res1.null_distribution)
1836
+
1837
+ def test_directions(self):
1838
+ # Sanity check of the sign of the one-sided statistic
1839
+ rng = np.random.default_rng(1520514347193347862)
1840
+ x = rng.random(size=5)
1841
+ y = x - 1
1842
+
1843
+ res = stats.bws_test(x, y, alternative='greater')
1844
+ assert res.statistic > 0
1845
+ assert_equal(res.pvalue, 1 / len(res.null_distribution))
1846
+
1847
+ res = stats.bws_test(x, y, alternative='less')
1848
+ assert res.statistic > 0
1849
+ assert_equal(res.pvalue, 1)
1850
+
1851
+ res = stats.bws_test(y, x, alternative='less')
1852
+ assert res.statistic < 0
1853
+ assert_equal(res.pvalue, 1 / len(res.null_distribution))
1854
+
1855
+ res = stats.bws_test(y, x, alternative='greater')
1856
+ assert res.statistic < 0
1857
+ assert_equal(res.pvalue, 1)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_kdeoth.py ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy import stats, linalg, integrate
2
+ import numpy as np
3
+ from numpy.testing import (assert_almost_equal, assert_, assert_equal,
4
+ assert_array_almost_equal,
5
+ assert_array_almost_equal_nulp, assert_allclose)
6
+ import pytest
7
+ from pytest import raises as assert_raises
8
+
9
+
10
+ def test_kde_1d():
11
+ #some basic tests comparing to normal distribution
12
+ np.random.seed(8765678)
13
+ n_basesample = 500
14
+ xn = np.random.randn(n_basesample)
15
+ xnmean = xn.mean()
16
+ xnstd = xn.std(ddof=1)
17
+
18
+ # get kde for original sample
19
+ gkde = stats.gaussian_kde(xn)
20
+
21
+ # evaluate the density function for the kde for some points
22
+ xs = np.linspace(-7,7,501)
23
+ kdepdf = gkde.evaluate(xs)
24
+ normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
25
+ intervall = xs[1] - xs[0]
26
+
27
+ assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
28
+ prob1 = gkde.integrate_box_1d(xnmean, np.inf)
29
+ prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
30
+ assert_almost_equal(prob1, 0.5, decimal=1)
31
+ assert_almost_equal(prob2, 0.5, decimal=1)
32
+ assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
33
+ assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
34
+
35
+ assert_almost_equal(gkde.integrate_kde(gkde),
36
+ (kdepdf**2).sum()*intervall, decimal=2)
37
+ assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
38
+ (kdepdf*normpdf).sum()*intervall, decimal=2)
39
+
40
+
41
+ def test_kde_1d_weighted():
42
+ #some basic tests comparing to normal distribution
43
+ np.random.seed(8765678)
44
+ n_basesample = 500
45
+ xn = np.random.randn(n_basesample)
46
+ wn = np.random.rand(n_basesample)
47
+ xnmean = np.average(xn, weights=wn)
48
+ xnstd = np.sqrt(np.average((xn-xnmean)**2, weights=wn))
49
+
50
+ # get kde for original sample
51
+ gkde = stats.gaussian_kde(xn, weights=wn)
52
+
53
+ # evaluate the density function for the kde for some points
54
+ xs = np.linspace(-7,7,501)
55
+ kdepdf = gkde.evaluate(xs)
56
+ normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
57
+ intervall = xs[1] - xs[0]
58
+
59
+ assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
60
+ prob1 = gkde.integrate_box_1d(xnmean, np.inf)
61
+ prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
62
+ assert_almost_equal(prob1, 0.5, decimal=1)
63
+ assert_almost_equal(prob2, 0.5, decimal=1)
64
+ assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
65
+ assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
66
+
67
+ assert_almost_equal(gkde.integrate_kde(gkde),
68
+ (kdepdf**2).sum()*intervall, decimal=2)
69
+ assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
70
+ (kdepdf*normpdf).sum()*intervall, decimal=2)
71
+
72
+
73
+ @pytest.mark.xslow
74
+ def test_kde_2d():
75
+ #some basic tests comparing to normal distribution
76
+ np.random.seed(8765678)
77
+ n_basesample = 500
78
+
79
+ mean = np.array([1.0, 3.0])
80
+ covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
81
+
82
+ # Need transpose (shape (2, 500)) for kde
83
+ xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
84
+
85
+ # get kde for original sample
86
+ gkde = stats.gaussian_kde(xn)
87
+
88
+ # evaluate the density function for the kde for some points
89
+ x, y = np.mgrid[-7:7:500j, -7:7:500j]
90
+ grid_coords = np.vstack([x.ravel(), y.ravel()])
91
+ kdepdf = gkde.evaluate(grid_coords)
92
+ kdepdf = kdepdf.reshape(500, 500)
93
+
94
+ normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]),
95
+ mean=mean, cov=covariance)
96
+ intervall = y.ravel()[1] - y.ravel()[0]
97
+
98
+ assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
99
+
100
+ small = -1e100
101
+ large = 1e100
102
+ prob1 = gkde.integrate_box([small, mean[1]], [large, large])
103
+ prob2 = gkde.integrate_box([small, small], [large, mean[1]])
104
+
105
+ assert_almost_equal(prob1, 0.5, decimal=1)
106
+ assert_almost_equal(prob2, 0.5, decimal=1)
107
+ assert_almost_equal(gkde.integrate_kde(gkde),
108
+ (kdepdf**2).sum()*(intervall**2), decimal=2)
109
+ assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
110
+ (kdepdf*normpdf).sum()*(intervall**2), decimal=2)
111
+
112
+
113
+ @pytest.mark.xslow
114
+ def test_kde_2d_weighted():
115
+ #some basic tests comparing to normal distribution
116
+ np.random.seed(8765678)
117
+ n_basesample = 500
118
+
119
+ mean = np.array([1.0, 3.0])
120
+ covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
121
+
122
+ # Need transpose (shape (2, 500)) for kde
123
+ xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
124
+ wn = np.random.rand(n_basesample)
125
+
126
+ # get kde for original sample
127
+ gkde = stats.gaussian_kde(xn, weights=wn)
128
+
129
+ # evaluate the density function for the kde for some points
130
+ x, y = np.mgrid[-7:7:500j, -7:7:500j]
131
+ grid_coords = np.vstack([x.ravel(), y.ravel()])
132
+ kdepdf = gkde.evaluate(grid_coords)
133
+ kdepdf = kdepdf.reshape(500, 500)
134
+
135
+ normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]),
136
+ mean=mean, cov=covariance)
137
+ intervall = y.ravel()[1] - y.ravel()[0]
138
+
139
+ assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
140
+
141
+ small = -1e100
142
+ large = 1e100
143
+ prob1 = gkde.integrate_box([small, mean[1]], [large, large])
144
+ prob2 = gkde.integrate_box([small, small], [large, mean[1]])
145
+
146
+ assert_almost_equal(prob1, 0.5, decimal=1)
147
+ assert_almost_equal(prob2, 0.5, decimal=1)
148
+ assert_almost_equal(gkde.integrate_kde(gkde),
149
+ (kdepdf**2).sum()*(intervall**2), decimal=2)
150
+ assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
151
+ (kdepdf*normpdf).sum()*(intervall**2), decimal=2)
152
+
153
+
154
+ def test_kde_bandwidth_method():
155
+ def scotts_factor(kde_obj):
156
+ """Same as default, just check that it works."""
157
+ return np.power(kde_obj.n, -1./(kde_obj.d+4))
158
+
159
+ np.random.seed(8765678)
160
+ n_basesample = 50
161
+ xn = np.random.randn(n_basesample)
162
+
163
+ # Default
164
+ gkde = stats.gaussian_kde(xn)
165
+ # Supply a callable
166
+ gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
167
+ # Supply a scalar
168
+ gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
169
+
170
+ xs = np.linspace(-7,7,51)
171
+ kdepdf = gkde.evaluate(xs)
172
+ kdepdf2 = gkde2.evaluate(xs)
173
+ assert_almost_equal(kdepdf, kdepdf2)
174
+ kdepdf3 = gkde3.evaluate(xs)
175
+ assert_almost_equal(kdepdf, kdepdf3)
176
+
177
+ assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
178
+
179
+
180
+ def test_kde_bandwidth_method_weighted():
181
+ def scotts_factor(kde_obj):
182
+ """Same as default, just check that it works."""
183
+ return np.power(kde_obj.neff, -1./(kde_obj.d+4))
184
+
185
+ np.random.seed(8765678)
186
+ n_basesample = 50
187
+ xn = np.random.randn(n_basesample)
188
+
189
+ # Default
190
+ gkde = stats.gaussian_kde(xn)
191
+ # Supply a callable
192
+ gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
193
+ # Supply a scalar
194
+ gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
195
+
196
+ xs = np.linspace(-7,7,51)
197
+ kdepdf = gkde.evaluate(xs)
198
+ kdepdf2 = gkde2.evaluate(xs)
199
+ assert_almost_equal(kdepdf, kdepdf2)
200
+ kdepdf3 = gkde3.evaluate(xs)
201
+ assert_almost_equal(kdepdf, kdepdf3)
202
+
203
+ assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
204
+
205
+
206
+ # Subclasses that should stay working (extracted from various sources).
207
+ # Unfortunately the earlier design of gaussian_kde made it necessary for users
208
+ # to create these kinds of subclasses, or call _compute_covariance() directly.
209
+
210
+ class _kde_subclass1(stats.gaussian_kde):
211
+ def __init__(self, dataset):
212
+ self.dataset = np.atleast_2d(dataset)
213
+ self.d, self.n = self.dataset.shape
214
+ self.covariance_factor = self.scotts_factor
215
+ self._compute_covariance()
216
+
217
+
218
+ class _kde_subclass2(stats.gaussian_kde):
219
+ def __init__(self, dataset):
220
+ self.covariance_factor = self.scotts_factor
221
+ super().__init__(dataset)
222
+
223
+
224
+ class _kde_subclass4(stats.gaussian_kde):
225
+ def covariance_factor(self):
226
+ return 0.5 * self.silverman_factor()
227
+
228
+
229
+ def test_gaussian_kde_subclassing():
230
+ x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
231
+ xs = np.linspace(-10, 10, num=50)
232
+
233
+ # gaussian_kde itself
234
+ kde = stats.gaussian_kde(x1)
235
+ ys = kde(xs)
236
+
237
+ # subclass 1
238
+ kde1 = _kde_subclass1(x1)
239
+ y1 = kde1(xs)
240
+ assert_array_almost_equal_nulp(ys, y1, nulp=10)
241
+
242
+ # subclass 2
243
+ kde2 = _kde_subclass2(x1)
244
+ y2 = kde2(xs)
245
+ assert_array_almost_equal_nulp(ys, y2, nulp=10)
246
+
247
+ # subclass 3 was removed because we have no obligation to maintain support
248
+ # for user invocation of private methods
249
+
250
+ # subclass 4
251
+ kde4 = _kde_subclass4(x1)
252
+ y4 = kde4(x1)
253
+ y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
254
+
255
+ assert_array_almost_equal(y_expected, y4, decimal=6)
256
+
257
+ # Not a subclass, but check for use of _compute_covariance()
258
+ kde5 = kde
259
+ kde5.covariance_factor = lambda: kde.factor
260
+ kde5._compute_covariance()
261
+ y5 = kde5(xs)
262
+ assert_array_almost_equal_nulp(ys, y5, nulp=10)
263
+
264
+
265
+ def test_gaussian_kde_covariance_caching():
266
+ x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
267
+ xs = np.linspace(-10, 10, num=5)
268
+ # These expected values are from scipy 0.10, before some changes to
269
+ # gaussian_kde. They were not compared with any external reference.
270
+ y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]
271
+
272
+ # Set the bandwidth, then reset it to the default.
273
+ kde = stats.gaussian_kde(x1)
274
+ kde.set_bandwidth(bw_method=0.5)
275
+ kde.set_bandwidth(bw_method='scott')
276
+ y2 = kde(xs)
277
+
278
+ assert_array_almost_equal(y_expected, y2, decimal=7)
279
+
280
+
281
+ def test_gaussian_kde_monkeypatch():
282
+ """Ugly, but people may rely on this. See scipy pull request 123,
283
+ specifically the linked ML thread "Width of the Gaussian in stats.kde".
284
+ If it is necessary to break this later on, that is to be discussed on ML.
285
+ """
286
+ x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
287
+ xs = np.linspace(-10, 10, num=50)
288
+
289
+ # The old monkeypatched version to get at Silverman's Rule.
290
+ kde = stats.gaussian_kde(x1)
291
+ kde.covariance_factor = kde.silverman_factor
292
+ kde._compute_covariance()
293
+ y1 = kde(xs)
294
+
295
+ # The new saner version.
296
+ kde2 = stats.gaussian_kde(x1, bw_method='silverman')
297
+ y2 = kde2(xs)
298
+
299
+ assert_array_almost_equal_nulp(y1, y2, nulp=10)
300
+
301
+
302
+ def test_kde_integer_input():
303
+ """Regression test for #1181."""
304
+ x1 = np.arange(5)
305
+ kde = stats.gaussian_kde(x1)
306
+ y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
307
+ assert_array_almost_equal(kde(x1), y_expected, decimal=6)
308
+
309
+
310
+ _ftypes = ['float32', 'float64', 'float96', 'float128', 'int32', 'int64']
311
+
312
+
313
+ @pytest.mark.parametrize("bw_type", _ftypes + ["scott", "silverman"])
314
+ @pytest.mark.parametrize("dtype", _ftypes)
315
+ def test_kde_output_dtype(dtype, bw_type):
316
+ # Check whether the datatypes are available
317
+ dtype = getattr(np, dtype, None)
318
+
319
+ if bw_type in ["scott", "silverman"]:
320
+ bw = bw_type
321
+ else:
322
+ bw_type = getattr(np, bw_type, None)
323
+ bw = bw_type(3) if bw_type else None
324
+
325
+ if any(dt is None for dt in [dtype, bw]):
326
+ pytest.skip()
327
+
328
+ weights = np.arange(5, dtype=dtype)
329
+ dataset = np.arange(5, dtype=dtype)
330
+ k = stats.gaussian_kde(dataset, bw_method=bw, weights=weights)
331
+ points = np.arange(5, dtype=dtype)
332
+ result = k(points)
333
+ # weights are always cast to float64
334
+ assert result.dtype == np.result_type(dataset, points, np.float64(weights),
335
+ k.factor)
336
+
337
+
338
+ def test_pdf_logpdf_validation():
339
+ rng = np.random.default_rng(64202298293133848336925499069837723291)
340
+ xn = rng.standard_normal((2, 10))
341
+ gkde = stats.gaussian_kde(xn)
342
+ xs = rng.standard_normal((3, 10))
343
+
344
+ msg = "points have dimension 3, dataset has dimension 2"
345
+ with pytest.raises(ValueError, match=msg):
346
+ gkde.logpdf(xs)
347
+
348
+
349
+ def test_pdf_logpdf():
350
+ np.random.seed(1)
351
+ n_basesample = 50
352
+ xn = np.random.randn(n_basesample)
353
+
354
+ # Default
355
+ gkde = stats.gaussian_kde(xn)
356
+
357
+ xs = np.linspace(-15, 12, 25)
358
+ pdf = gkde.evaluate(xs)
359
+ pdf2 = gkde.pdf(xs)
360
+ assert_almost_equal(pdf, pdf2, decimal=12)
361
+
362
+ logpdf = np.log(pdf)
363
+ logpdf2 = gkde.logpdf(xs)
364
+ assert_almost_equal(logpdf, logpdf2, decimal=12)
365
+
366
+ # There are more points than data
367
+ gkde = stats.gaussian_kde(xs)
368
+ pdf = np.log(gkde.evaluate(xn))
369
+ pdf2 = gkde.logpdf(xn)
370
+ assert_almost_equal(pdf, pdf2, decimal=12)
371
+
372
+
373
+ def test_pdf_logpdf_weighted():
374
+ np.random.seed(1)
375
+ n_basesample = 50
376
+ xn = np.random.randn(n_basesample)
377
+ wn = np.random.rand(n_basesample)
378
+
379
+ # Default
380
+ gkde = stats.gaussian_kde(xn, weights=wn)
381
+
382
+ xs = np.linspace(-15, 12, 25)
383
+ pdf = gkde.evaluate(xs)
384
+ pdf2 = gkde.pdf(xs)
385
+ assert_almost_equal(pdf, pdf2, decimal=12)
386
+
387
+ logpdf = np.log(pdf)
388
+ logpdf2 = gkde.logpdf(xs)
389
+ assert_almost_equal(logpdf, logpdf2, decimal=12)
390
+
391
+ # There are more points than data
392
+ gkde = stats.gaussian_kde(xs, weights=np.random.rand(len(xs)))
393
+ pdf = np.log(gkde.evaluate(xn))
394
+ pdf2 = gkde.logpdf(xn)
395
+ assert_almost_equal(pdf, pdf2, decimal=12)
396
+
397
+
398
+ def test_marginal_1_axis():
399
+ rng = np.random.default_rng(6111799263660870475)
400
+ n_data = 50
401
+ n_dim = 10
402
+ dataset = rng.normal(size=(n_dim, n_data))
403
+ points = rng.normal(size=(n_dim, 3))
404
+
405
+ dimensions = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]) # dimensions to keep
406
+
407
+ kde = stats.gaussian_kde(dataset)
408
+ marginal = kde.marginal(dimensions)
409
+ pdf = marginal.pdf(points[dimensions])
410
+
411
+ def marginal_pdf_single(point):
412
+ def f(x):
413
+ x = np.concatenate(([x], point[dimensions]))
414
+ return kde.pdf(x)[0]
415
+ return integrate.quad(f, -np.inf, np.inf)[0]
416
+
417
+ def marginal_pdf(points):
418
+ return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points)
419
+
420
+ ref = marginal_pdf(points)
421
+
422
+ assert_allclose(pdf, ref, rtol=1e-6)
423
+
424
+
425
+ @pytest.mark.xslow
426
+ def test_marginal_2_axis():
427
+ rng = np.random.default_rng(6111799263660870475)
428
+ n_data = 30
429
+ n_dim = 4
430
+ dataset = rng.normal(size=(n_dim, n_data))
431
+ points = rng.normal(size=(n_dim, 3))
432
+
433
+ dimensions = np.array([1, 3]) # dimensions to keep
434
+
435
+ kde = stats.gaussian_kde(dataset)
436
+ marginal = kde.marginal(dimensions)
437
+ pdf = marginal.pdf(points[dimensions])
438
+
439
+ def marginal_pdf(points):
440
+ def marginal_pdf_single(point):
441
+ def f(y, x):
442
+ w, z = point[dimensions]
443
+ x = np.array([x, w, y, z])
444
+ return kde.pdf(x)[0]
445
+ return integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)[0]
446
+
447
+ return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points)
448
+
449
+ ref = marginal_pdf(points)
450
+
451
+ assert_allclose(pdf, ref, rtol=1e-6)
452
+
453
+
454
+ def test_marginal_iv():
455
+ # test input validation
456
+ rng = np.random.default_rng(6111799263660870475)
457
+ n_data = 30
458
+ n_dim = 4
459
+ dataset = rng.normal(size=(n_dim, n_data))
460
+ points = rng.normal(size=(n_dim, 3))
461
+
462
+ kde = stats.gaussian_kde(dataset)
463
+
464
+ # check that positive and negative indices are equivalent
465
+ dimensions1 = [-1, 1]
466
+ marginal1 = kde.marginal(dimensions1)
467
+ pdf1 = marginal1.pdf(points[dimensions1])
468
+
469
+ dimensions2 = [3, -3]
470
+ marginal2 = kde.marginal(dimensions2)
471
+ pdf2 = marginal2.pdf(points[dimensions2])
472
+
473
+ assert_equal(pdf1, pdf2)
474
+
475
+ # IV for non-integer dimensions
476
+ message = "Elements of `dimensions` must be integers..."
477
+ with pytest.raises(ValueError, match=message):
478
+ kde.marginal([1, 2.5])
479
+
480
+ # IV for uniquenes
481
+ message = "All elements of `dimensions` must be unique."
482
+ with pytest.raises(ValueError, match=message):
483
+ kde.marginal([1, 2, 2])
484
+
485
+ # IV for non-integer dimensions
486
+ message = (r"Dimensions \[-5 6\] are invalid for a distribution in 4...")
487
+ with pytest.raises(ValueError, match=message):
488
+ kde.marginal([1, -5, 6])
489
+
490
+
491
+ @pytest.mark.xslow
492
+ def test_logpdf_overflow():
493
+ # regression test for gh-12988; testing against linalg instability for
494
+ # very high dimensionality kde
495
+ np.random.seed(1)
496
+ n_dimensions = 2500
497
+ n_samples = 5000
498
+ xn = np.array([np.random.randn(n_samples) + (n) for n in range(
499
+ 0, n_dimensions)])
500
+
501
+ # Default
502
+ gkde = stats.gaussian_kde(xn)
503
+
504
+ logpdf = gkde.logpdf(np.arange(0, n_dimensions))
505
+ np.testing.assert_equal(np.isneginf(logpdf[0]), False)
506
+ np.testing.assert_equal(np.isnan(logpdf[0]), False)
507
+
508
+
509
+ def test_weights_intact():
510
+ # regression test for gh-9709: weights are not modified
511
+ np.random.seed(12345)
512
+ vals = np.random.lognormal(size=100)
513
+ weights = np.random.choice([1.0, 10.0, 100], size=vals.size)
514
+ orig_weights = weights.copy()
515
+
516
+ stats.gaussian_kde(np.log10(vals), weights=weights)
517
+ assert_allclose(weights, orig_weights, atol=1e-14, rtol=1e-14)
518
+
519
+
520
+ def test_weights_integer():
521
+ # integer weights are OK, cf gh-9709 (comment)
522
+ np.random.seed(12345)
523
+ values = [0.2, 13.5, 21.0, 75.0, 99.0]
524
+ weights = [1, 2, 4, 8, 16] # a list of integers
525
+ pdf_i = stats.gaussian_kde(values, weights=weights)
526
+ pdf_f = stats.gaussian_kde(values, weights=np.float64(weights))
527
+
528
+ xn = [0.3, 11, 88]
529
+ assert_allclose(pdf_i.evaluate(xn),
530
+ pdf_f.evaluate(xn), atol=1e-14, rtol=1e-14)
531
+
532
+
533
+ def test_seed():
534
+ # Test the seed option of the resample method
535
+ def test_seed_sub(gkde_trail):
536
+ n_sample = 200
537
+ # The results should be different without using seed
538
+ samp1 = gkde_trail.resample(n_sample)
539
+ samp2 = gkde_trail.resample(n_sample)
540
+ assert_raises(
541
+ AssertionError, assert_allclose, samp1, samp2, atol=1e-13
542
+ )
543
+ # Use integer seed
544
+ seed = 831
545
+ samp1 = gkde_trail.resample(n_sample, seed=seed)
546
+ samp2 = gkde_trail.resample(n_sample, seed=seed)
547
+ assert_allclose(samp1, samp2, atol=1e-13)
548
+ # Use RandomState
549
+ rstate1 = np.random.RandomState(seed=138)
550
+ samp1 = gkde_trail.resample(n_sample, seed=rstate1)
551
+ rstate2 = np.random.RandomState(seed=138)
552
+ samp2 = gkde_trail.resample(n_sample, seed=rstate2)
553
+ assert_allclose(samp1, samp2, atol=1e-13)
554
+
555
+ # check that np.random.Generator can be used (numpy >= 1.17)
556
+ if hasattr(np.random, 'default_rng'):
557
+ # obtain a np.random.Generator object
558
+ rng = np.random.default_rng(1234)
559
+ gkde_trail.resample(n_sample, seed=rng)
560
+
561
+ np.random.seed(8765678)
562
+ n_basesample = 500
563
+ wn = np.random.rand(n_basesample)
564
+ # Test 1D case
565
+ xn_1d = np.random.randn(n_basesample)
566
+
567
+ gkde_1d = stats.gaussian_kde(xn_1d)
568
+ test_seed_sub(gkde_1d)
569
+ gkde_1d_weighted = stats.gaussian_kde(xn_1d, weights=wn)
570
+ test_seed_sub(gkde_1d_weighted)
571
+
572
+ # Test 2D case
573
+ mean = np.array([1.0, 3.0])
574
+ covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
575
+ xn_2d = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
576
+
577
+ gkde_2d = stats.gaussian_kde(xn_2d)
578
+ test_seed_sub(gkde_2d)
579
+ gkde_2d_weighted = stats.gaussian_kde(xn_2d, weights=wn)
580
+ test_seed_sub(gkde_2d_weighted)
581
+
582
+
583
+ def test_singular_data_covariance_gh10205():
584
+ # When the data lie in a lower-dimensional subspace and this causes
585
+ # and exception, check that the error message is informative.
586
+ rng = np.random.default_rng(2321583144339784787)
587
+ mu = np.array([1, 10, 20])
588
+ sigma = np.array([[4, 10, 0], [10, 25, 0], [0, 0, 100]])
589
+ data = rng.multivariate_normal(mu, sigma, 1000)
590
+ try: # doesn't raise any error on some platforms, and that's OK
591
+ stats.gaussian_kde(data.T)
592
+ except linalg.LinAlgError:
593
+ msg = "The data appears to lie in a lower-dimensional subspace..."
594
+ with assert_raises(linalg.LinAlgError, match=msg):
595
+ stats.gaussian_kde(data.T)
596
+
597
+
598
+ def test_fewer_points_than_dimensions_gh17436():
599
+ # When the number of points is fewer than the number of dimensions, the
600
+ # the covariance matrix would be singular, and the exception tested in
601
+ # test_singular_data_covariance_gh10205 would occur. However, sometimes
602
+ # this occurs when the user passes in the transpose of what `gaussian_kde`
603
+ # expects. This can result in a huge covariance matrix, so bail early.
604
+ rng = np.random.default_rng(2046127537594925772)
605
+ rvs = rng.multivariate_normal(np.zeros(3), np.eye(3), size=5)
606
+ message = "Number of dimensions is greater than number of samples..."
607
+ with pytest.raises(ValueError, match=message):
608
+ stats.gaussian_kde(rvs)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_mgc.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from pytest import raises as assert_raises, warns as assert_warns
3
+
4
+ import numpy as np
5
+ from numpy.testing import assert_approx_equal, assert_allclose, assert_equal
6
+
7
+ from scipy.spatial.distance import cdist
8
+ from scipy import stats
9
+
10
+ class TestMGCErrorWarnings:
11
+ """ Tests errors and warnings derived from MGC.
12
+ """
13
+ def test_error_notndarray(self):
14
+ # raises error if x or y is not a ndarray
15
+ x = np.arange(20)
16
+ y = [5] * 20
17
+ assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
18
+ assert_raises(ValueError, stats.multiscale_graphcorr, y, x)
19
+
20
+ def test_error_shape(self):
21
+ # raises error if number of samples different (n)
22
+ x = np.arange(100).reshape(25, 4)
23
+ y = x.reshape(10, 10)
24
+ assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
25
+
26
+ def test_error_lowsamples(self):
27
+ # raises error if samples are low (< 3)
28
+ x = np.arange(3)
29
+ y = np.arange(3)
30
+ assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
31
+
32
+ def test_error_nans(self):
33
+ # raises error if inputs contain NaNs
34
+ x = np.arange(20, dtype=float)
35
+ x[0] = np.nan
36
+ assert_raises(ValueError, stats.multiscale_graphcorr, x, x)
37
+
38
+ y = np.arange(20)
39
+ assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
40
+
41
+ def test_error_wrongdisttype(self):
42
+ # raises error if metric is not a function
43
+ x = np.arange(20)
44
+ compute_distance = 0
45
+ assert_raises(ValueError, stats.multiscale_graphcorr, x, x,
46
+ compute_distance=compute_distance)
47
+
48
+ @pytest.mark.parametrize("reps", [
49
+ -1, # reps is negative
50
+ '1', # reps is not integer
51
+ ])
52
+ def test_error_reps(self, reps):
53
+ # raises error if reps is negative
54
+ x = np.arange(20)
55
+ assert_raises(ValueError, stats.multiscale_graphcorr, x, x, reps=reps)
56
+
57
+ def test_warns_reps(self):
58
+ # raises warning when reps is less than 1000
59
+ x = np.arange(20)
60
+ reps = 100
61
+ assert_warns(RuntimeWarning, stats.multiscale_graphcorr, x, x, reps=reps)
62
+
63
+ def test_error_infty(self):
64
+ # raises error if input contains infinities
65
+ x = np.arange(20)
66
+ y = np.ones(20) * np.inf
67
+ assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
68
+
69
+
70
+ class TestMGCStat:
71
+ """ Test validity of MGC test statistic
72
+ """
73
+ def _simulations(self, samps=100, dims=1, sim_type=""):
74
+ # linear simulation
75
+ if sim_type == "linear":
76
+ x = np.random.uniform(-1, 1, size=(samps, 1))
77
+ y = x + 0.3 * np.random.random_sample(size=(x.size, 1))
78
+
79
+ # spiral simulation
80
+ elif sim_type == "nonlinear":
81
+ unif = np.array(np.random.uniform(0, 5, size=(samps, 1)))
82
+ x = unif * np.cos(np.pi * unif)
83
+ y = (unif * np.sin(np.pi * unif) +
84
+ 0.4*np.random.random_sample(size=(x.size, 1)))
85
+
86
+ # independence (tests type I simulation)
87
+ elif sim_type == "independence":
88
+ u = np.random.normal(0, 1, size=(samps, 1))
89
+ v = np.random.normal(0, 1, size=(samps, 1))
90
+ u_2 = np.random.binomial(1, p=0.5, size=(samps, 1))
91
+ v_2 = np.random.binomial(1, p=0.5, size=(samps, 1))
92
+ x = u/3 + 2*u_2 - 1
93
+ y = v/3 + 2*v_2 - 1
94
+
95
+ # raises error if not approved sim_type
96
+ else:
97
+ raise ValueError("sim_type must be linear, nonlinear, or "
98
+ "independence")
99
+
100
+ # add dimensions of noise for higher dimensions
101
+ if dims > 1:
102
+ dims_noise = np.random.normal(0, 1, size=(samps, dims-1))
103
+ x = np.concatenate((x, dims_noise), axis=1)
104
+
105
+ return x, y
106
+
107
+ @pytest.mark.xslow
108
+ @pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [
109
+ ("linear", 0.97, 1/1000), # test linear simulation
110
+ ("nonlinear", 0.163, 1/1000), # test spiral simulation
111
+ ("independence", -0.0094, 0.78) # test independence simulation
112
+ ])
113
+ def test_oned(self, sim_type, obs_stat, obs_pvalue):
114
+ np.random.seed(12345678)
115
+
116
+ # generate x and y
117
+ x, y = self._simulations(samps=100, dims=1, sim_type=sim_type)
118
+
119
+ # test stat and pvalue
120
+ stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
121
+ assert_approx_equal(stat, obs_stat, significant=1)
122
+ assert_approx_equal(pvalue, obs_pvalue, significant=1)
123
+
124
+ @pytest.mark.xslow
125
+ @pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [
126
+ ("linear", 0.184, 1/1000), # test linear simulation
127
+ ("nonlinear", 0.0190, 0.117), # test spiral simulation
128
+ ])
129
+ def test_fived(self, sim_type, obs_stat, obs_pvalue):
130
+ np.random.seed(12345678)
131
+
132
+ # generate x and y
133
+ x, y = self._simulations(samps=100, dims=5, sim_type=sim_type)
134
+
135
+ # test stat and pvalue
136
+ stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
137
+ assert_approx_equal(stat, obs_stat, significant=1)
138
+ assert_approx_equal(pvalue, obs_pvalue, significant=1)
139
+
140
+ @pytest.mark.xslow
141
+ def test_twosamp(self):
142
+ np.random.seed(12345678)
143
+
144
+ # generate x and y
145
+ x = np.random.binomial(100, 0.5, size=(100, 5))
146
+ y = np.random.normal(0, 1, size=(80, 5))
147
+
148
+ # test stat and pvalue
149
+ stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
150
+ assert_approx_equal(stat, 1.0, significant=1)
151
+ assert_approx_equal(pvalue, 0.001, significant=1)
152
+
153
+ # generate x and y
154
+ y = np.random.normal(0, 1, size=(100, 5))
155
+
156
+ # test stat and pvalue
157
+ stat, pvalue, _ = stats.multiscale_graphcorr(x, y, is_twosamp=True)
158
+ assert_approx_equal(stat, 1.0, significant=1)
159
+ assert_approx_equal(pvalue, 0.001, significant=1)
160
+
161
+ @pytest.mark.xslow
162
+ def test_workers(self):
163
+ np.random.seed(12345678)
164
+
165
+ # generate x and y
166
+ x, y = self._simulations(samps=100, dims=1, sim_type="linear")
167
+
168
+ # test stat and pvalue
169
+ stat, pvalue, _ = stats.multiscale_graphcorr(x, y, workers=2)
170
+ assert_approx_equal(stat, 0.97, significant=1)
171
+ assert_approx_equal(pvalue, 0.001, significant=1)
172
+
173
+ @pytest.mark.xslow
174
+ def test_random_state(self):
175
+ # generate x and y
176
+ x, y = self._simulations(samps=100, dims=1, sim_type="linear")
177
+
178
+ # test stat and pvalue
179
+ stat, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1)
180
+ assert_approx_equal(stat, 0.97, significant=1)
181
+ assert_approx_equal(pvalue, 0.001, significant=1)
182
+
183
+ @pytest.mark.xslow
184
+ def test_dist_perm(self):
185
+ np.random.seed(12345678)
186
+ # generate x and y
187
+ x, y = self._simulations(samps=100, dims=1, sim_type="nonlinear")
188
+ distx = cdist(x, x, metric="euclidean")
189
+ disty = cdist(y, y, metric="euclidean")
190
+
191
+ stat_dist, pvalue_dist, _ = stats.multiscale_graphcorr(distx, disty,
192
+ compute_distance=None,
193
+ random_state=1)
194
+ assert_approx_equal(stat_dist, 0.163, significant=1)
195
+ assert_approx_equal(pvalue_dist, 0.001, significant=1)
196
+
197
+ @pytest.mark.fail_slow(10) # all other tests are XSLOW; we need at least one to run
198
+ @pytest.mark.slow
199
+ def test_pvalue_literature(self):
200
+ np.random.seed(12345678)
201
+
202
+ # generate x and y
203
+ x, y = self._simulations(samps=100, dims=1, sim_type="linear")
204
+
205
+ # test stat and pvalue
206
+ _, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1)
207
+ assert_allclose(pvalue, 1/1001)
208
+
209
+ @pytest.mark.xslow
210
+ def test_alias(self):
211
+ np.random.seed(12345678)
212
+
213
+ # generate x and y
214
+ x, y = self._simulations(samps=100, dims=1, sim_type="linear")
215
+
216
+ res = stats.multiscale_graphcorr(x, y, random_state=1)
217
+ assert_equal(res.stat, res.statistic)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_morestats.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_mstats_basic.py ADDED
@@ -0,0 +1,2066 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for the stats.mstats module (support for masked arrays)
3
+ """
4
+ import warnings
5
+ import platform
6
+
7
+ import numpy as np
8
+ from numpy import nan
9
+ import numpy.ma as ma
10
+ from numpy.ma import masked, nomask
11
+
12
+ import scipy.stats.mstats as mstats
13
+ from scipy import stats
14
+ from .common_tests import check_named_results
15
+ import pytest
16
+ from pytest import raises as assert_raises
17
+ from numpy.ma.testutils import (assert_equal, assert_almost_equal,
18
+ assert_array_almost_equal,
19
+ assert_array_almost_equal_nulp, assert_,
20
+ assert_allclose, assert_array_equal)
21
+ from numpy.testing import suppress_warnings
22
+ from scipy.stats import _mstats_basic, _stats_py
23
+ from scipy.conftest import skip_xp_invalid_arg
24
+ from scipy.stats._axis_nan_policy import SmallSampleWarning, too_small_1d_not_omit
25
+
26
+ class TestMquantiles:
27
+ def test_mquantiles_limit_keyword(self):
28
+ # Regression test for Trac ticket #867
29
+ data = np.array([[6., 7., 1.],
30
+ [47., 15., 2.],
31
+ [49., 36., 3.],
32
+ [15., 39., 4.],
33
+ [42., 40., -999.],
34
+ [41., 41., -999.],
35
+ [7., -999., -999.],
36
+ [39., -999., -999.],
37
+ [43., -999., -999.],
38
+ [40., -999., -999.],
39
+ [36., -999., -999.]])
40
+ desired = [[19.2, 14.6, 1.45],
41
+ [40.0, 37.5, 2.5],
42
+ [42.8, 40.05, 3.55]]
43
+ quants = mstats.mquantiles(data, axis=0, limit=(0, 50))
44
+ assert_almost_equal(quants, desired)
45
+
46
+
47
+ def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
48
+ # Note this doesn't test when axis is not specified
49
+ x = mstats.gmean(array_like, axis=axis, dtype=dtype)
50
+ assert_allclose(x, desired, rtol=rtol)
51
+ assert_equal(x.dtype, dtype)
52
+
53
+
54
+ def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
55
+ x = stats.hmean(array_like, axis=axis, dtype=dtype)
56
+ assert_allclose(x, desired, rtol=rtol)
57
+ assert_equal(x.dtype, dtype)
58
+
59
+
60
+ @skip_xp_invalid_arg
61
+ class TestGeoMean:
62
+ def test_1d(self):
63
+ a = [1, 2, 3, 4]
64
+ desired = np.power(1*2*3*4, 1./4.)
65
+ check_equal_gmean(a, desired, rtol=1e-14)
66
+
67
+ def test_1d_ma(self):
68
+ # Test a 1d masked array
69
+ a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
70
+ desired = 45.2872868812
71
+ check_equal_gmean(a, desired)
72
+
73
+ a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
74
+ desired = np.power(1*2*3, 1./3.)
75
+ check_equal_gmean(a, desired, rtol=1e-14)
76
+
77
+ def test_1d_ma_value(self):
78
+ # Test a 1d masked array with a masked value
79
+ a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
80
+ mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
81
+ desired = 41.4716627439
82
+ check_equal_gmean(a, desired)
83
+
84
+ def test_1d_ma0(self):
85
+ # Test a 1d masked array with zero element
86
+ a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
87
+ desired = 0
88
+ check_equal_gmean(a, desired)
89
+
90
+ def test_1d_ma_inf(self):
91
+ # Test a 1d masked array with negative element
92
+ a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
93
+ desired = np.nan
94
+ with np.errstate(invalid='ignore'):
95
+ check_equal_gmean(a, desired)
96
+
97
+ @pytest.mark.skipif(not hasattr(np, 'float96'),
98
+ reason='cannot find float96 so skipping')
99
+ def test_1d_float96(self):
100
+ a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
101
+ desired_dt = np.power(1*2*3, 1./3.).astype(np.float96)
102
+ check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14)
103
+
104
+ def test_2d_ma(self):
105
+ a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
106
+ mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
107
+ desired = np.array([1, 2, 3, 4])
108
+ check_equal_gmean(a, desired, axis=0, rtol=1e-14)
109
+
110
+ desired = ma.array([np.power(1*2*3*4, 1./4.),
111
+ np.power(2*3, 1./2.),
112
+ np.power(1*4, 1./2.)])
113
+ check_equal_gmean(a, desired, axis=-1, rtol=1e-14)
114
+
115
+ # Test a 2d masked array
116
+ a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
117
+ desired = 52.8885199
118
+ check_equal_gmean(np.ma.array(a), desired)
119
+
120
+
121
+ @skip_xp_invalid_arg
122
+ class TestHarMean:
123
+ def test_1d(self):
124
+ a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
125
+ desired = 3. / (1./1 + 1./2 + 1./3)
126
+ check_equal_hmean(a, desired, rtol=1e-14)
127
+
128
+ a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
129
+ desired = 34.1417152147
130
+ check_equal_hmean(a, desired)
131
+
132
+ a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
133
+ mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
134
+ desired = 31.8137186141
135
+ check_equal_hmean(a, desired)
136
+
137
+ @pytest.mark.skipif(not hasattr(np, 'float96'),
138
+ reason='cannot find float96 so skipping')
139
+ def test_1d_float96(self):
140
+ a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
141
+ desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), dtype=np.float96)
142
+ check_equal_hmean(a, desired_dt, dtype=np.float96)
143
+
144
+ def test_2d(self):
145
+ a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
146
+ mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
147
+ desired = ma.array([1, 2, 3, 4])
148
+ check_equal_hmean(a, desired, axis=0, rtol=1e-14)
149
+
150
+ desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)]
151
+ check_equal_hmean(a, desired, axis=-1, rtol=1e-14)
152
+
153
+ a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
154
+ desired = 38.6696271841
155
+ check_equal_hmean(np.ma.array(a), desired)
156
+
157
+
158
+ class TestRanking:
159
+ def test_ranking(self):
160
+ x = ma.array([0,1,1,1,2,3,4,5,5,6,])
161
+ assert_almost_equal(mstats.rankdata(x),
162
+ [1,3,3,3,5,6,7,8.5,8.5,10])
163
+ x[[3,4]] = masked
164
+ assert_almost_equal(mstats.rankdata(x),
165
+ [1,2.5,2.5,0,0,4,5,6.5,6.5,8])
166
+ assert_almost_equal(mstats.rankdata(x, use_missing=True),
167
+ [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
168
+ x = ma.array([0,1,5,1,2,4,3,5,1,6,])
169
+ assert_almost_equal(mstats.rankdata(x),
170
+ [1,3,8.5,3,5,7,6,8.5,3,10])
171
+ x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
172
+ assert_almost_equal(mstats.rankdata(x),
173
+ [[1,3,3,3,5], [6,7,8.5,8.5,10]])
174
+ assert_almost_equal(mstats.rankdata(x, axis=1),
175
+ [[1,3,3,3,5], [1,2,3.5,3.5,5]])
176
+ assert_almost_equal(mstats.rankdata(x,axis=0),
177
+ [[1,1,1,1,1], [2,2,2,2,2,]])
178
+
179
+
180
+ class TestCorr:
181
+ def test_pearsonr(self):
182
+ # Tests some computations of Pearson's r
183
+ x = ma.arange(10)
184
+ with warnings.catch_warnings():
185
+ # The tests in this context are edge cases, with perfect
186
+ # correlation or anticorrelation, or totally masked data.
187
+ # None of these should trigger a RuntimeWarning.
188
+ warnings.simplefilter("error", RuntimeWarning)
189
+
190
+ assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
191
+ assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)
192
+
193
+ x = ma.array(x, mask=True)
194
+ pr = mstats.pearsonr(x, x)
195
+ assert_(pr[0] is masked)
196
+ assert_(pr[1] is masked)
197
+
198
+ x1 = ma.array([-1.0, 0.0, 1.0])
199
+ y1 = ma.array([0, 0, 3])
200
+ r, p = mstats.pearsonr(x1, y1)
201
+ assert_almost_equal(r, np.sqrt(3)/2)
202
+ assert_almost_equal(p, 1.0/3)
203
+
204
+ # (x2, y2) have the same unmasked data as (x1, y1).
205
+ mask = [False, False, False, True]
206
+ x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
207
+ y2 = ma.array([0, 0, 3, -1], mask=mask)
208
+ r, p = mstats.pearsonr(x2, y2)
209
+ assert_almost_equal(r, np.sqrt(3)/2)
210
+ assert_almost_equal(p, 1.0/3)
211
+
212
+ def test_pearsonr_misaligned_mask(self):
213
+ mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0])
214
+ my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0])
215
+ x = np.array([1, 4, 5, 6])
216
+ y = np.array([9, 6, 5, 9])
217
+ mr, mp = mstats.pearsonr(mx, my)
218
+ r, p = stats.pearsonr(x, y)
219
+ assert_equal(mr, r)
220
+ assert_equal(mp, p)
221
+
222
+ def test_spearmanr(self):
223
+ # Tests some computations of Spearman's rho
224
+ (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95])
225
+ assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
226
+ (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
227
+ (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
228
+ assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
229
+
230
+ x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
231
+ 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
232
+ y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
233
+ 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
234
+ assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
235
+ x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
236
+ 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
237
+ y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
238
+ 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
239
+ (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
240
+ assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
241
+ # Next test is to make sure calculation uses sufficient precision.
242
+ # The denominator's value is ~n^3 and used to be represented as an
243
+ # int. 2000**3 > 2**32 so these arrays would cause overflow on
244
+ # some machines.
245
+ x = list(range(2000))
246
+ y = list(range(2000))
247
+ y[0], y[9] = y[9], y[0]
248
+ y[10], y[434] = y[434], y[10]
249
+ y[435], y[1509] = y[1509], y[435]
250
+ # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
251
+ # = 1 - (1 / 500)
252
+ # = 0.998
253
+ assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998)
254
+
255
+ # test for namedtuple attributes
256
+ res = mstats.spearmanr(x, y)
257
+ attributes = ('correlation', 'pvalue')
258
+ check_named_results(res, attributes, ma=True)
259
+
260
+ def test_spearmanr_alternative(self):
261
+ # check against R
262
+ # options(digits=16)
263
+ # cor.test(c(2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
264
+ # 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7),
265
+ # c(22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
266
+ # 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4),
267
+ # alternative='two.sided', method='spearman')
268
+ x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
269
+ 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
270
+ y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
271
+ 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
272
+
273
+ r_exp = 0.6887298747763864 # from cor.test
274
+
275
+ r, p = mstats.spearmanr(x, y)
276
+ assert_allclose(r, r_exp)
277
+ assert_allclose(p, 0.004519192910756)
278
+
279
+ r, p = mstats.spearmanr(x, y, alternative='greater')
280
+ assert_allclose(r, r_exp)
281
+ assert_allclose(p, 0.002259596455378)
282
+
283
+ r, p = mstats.spearmanr(x, y, alternative='less')
284
+ assert_allclose(r, r_exp)
285
+ assert_allclose(p, 0.9977404035446)
286
+
287
+ # intuitive test (with obvious positive correlation)
288
+ n = 100
289
+ x = np.linspace(0, 5, n)
290
+ y = 0.1*x + np.random.rand(n) # y is positively correlated w/ x
291
+
292
+ stat1, p1 = mstats.spearmanr(x, y)
293
+
294
+ stat2, p2 = mstats.spearmanr(x, y, alternative="greater")
295
+ assert_allclose(p2, p1 / 2) # positive correlation -> small p
296
+
297
+ stat3, p3 = mstats.spearmanr(x, y, alternative="less")
298
+ assert_allclose(p3, 1 - p1 / 2) # positive correlation -> large p
299
+
300
+ assert stat1 == stat2 == stat3
301
+
302
+ with pytest.raises(ValueError, match="alternative must be 'less'..."):
303
+ mstats.spearmanr(x, y, alternative="ekki-ekki")
304
+
305
+ @pytest.mark.skipif(platform.machine() == 'ppc64le',
306
+ reason="fails/crashes on ppc64le")
307
+ def test_kendalltau(self):
308
+ # check case with maximum disorder and p=1
309
+ x = ma.array(np.array([9, 2, 5, 6]))
310
+ y = ma.array(np.array([4, 7, 9, 11]))
311
+ # Cross-check with exact result from R:
312
+ # cor.test(x,y,method="kendall",exact=1)
313
+ expected = [0.0, 1.0]
314
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
315
+
316
+ # simple case without ties
317
+ x = ma.array(np.arange(10))
318
+ y = ma.array(np.arange(10))
319
+ # Cross-check with exact result from R:
320
+ # cor.test(x,y,method="kendall",exact=1)
321
+ expected = [1.0, 5.511463844797e-07]
322
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
323
+
324
+ # check exception in case of invalid method keyword
325
+ assert_raises(ValueError, mstats.kendalltau, x, y, method='banana')
326
+
327
+ # swap a couple of values
328
+ b = y[1]
329
+ y[1] = y[2]
330
+ y[2] = b
331
+ # Cross-check with exact result from R:
332
+ # cor.test(x,y,method="kendall",exact=1)
333
+ expected = [0.9555555555555556, 5.511463844797e-06]
334
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
335
+
336
+ # swap a couple more
337
+ b = y[5]
338
+ y[5] = y[6]
339
+ y[6] = b
340
+ # Cross-check with exact result from R:
341
+ # cor.test(x,y,method="kendall",exact=1)
342
+ expected = [0.9111111111111111, 2.976190476190e-05]
343
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
344
+
345
+ # same in opposite direction
346
+ x = ma.array(np.arange(10))
347
+ y = ma.array(np.arange(10)[::-1])
348
+ # Cross-check with exact result from R:
349
+ # cor.test(x,y,method="kendall",exact=1)
350
+ expected = [-1.0, 5.511463844797e-07]
351
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
352
+
353
+ # swap a couple of values
354
+ b = y[1]
355
+ y[1] = y[2]
356
+ y[2] = b
357
+ # Cross-check with exact result from R:
358
+ # cor.test(x,y,method="kendall",exact=1)
359
+ expected = [-0.9555555555555556, 5.511463844797e-06]
360
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
361
+
362
+ # swap a couple more
363
+ b = y[5]
364
+ y[5] = y[6]
365
+ y[6] = b
366
+ # Cross-check with exact result from R:
367
+ # cor.test(x,y,method="kendall",exact=1)
368
+ expected = [-0.9111111111111111, 2.976190476190e-05]
369
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
370
+
371
+ # Tests some computations of Kendall's tau
372
+ x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan])
373
+ y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
374
+ z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
375
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y)),
376
+ [+0.3333333, 0.75])
377
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')),
378
+ [+0.3333333, 0.4969059])
379
+ assert_almost_equal(np.asarray(mstats.kendalltau(x, z)),
380
+ [-0.5477226, 0.2785987])
381
+ #
382
+ x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20,
383
+ 10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan])
384
+ y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27,
385
+ 25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0])
386
+ result = mstats.kendalltau(x, y)
387
+ assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
388
+
389
+ # test for namedtuple attributes
390
+ attributes = ('correlation', 'pvalue')
391
+ check_named_results(result, attributes, ma=True)
392
+
393
+ @pytest.mark.skipif(platform.machine() == 'ppc64le',
394
+ reason="fails/crashes on ppc64le")
395
+ @pytest.mark.slow
396
+ def test_kendalltau_large(self):
397
+ # make sure internal variable use correct precision with
398
+ # larger arrays
399
+ x = np.arange(2000, dtype=float)
400
+ x = ma.masked_greater(x, 1995)
401
+ y = np.arange(2000, dtype=float)
402
+ y = np.concatenate((y[1000:], y[:1000]))
403
+ assert_(np.isfinite(mstats.kendalltau(x, y)[1]))
404
+
405
+ def test_kendalltau_seasonal(self):
406
+ # Tests the seasonal Kendall tau.
407
+ x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
408
+ [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
409
+ [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
410
+ [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
411
+ x = ma.fix_invalid(x).T
412
+ output = mstats.kendalltau_seasonal(x)
413
+ assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
414
+ assert_almost_equal(output['seasonal p-value'].round(2),
415
+ [0.18,0.53,0.20,0.04])
416
+
417
+ @pytest.mark.parametrize("method", ("exact", "asymptotic"))
418
+ @pytest.mark.parametrize("alternative", ("two-sided", "greater", "less"))
419
+ def test_kendalltau_mstats_vs_stats(self, method, alternative):
420
+ # Test that mstats.kendalltau and stats.kendalltau with
421
+ # nan_policy='omit' matches behavior of stats.kendalltau
422
+ # Accuracy of the alternatives is tested in stats/tests/test_stats.py
423
+
424
+ np.random.seed(0)
425
+ n = 50
426
+ x = np.random.rand(n)
427
+ y = np.random.rand(n)
428
+ mask = np.random.rand(n) > 0.5
429
+
430
+ x_masked = ma.array(x, mask=mask)
431
+ y_masked = ma.array(y, mask=mask)
432
+ res_masked = mstats.kendalltau(
433
+ x_masked, y_masked, method=method, alternative=alternative)
434
+
435
+ x_compressed = x_masked.compressed()
436
+ y_compressed = y_masked.compressed()
437
+ res_compressed = stats.kendalltau(
438
+ x_compressed, y_compressed, method=method, alternative=alternative)
439
+
440
+ x[mask] = np.nan
441
+ y[mask] = np.nan
442
+ res_nan = stats.kendalltau(
443
+ x, y, method=method, nan_policy='omit', alternative=alternative)
444
+
445
+ assert_allclose(res_masked, res_compressed)
446
+ assert_allclose(res_nan, res_compressed)
447
+
448
+ def test_kendall_p_exact_medium(self):
449
+ # Test for the exact method with medium samples (some n >= 171)
450
+ # expected values generated using SymPy
451
+ expectations = {(100, 2393): 0.62822615287956040664,
452
+ (101, 2436): 0.60439525773513602669,
453
+ (170, 0): 2.755801935583541e-307,
454
+ (171, 0): 0.0,
455
+ (171, 1): 2.755801935583541e-307,
456
+ (172, 1): 0.0,
457
+ (200, 9797): 0.74753983745929675209,
458
+ (201, 9656): 0.40959218958120363618}
459
+ for nc, expected in expectations.items():
460
+ res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
461
+ assert_almost_equal(res, expected)
462
+
463
+ @pytest.mark.xslow
464
+ def test_kendall_p_exact_large(self):
465
+ # Test for the exact method with large samples (n >= 171)
466
+ # expected values generated using SymPy
467
+ expectations = {(400, 38965): 0.48444283672113314099,
468
+ (401, 39516): 0.66363159823474837662,
469
+ (800, 156772): 0.42265448483120932055,
470
+ (801, 157849): 0.53437553412194416236,
471
+ (1600, 637472): 0.84200727400323538419,
472
+ (1601, 630304): 0.34465255088058593946}
473
+
474
+ for nc, expected in expectations.items():
475
+ res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
476
+ assert_almost_equal(res, expected)
477
+
478
+ def test_pointbiserial(self):
479
+ x = [1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0,
480
+ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, -1]
481
+ y = [14.8, 13.8, 12.4, 10.1, 7.1, 6.1, 5.8, 4.6, 4.3, 3.5, 3.3, 3.2,
482
+ 3.0, 2.8, 2.8, 2.5, 2.4, 2.3, 2.1, 1.7, 1.7, 1.5, 1.3, 1.3, 1.2,
483
+ 1.2, 1.1, 0.8, 0.7, 0.6, 0.5, 0.2, 0.2, 0.1, np.nan]
484
+ assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)
485
+
486
+ # test for namedtuple attributes
487
+ res = mstats.pointbiserialr(x, y)
488
+ attributes = ('correlation', 'pvalue')
489
+ check_named_results(res, attributes, ma=True)
490
+
491
+
492
+ @skip_xp_invalid_arg
493
+ class TestTrimming:
494
+
495
+ def test_trim(self):
496
+ a = ma.arange(10)
497
+ assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
498
+ a = ma.arange(10)
499
+ assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
500
+ a = ma.arange(10)
501
+ assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
502
+ [None,None,None,3,4,5,6,7,None,None])
503
+ a = ma.arange(10)
504
+ assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
505
+ [None,1,2,3,4,5,6,7,None,None])
506
+
507
+ a = ma.arange(12)
508
+ a[[0,-1]] = a[5] = masked
509
+ assert_equal(mstats.trim(a, (2,8)),
510
+ [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])
511
+
512
+ x = ma.arange(100).reshape(10, 10)
513
+ expected = [1]*10 + [0]*70 + [1]*20
514
+ trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
515
+ assert_equal(trimx._mask.ravel(), expected)
516
+ trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
517
+ assert_equal(trimx._mask.ravel(), expected)
518
+ trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
519
+ assert_equal(trimx._mask.T.ravel(), expected)
520
+
521
+ # same as above, but with an extra masked row inserted
522
+ x = ma.arange(110).reshape(11, 10)
523
+ x[1] = masked
524
+ expected = [1]*20 + [0]*70 + [1]*20
525
+ trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
526
+ assert_equal(trimx._mask.ravel(), expected)
527
+ trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
528
+ assert_equal(trimx._mask.ravel(), expected)
529
+ trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
530
+ assert_equal(trimx.T._mask.ravel(), expected)
531
+
532
+ def test_trim_old(self):
533
+ x = ma.arange(100)
534
+ assert_equal(mstats.trimboth(x).count(), 60)
535
+ assert_equal(mstats.trimtail(x,tail='r').count(), 80)
536
+ x[50:70] = masked
537
+ trimx = mstats.trimboth(x)
538
+ assert_equal(trimx.count(), 48)
539
+ assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
540
+ x._mask = nomask
541
+ x.shape = (10,10)
542
+ assert_equal(mstats.trimboth(x).count(), 60)
543
+ assert_equal(mstats.trimtail(x).count(), 80)
544
+
545
+ def test_trimr(self):
546
+ x = ma.arange(10)
547
+ result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False))
548
+ expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
549
+ mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
550
+ assert_equal(result, expected)
551
+ assert_equal(result.mask, expected.mask)
552
+
553
+ def test_trimmedmean(self):
554
+ data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
555
+ 296,299,306,376,428,515,666,1310,2611])
556
+ assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
557
+ assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
558
+ assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)
559
+
560
+ def test_trimmedvar(self):
561
+ # Basic test. Additional tests of all arguments, edge cases,
562
+ # input validation, and proper treatment of masked arrays are needed.
563
+ rng = np.random.default_rng(3262323289434724460)
564
+ data_orig = rng.random(size=20)
565
+ data = np.sort(data_orig)
566
+ data = ma.array(data, mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
567
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
568
+ assert_allclose(mstats.trimmed_var(data_orig, 0.1), data.var())
569
+
570
+ def test_trimmedstd(self):
571
+ # Basic test. Additional tests of all arguments, edge cases,
572
+ # input validation, and proper treatment of masked arrays are needed.
573
+ rng = np.random.default_rng(7121029245207162780)
574
+ data_orig = rng.random(size=20)
575
+ data = np.sort(data_orig)
576
+ data = ma.array(data, mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
577
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
578
+ assert_allclose(mstats.trimmed_std(data_orig, 0.1), data.std())
579
+
580
+ def test_trimmed_stde(self):
581
+ data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
582
+ 296,299,306,376,428,515,666,1310,2611])
583
+ assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
584
+ assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)
585
+
586
+ def test_winsorization(self):
587
+ data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
588
+ 296,299,306,376,428,515,666,1310,2611])
589
+ assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
590
+ 21551.4, 1)
591
+ assert_almost_equal(
592
+ mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1),
593
+ 11887.3, 1)
594
+ data[5] = masked
595
+ winsorized = mstats.winsorize(data)
596
+ assert_equal(winsorized.mask, data.mask)
597
+
598
+ def test_winsorization_nan(self):
599
+ data = ma.array([np.nan, np.nan, 0, 1, 2])
600
+ assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05),
601
+ nan_policy='raise')
602
+ # Testing propagate (default behavior)
603
+ assert_equal(mstats.winsorize(data, (0.4, 0.4)),
604
+ ma.array([2, 2, 2, 2, 2]))
605
+ assert_equal(mstats.winsorize(data, (0.8, 0.8)),
606
+ ma.array([np.nan, np.nan, np.nan, np.nan, np.nan]))
607
+ assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'),
608
+ ma.array([np.nan, np.nan, 2, 2, 2]))
609
+ assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'),
610
+ ma.array([np.nan, np.nan, 2, 2, 2]))
611
+
612
+
613
+ @skip_xp_invalid_arg
614
+ class TestMoments:
615
+ # Comparison numbers are found using R v.1.5.1
616
+ # note that length(testcase) = 4
617
+ # testmathworks comes from documentation for the
618
+ # Statistics Toolbox for Matlab and can be found at both
619
+ # https://www.mathworks.com/help/stats/kurtosis.html
620
+ # https://www.mathworks.com/help/stats/skewness.html
621
+ # Note that both test cases came from here.
622
+ testcase = [1,2,3,4]
623
+ testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
624
+ np.nan])
625
+ testcase_2d = ma.array(
626
+ np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
627
+ [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
628
+ [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
629
+ [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
630
+ [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
631
+ mask=np.array([[True, False, False, True, False],
632
+ [True, True, True, False, True],
633
+ [False, False, False, False, False],
634
+ [True, True, True, True, True],
635
+ [False, False, True, False, False]], dtype=bool))
636
+
637
+ def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
638
+ expect = np.asarray(expect)
639
+ if shape is not None:
640
+ expect = np.broadcast_to(expect, shape)
641
+ assert_array_equal(actual, expect)
642
+ if dtype is None:
643
+ dtype = expect.dtype
644
+ assert actual.dtype == dtype
645
+
646
+ def test_moment(self):
647
+ y = mstats.moment(self.testcase,1)
648
+ assert_almost_equal(y,0.0,10)
649
+ y = mstats.moment(self.testcase,2)
650
+ assert_almost_equal(y,1.25)
651
+ y = mstats.moment(self.testcase,3)
652
+ assert_almost_equal(y,0.0)
653
+ y = mstats.moment(self.testcase,4)
654
+ assert_almost_equal(y,2.5625)
655
+
656
+ # check array_like input for moment
657
+ y = mstats.moment(self.testcase, [1, 2, 3, 4])
658
+ assert_allclose(y, [0, 1.25, 0, 2.5625])
659
+
660
+ # check moment input consists only of integers
661
+ y = mstats.moment(self.testcase, 0.0)
662
+ assert_allclose(y, 1.0)
663
+ assert_raises(ValueError, mstats.moment, self.testcase, 1.2)
664
+ y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0])
665
+ assert_allclose(y, [0, 1.25, 0, 2.5625])
666
+
667
+ # test empty input
668
+ y = mstats.moment([])
669
+ self._assert_equal(y, np.nan, dtype=np.float64)
670
+ y = mstats.moment(np.array([], dtype=np.float32))
671
+ self._assert_equal(y, np.nan, dtype=np.float32)
672
+ y = mstats.moment(np.zeros((1, 0)), axis=0)
673
+ self._assert_equal(y, [], shape=(0,), dtype=np.float64)
674
+ y = mstats.moment([[]], axis=1)
675
+ self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
676
+ y = mstats.moment([[]], moment=[0, 1], axis=0)
677
+ self._assert_equal(y, [], shape=(2, 0))
678
+
679
+ x = np.arange(10.)
680
+ x[9] = np.nan
681
+ assert_equal(mstats.moment(x, 2), ma.masked) # NaN value is ignored
682
+
683
+ def test_variation(self):
684
+ y = mstats.variation(self.testcase)
685
+ assert_almost_equal(y,0.44721359549996, 10)
686
+
687
+ def test_variation_ddof(self):
688
+ # test variation with delta degrees of freedom
689
+ # regression test for gh-13341
690
+ a = np.array([1, 2, 3, 4, 5])
691
+ y = mstats.variation(a, ddof=1)
692
+ assert_almost_equal(y, 0.5270462766947299)
693
+
694
    def test_skewness(self):
        # Reference values for self.testmathworks come from MATLAB's
        # skewness(); bias=0 applies the sample-size bias correction.
        y = mstats.skew(self.testmathworks)
        assert_almost_equal(y,-0.29322304336607,10)
        y = mstats.skew(self.testmathworks,bias=0)
        assert_almost_equal(y,-0.437111105023940,10)
        # Symmetric data has zero skewness.
        y = mstats.skew(self.testcase)
        assert_almost_equal(y,0.0,10)

        # test that skew works on multidimensional masked arrays
        # (row at index 3 is expected to be fully masked in the output)
        correct_2d = ma.array(
            np.array([0.6882870394455785, 0, 0.2665647526856708,
                      0, -0.05211472114254485]),
            mask=np.array([False, False, False, True, False], dtype=bool)
        )
        assert_allclose(mstats.skew(self.testcase_2d, 1), correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.skew(row), correct_2d[i])

        correct_2d_bias_corrected = ma.array(
            np.array([1.685952043212545, 0.0, 0.3973712716070531, 0,
                      -0.09026534484117164]),
            mask=np.array([False, False, False, True, False], dtype=bool)
        )
        assert_allclose(mstats.skew(self.testcase_2d, 1, bias=False),
                        correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.skew(row, bias=False),
                                correct_2d_bias_corrected[i])

        # Check consistency between stats and mstats implementations
        assert_allclose(mstats.skew(self.testcase_2d[2, :]),
                        stats.skew(self.testcase_2d[2, :]))
726
+
727
    def test_kurtosis(self):
        # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
        # for compatibility with Matlab)
        y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
        assert_almost_equal(y, 2.1658856802973, 10)
        # Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
        # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
        # The MATLAB docs imply that both should give Fisher's
        y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0)
        assert_almost_equal(y, 3.663542721189047, 10)
        y = mstats.kurtosis(self.testcase, 0, 0)
        assert_almost_equal(y, 1.64)

        # test that kurtosis works on multidimensional masked arrays
        # (row at index 3 is expected to be fully masked in the output)
        correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
                                        -1.26979517952]),
                              mask=np.array([False, False, False, True,
                                             False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
                                  correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row), correct_2d[i])

        correct_2d_bias_corrected = ma.array(
            np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
            mask=np.array([False, False, False, True, False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
                                                  bias=False),
                                  correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row, bias=False),
                                correct_2d_bias_corrected[i])

        # Check consistency between stats and mstats implementations
        # (to within 4 units in the last place)
        assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
                                       stats.kurtosis(self.testcase_2d[2, :]),
                                       nulp=4)
765
+
766
+
767
class TestMode:
    def test_mode(self):
        # Exercise mstats.mode on lists, 2-D arrays and masked arrays,
        # across axis=None, 0 and -1. Expected results are (mode, count).
        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
        a2 = np.reshape(a1, (3,5))
        a3 = np.array([1,2,3,4,5,6])
        a4 = np.reshape(a3, (3,2))
        # Masked variants: values failing the condition are excluded
        # from the mode computation.
        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
        ma2 = ma.masked_where(a2 > 2, a2)
        ma3 = ma.masked_where(a3 < 2, a3)
        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
        assert_equal(mstats.mode(a1, axis=None), (3,4))
        assert_equal(mstats.mode(a1, axis=0), (3,4))
        assert_equal(mstats.mode(ma1, axis=None), (0,3))
        assert_equal(mstats.mode(a2, axis=None), (3,4))
        assert_equal(mstats.mode(ma2, axis=None), (0,3))
        assert_equal(mstats.mode(a3, axis=None), (1,1))
        assert_equal(mstats.mode(ma3, axis=None), (2,1))
        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))

        a1_res = mstats.mode(a1, axis=None)

        # test for namedtuple attributes
        attributes = ('mode', 'count')
        check_named_results(a1_res, attributes, ma=True)

    def test_mode_modifies_input(self):
        # regression test for gh-6428: mode(..., axis=None) may not modify
        # the input array
        im = np.zeros((100, 100))
        im[:50, :] += 1
        im[:, :50] += 1
        cp = im.copy()
        mstats.mode(im, None)
        assert_equal(im, cp)
806
+
807
+
808
class TestPercentile:
    def setup_method(self):
        # Fixture data; the tests below exercise scoreatpercentile only.
        self.a1 = [3, 4, 5, 10, -3, -5, 6]
        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]

    def test_percentile(self):
        # 0th/100th percentiles are the extremes; 50th is the median.
        x = np.arange(8) * 0.5
        assert_equal(mstats.scoreatpercentile(x, 0), 0.)
        assert_equal(mstats.scoreatpercentile(x, 100), 3.5)
        assert_equal(mstats.scoreatpercentile(x, 50), 1.75)

    def test_2D(self):
        # On 2-D input the percentile is computed per column.
        x = ma.array([[1, 1, 1],
                      [1, 1, 1],
                      [4, 4, 3],
                      [1, 1, 1],
                      [1, 1, 1]])
        assert_equal(mstats.scoreatpercentile(x, 50), [1, 1, 1])
827
+
828
+
829
@skip_xp_invalid_arg
class TestVariability:
    """ Comparison numbers are found using R v.1.5.1
        note that length(testcase) = 4
    """
    # Masked version of [1, 2, 3, 4]: the NaN entry becomes masked.
    testcase = ma.fix_invalid([1,2,3,4,np.nan])

    def test_sem(self):
        # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
        y = mstats.sem(self.testcase)
        assert_almost_equal(y, 0.6454972244)
        n = self.testcase.count()
        # sem with ddof=2 relates to ddof=0 by the factor sqrt(n/(n-2)).
        assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
                        mstats.sem(self.testcase, ddof=2))

    def test_zmap(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zmap(self.testcase, self.testcase)
        desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
                                 0.44721359549996, 1.3416407864999])
        # Compare only the unmasked entries of the result.
        assert_array_almost_equal(desired_unmaskedvals,
                                  y.data[y.mask == False], decimal=12)  # noqa: E712

    def test_zscore(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zscore(self.testcase)
        desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
                                  0.44721359549996, 1.3416407864999, np.nan])
        assert_almost_equal(desired, y, decimal=12)
860
+
861
+
862
@skip_xp_invalid_arg
class TestMisc:

    def test_obrientransform(self):
        # O'Brien transform of two samples; `result` holds reference
        # values rounded to 4 decimal places.
        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
                [6]+[7]*2+[8]*4+[9]*9+[10]*16]
        result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
                  [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
        assert_almost_equal(np.round(mstats.obrientransform(*args).T, 4),
                            result, 4)

    def test_ks_2samp(self):
        # Two-sample KS test on columns with missing data (NaNs masked);
        # 'g'/'l' select the one-sided 'greater'/'less' alternatives.
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        (winter, spring, summer, fall) = x.T

        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring), 4),
                            (0.1818, 0.9628))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'g'), 4),
                            (0.1469, 0.6886))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'l'), 4),
                            (0.1818, 0.6011))

    def test_friedmanchisq(self):
        # No missing values
        args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
                [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
                [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
        result = mstats.friedmanchisquare(*args)
        assert_almost_equal(result[0], 10.4737, 4)
        assert_almost_equal(result[1], 0.005317, 6)
        # Missing values
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x)
        result = mstats.friedmanchisquare(*x)
        assert_almost_equal(result[0], 2.0156, 4)
        assert_almost_equal(result[1], 0.5692, 4)

        # test for namedtuple attributes
        attributes = ('statistic', 'pvalue')
        check_named_results(result, attributes, ma=True)
909
+
910
+
911
def test_regress_simple():
    # Regress a line with sinusoidal noise. Test for #1273.
    # True slope is 0.2, intercept 10; estimates should land close.
    x = np.linspace(0, 100, 100)
    y = 0.2 * np.linspace(0, 100, 100) + 10
    y += np.sin(np.linspace(0, 20, 100))

    result = mstats.linregress(x, y)

    # Result is of a correct class and with correct fields
    lr = _stats_py.LinregressResult
    assert_(isinstance(result, lr))
    attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
    check_named_results(result, attributes, ma=True)
    # intercept_stderr is an extra attribute beyond the tuple fields.
    assert 'intercept_stderr' in dir(result)

    # Slope and intercept are estimated correctly
    assert_almost_equal(result.slope, 0.19644990055858422)
    assert_almost_equal(result.intercept, 10.211269918932341)
    assert_almost_equal(result.stderr, 0.002395781449783862)
    assert_almost_equal(result.intercept_stderr, 0.13866936078570702)
931
+
932
+
933
def test_linregress_identical_x():
    # All-identical x values leave the slope undefined; linregress must
    # raise a ValueError with a descriptive message.
    x = np.zeros(10)
    y = np.random.random(10)
    msg = "Cannot calculate a linear regression if all x values are identical"
    with assert_raises(ValueError, match=msg):
        mstats.linregress(x, y)
939
+
940
+
941
class TestTheilslopes:
    def test_theilslopes(self):
        # Test for basic slope and intercept.
        slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1])
        assert_almost_equal(slope, 0.5)
        assert_almost_equal(intercept, 0.5)

        # method='joint' uses a different intercept definition.
        slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1],
                                                            method='joint')
        assert_almost_equal(slope, 0.5)
        assert_almost_equal(intercept, 0.0)

        # Test for correct masking.
        y = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])
        slope, intercept, lower, upper = mstats.theilslopes(y)
        assert_almost_equal(slope, 1./3)
        assert_almost_equal(intercept, 2./3)

        slope, intercept, lower, upper = mstats.theilslopes(y,
                                                            method='joint')
        assert_almost_equal(slope, 1./3)
        assert_almost_equal(intercept, 0.0)

        # Test of confidence intervals from example in Sen (1968).
        x = [1, 2, 3, 4, 10, 12, 18]
        y = [9, 15, 19, 20, 45, 55, 78]
        slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
        assert_almost_equal(slope, 4)
        assert_almost_equal(intercept, 4.0)
        assert_almost_equal(upper, 4.38, decimal=2)
        assert_almost_equal(lower, 3.71, decimal=2)

        slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07,
                                                            method='joint')
        assert_almost_equal(slope, 4)
        assert_almost_equal(intercept, 6.0)
        assert_almost_equal(upper, 4.38, decimal=2)
        assert_almost_equal(lower, 3.71, decimal=2)


    def test_theilslopes_warnings(self):
        # Test `theilslopes` with degenerate input; see gh-15943
        msg = "All `x` coordinates.*|Mean of empty slice.|invalid value encountered.*"
        with pytest.warns(RuntimeWarning, match=msg):
            res = mstats.theilslopes([0, 1], [0, 0])
            assert np.all(np.isnan(res))
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered...")
            res = mstats.theilslopes([0, 0, 0], [0, 1, 0])
            assert_allclose(res, (0, 0, np.nan, np.nan))


    def test_theilslopes_namedtuple_consistency(self):
        """
        Simple test to ensure tuple backwards-compatibility of the returned
        TheilslopesResult object
        """
        y = [1, 2, 4]
        x = [4, 6, 8]
        slope, intercept, low_slope, high_slope = mstats.theilslopes(y, x)
        result = mstats.theilslopes(y, x)

        # note all four returned values are distinct here
        assert_equal(slope, result.slope)
        assert_equal(intercept, result.intercept)
        assert_equal(low_slope, result.low_slope)
        assert_equal(high_slope, result.high_slope)

    def test_gh19678_uint8(self):
        # `theilslopes` returned unexpected results when `y` was an unsigned type.
        # Check that this is resolved.
        # NOTE(review): this calls stats.theilslopes rather than the mstats
        # version — confirm that is intentional for this regression test.
        rng = np.random.default_rng(2549824598234528)
        y = rng.integers(0, 255, size=10, dtype=np.uint8)
        res = stats.theilslopes(y, y)
        np.testing.assert_allclose(res.slope, 1)
1016
+
1017
+
1018
def test_siegelslopes():
    # method should be exact for straight line
    y = 2 * np.arange(10) + 0.5
    assert_equal(mstats.siegelslopes(y), (2.0, 0.5))
    assert_equal(mstats.siegelslopes(y, method='separate'), (2.0, 0.5))

    x = 2 * np.arange(10)
    y = 5 * x - 3.0
    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))
    assert_equal(mstats.siegelslopes(y, x, method='separate'), (5.0, -3.0))

    # method is robust to outliers: breakdown point of 50%
    y[:4] = 1000
    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))

    # if there are no outliers, results should be comparable to linregress
    x = np.arange(10)
    y = -2.3 + 0.3*x + stats.norm.rvs(size=10, random_state=231)
    slope_ols, intercept_ols, _, _, _ = stats.linregress(x, y)

    slope, intercept = mstats.siegelslopes(y, x)
    assert_allclose(slope, slope_ols, rtol=0.1)
    assert_allclose(intercept, intercept_ols, rtol=0.1)

    slope, intercept = mstats.siegelslopes(y, x, method='separate')
    assert_allclose(slope, slope_ols, rtol=0.1)
    assert_allclose(intercept, intercept_ols, rtol=0.1)
1045
+
1046
+
1047
def test_siegelslopes_namedtuple_consistency():
    """
    Ensure the SiegelslopesResult object still unpacks like a plain tuple
    (backwards compatibility of the return value).
    """
    y = [1, 2, 4]
    x = [4, 6, 8]
    unpacked = mstats.siegelslopes(y, x)
    slope, intercept = unpacked
    result = mstats.siegelslopes(y, x)

    # The two returned values are distinct, so a swapped field would fail.
    for value, attr in zip((slope, intercept), ('slope', 'intercept')):
        assert_equal(value, getattr(result, attr))
1060
+
1061
+
1062
def test_sen_seasonal_slopes():
    # Validate mstats.sen_seasonal_slopes against a brute-force reference
    # (the implementation sketched in the function's documentation).
    rng = np.random.default_rng(5765986256978575148)
    data = rng.random(size=(100, 4))
    intra_slope, inter_slope = mstats.sen_seasonal_slopes(data)

    def pairwise_slopes(yi):
        # All slopes (y[j] - y[i]) / (j - i) for j > i.
        m = len(yi)
        t = np.arange(m)
        dy = yi - yi[:, np.newaxis]
        dt = t - t[:, np.newaxis]
        upper = np.triu(np.ones((m, m), dtype=bool), k=1)
        return dy[upper] / dt[upper]

    per_season = [pairwise_slopes(data[:, j]) for j in range(data.shape[1])]

    # Within-season slope: median of that season's pairwise slopes.
    for j, season_slopes in enumerate(per_season):
        assert_allclose(np.median(season_slopes), intra_slope[j])

    # Overall slope: median of the pooled pairwise slopes.
    assert_allclose(np.median(np.concatenate(per_season)), inter_slope)
1081
+
1082
+
1083
def test_plotting_positions():
    # Regression test for #1256: with alpha=beta=0 the plotting positions
    # of n points are i/(n+1).
    positions = mstats.plotting_positions(np.arange(3), 0, 0)
    expected = np.array([0.25, 0.5, 0.75])
    assert_array_almost_equal(positions.data, expected)
1087
+
1088
+
1089
@skip_xp_invalid_arg
class TestNormalitytests:

    def test_vs_nonmasked(self):
        # Masked-array implementations must agree with the plain ndarray
        # versions when nothing is masked.
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        assert_array_almost_equal(mstats.normaltest(x),
                                  stats.normaltest(x))
        assert_array_almost_equal(mstats.skewtest(x),
                                  stats.skewtest(x))
        assert_array_almost_equal(mstats.kurtosistest(x),
                                  stats.kurtosistest(x))

        # Too-small sample: the stats versions warn and return NaN,
        # the mstats versions raise ValueError.
        funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
        mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
        x = [1, 2, 3, 4]
        for func, mfunc in zip(funcs, mfuncs):
            with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
                res = func(x)
            assert np.isnan(res.statistic)
            assert np.isnan(res.pvalue)
            assert_raises(ValueError, mfunc, x)

    def test_axis_None(self):
        # Test axis=None (equal to axis=0 for 1-D input)
        x = np.array((-2,-1,0,1,2,3)*4)**2
        assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
        assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
        assert_allclose(mstats.kurtosistest(x, axis=None),
                        mstats.kurtosistest(x))

    def test_maskedarray_input(self):
        # Add some masked values, test result doesn't change
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        xm = np.ma.array(np.r_[np.inf, x, 10],
                         mask=np.r_[True, [False] * x.size, True])
        assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
        assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
        assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))

    def test_nd_input(self):
        # Each column of a 2-D input is tested independently; duplicated
        # columns must reproduce the 1-D result.
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        x_2d = np.vstack([x] * 2).T
        for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
            res_1d = func(x)
            res_2d = func(x_2d)
            assert_allclose(res_2d[0], [res_1d[0]] * 2)
            assert_allclose(res_2d[1], [res_1d[1]] * 2)

    def test_normaltest_result_attributes(self):
        # The result must expose 'statistic' and 'pvalue' named fields.
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.normaltest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kurtosistest_result_attributes(self):
        # Same named-field contract for kurtosistest.
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.kurtosistest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_regression_9033(self):
        # x clearly non-normal but power of negative denom needs
        # to be handled correctly to reject normality
        counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
        x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
        assert_equal(mstats.kurtosistest(x)[1] < 0.01, True)

    @pytest.mark.parametrize("test", ["skewtest", "kurtosistest"])
    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, test, alternative):
        # One-sided alternatives must match the stats implementation,
        # both without and with masked (NaN) entries.
        x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123)

        stats_test = getattr(stats, test)
        mstats_test = getattr(mstats, test)

        z_ex, p_ex = stats_test(x, alternative=alternative)
        z, p = mstats_test(x, alternative=alternative)
        assert_allclose(z, z_ex, atol=1e-12)
        assert_allclose(p, p_ex, atol=1e-12)

        # test with masked arrays
        x[1:5] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        z_ex, p_ex = stats_test(x.compressed(), alternative=alternative)
        z, p = mstats_test(x, alternative=alternative)
        assert_allclose(z, z_ex, atol=1e-12)
        assert_allclose(p, p_ex, atol=1e-12)

    def test_bad_alternative(self):
        # Invalid `alternative` values must be rejected with ValueError.
        x = stats.norm.rvs(size=20, random_state=123)
        msg = r"`alternative` must be..."

        with pytest.raises(ValueError, match=msg):
            mstats.skewtest(x, alternative='error')

        with pytest.raises(ValueError, match=msg):
            mstats.kurtosistest(x, alternative='error')
1186
+
1187
+
1188
class TestFOneway:
    def test_result_attributes(self):
        # mstats.f_oneway must return a namedtuple-like result exposing
        # 'statistic' and 'pvalue' fields (masked-array variant).
        group1 = np.array([655, 788], dtype=np.uint16)
        group2 = np.array([789, 772], dtype=np.uint16)
        res = mstats.f_oneway(group1, group2)
        check_named_results(res, ('statistic', 'pvalue'), ma=True)
1195
+
1196
+
1197
class TestMannwhitneyu:
    # data from gh-1428
    x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1.])

    y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
                  2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
                  1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
                  2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
                  2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
                  1., 1., 1., 1.])

    def test_result_attributes(self):
        # Result must expose 'statistic' and 'pvalue' named fields.
        res = mstats.mannwhitneyu(self.x, self.y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_against_stats(self):
        # gh-4641 reported that stats.mannwhitneyu returned half the p-value
        # of mstats.mannwhitneyu. Default alternative of stats.mannwhitneyu
        # is now two-sided, so they match.
        res1 = mstats.mannwhitneyu(self.x, self.y)
        res2 = stats.mannwhitneyu(self.x, self.y)
        assert res1.statistic == res2.statistic
        assert_allclose(res1.pvalue, res2.pvalue)
1244
+
1245
+
1246
class TestKruskal:
    def test_result_attributes(self):
        # mstats.kruskal must return a namedtuple-like result exposing
        # 'statistic' and 'pvalue' fields (masked-array variant).
        odd = [1, 3, 5, 7, 9]
        even = [2, 4, 6, 8, 10]
        res = mstats.kruskal(odd, even)
        check_named_results(res, ('statistic', 'pvalue'), ma=True)
1254
+
1255
+
1256
+ # TODO: for all ttest functions, add tests with masked array inputs
1257
class TestTtest_rel:
    def test_vs_nonmasked(self):
        # mstats.ttest_rel must agree with stats.ttest_rel on plain arrays.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)

        # 2-D inputs
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)

        # Check default is axis=0
        res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)

    def test_fully_masked(self):
        # Fully masked or all-NaN samples must yield (nan, nan).
        np.random.seed(1234567)
        # NOTE(review): mask shape (2, 3) differs from data shape (3, 2);
        # numpy.ma reshapes a same-size mask — confirm this is intended.
        outcome = ma.masked_array(np.random.randn(3, 2),
                                  mask=[[1, 1, 1], [0, 0, 0]])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [(outcome[:, 0], outcome[:, 1]),
                         ([np.nan, np.nan], [1.0, 2.0])]:
                t, p = mstats.ttest_rel(*pair)
                assert_array_equal(t, (np.nan, np.nan))
                assert_array_equal(p, (np.nan, np.nan))

    def test_result_attributes(self):
        # Result must expose 'statistic' and 'pvalue' named fields.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_invalid_input_size(self):
        # Mismatched sample sizes/shapes along the test axis must raise.
        assert_raises(ValueError, mstats.ttest_rel,
                      np.arange(10), np.arange(11))
        x = np.arange(24)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)

    def test_empty(self):
        # Empty inputs yield NaN statistic and p-value.
        res1 = mstats.ttest_rel([], [])
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # NOTE(review): this class targets ttest_rel, but the calls below
        # use mstats.ttest_ind — confirm whether ttest_rel was intended.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, np.array([np.nan, np.nan]))
            assert_array_equal(p, np.array([np.nan, np.nan]))

    def test_bad_alternative(self):
        # NOTE(review): uses mstats.ttest_ind inside the ttest_rel test
        # class — confirm whether ttest_rel was intended.
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided alternatives must match stats.ttest_rel, both without
        # and with masked (NaN) entries.
        x = stats.norm.rvs(loc=10, scale=5, size=25, random_state=42)
        y = stats.norm.rvs(loc=8, scale=2, size=25, random_state=42)

        t_ex, p_ex = stats.ttest_rel(x, y, alternative=alternative)
        t, p = mstats.ttest_rel(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        y[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t, p = mstats.ttest_rel(x, y, alternative=alternative)
        t_ex, p_ex = stats.ttest_rel(x.compressed(), y.compressed(),
                                     alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
1347
+
1348
+
1349
class TestTtest_ind:
    def test_vs_nonmasked(self):
        # mstats.ttest_ind must agree with stats.ttest_ind on plain arrays.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)

        # 2-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)

        # Check default is axis=0
        res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)

        # Check equal_var
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        assert_allclose(res4, res5)
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        assert_allclose(res4, res5)

    def test_fully_masked(self):
        # Fully masked or all-NaN samples must yield (nan, nan).
        np.random.seed(1234567)
        # NOTE(review): mask shape (2, 3) differs from data shape (3, 2);
        # numpy.ma reshapes a same-size mask — confirm this is intended.
        outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [(outcome[:, 0], outcome[:, 1]),
                         ([np.nan, np.nan], [1.0, 2.0])]:
                t, p = mstats.ttest_ind(*pair)
                assert_array_equal(t, (np.nan, np.nan))
                assert_array_equal(p, (np.nan, np.nan))

    def test_result_attributes(self):
        # Result must expose 'statistic' and 'pvalue' named fields.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_empty(self):
        # Empty inputs yield NaN statistic and p-value.
        res1 = mstats.ttest_ind([], [])
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # Zero within-group variance: infinite t and p == 0 when the means
        # differ; NaN when the samples are identical.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, (np.nan, np.nan))
            assert_array_equal(p, (np.nan, np.nan))

        # Same behaviour for the Welch (equal_var=False) variant.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
        assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],
                                            equal_var=False), (np.nan, np.nan))

    def test_bad_alternative(self):
        # Invalid `alternative` values must be rejected with ValueError.
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided alternatives must match stats.ttest_ind, both without
        # and with masked (NaN) entries.
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)
        y = stats.norm.rvs(loc=8, scale=2, size=100, random_state=123)

        t_ex, p_ex = stats.ttest_ind(x, y, alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        y[80:90] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t_ex, p_ex = stats.ttest_ind(x.compressed(), y.compressed(),
                                     alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
1442
+
1443
+
1444
class TestTtest_1samp:
    def test_vs_nonmasked(self):
        # mstats.ttest_1samp must agree with stats.ttest_1samp.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_1samp(outcome[:, 0], 1)
        res2 = mstats.ttest_1samp(outcome[:, 0], 1)
        assert_allclose(res1, res2)

    def test_fully_masked(self):
        # Fully masked or all-NaN samples must yield (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])
        expected = (np.nan, np.nan)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]:
                t, p = mstats.ttest_1samp(*pair)
                assert_array_equal(p, expected)
                assert_array_equal(t, expected)

    def test_result_attributes(self):
        # Result must expose 'statistic' and 'pvalue' named fields.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        res = mstats.ttest_1samp(outcome[:, 0], 1)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_empty(self):
        # Empty input yields NaN statistic and p-value.
        res1 = mstats.ttest_1samp([], 1)
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # Zero sample variance: infinite t (p == 0) when the popmean
        # differs from the constant sample; NaN when it equals it.
        t, p = mstats.ttest_1samp([0, 0, 0], 1)
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_1samp([0, 0, 0], 0)
            assert_(np.isnan(t))
            assert_array_equal(p, (np.nan, np.nan))

    def test_bad_alternative(self):
        # Invalid `alternative` values must be rejected with ValueError.
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_1samp([1, 2, 3], 4, alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided alternatives must match stats.ttest_1samp, both
        # without and with masked (NaN) entries.
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)

        t_ex, p_ex = stats.ttest_1samp(x, 9, alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        t_ex, p_ex = stats.ttest_1samp(x.compressed(), 9,
                                       alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
1509
+
1510
+
1511
+ class TestDescribe:
1512
+ """
1513
+ Tests for mstats.describe.
1514
+
1515
+ Note that there are also tests for `mstats.describe` in the
1516
+ class TestCompareWithStats.
1517
+ """
1518
+ def test_basic_with_axis(self):
1519
+ # This is a basic test that is also a regression test for gh-7303.
1520
+ a = np.ma.masked_array([[0, 1, 2, 3, 4, 9],
1521
+ [5, 5, 0, 9, 3, 3]],
1522
+ mask=[[0, 0, 0, 0, 0, 1],
1523
+ [0, 0, 1, 1, 0, 0]])
1524
+ result = mstats.describe(a, axis=1)
1525
+ assert_equal(result.nobs, [5, 4])
1526
+ amin, amax = result.minmax
1527
+ assert_equal(amin, [0, 3])
1528
+ assert_equal(amax, [4, 5])
1529
+ assert_equal(result.mean, [2.0, 4.0])
1530
+ assert_equal(result.variance, [2.0, 1.0])
1531
+ assert_equal(result.skewness, [0.0, 0.0])
1532
+ assert_allclose(result.kurtosis, [-1.3, -2.0])
1533
+
1534
+
1535
+ @skip_xp_invalid_arg
1536
+ class TestCompareWithStats:
1537
+ """
1538
+ Class to compare mstats results with stats results.
1539
+
1540
+ It is in general assumed that scipy.stats is at a more mature stage than
1541
+ stats.mstats. If a routine in mstats results in similar results like in
1542
+ scipy.stats, this is considered also as a proper validation of scipy.mstats
1543
+ routine.
1544
+
1545
+ Different sample sizes are used for testing, as some problems between stats
1546
+ and mstats are dependent on sample size.
1547
+
1548
+ Author: Alexander Loew
1549
+
1550
+ NOTE that some tests fail. This might be caused by
1551
+ a) actual differences or bugs between stats and mstats
1552
+ b) numerical inaccuracies
1553
+ c) different definitions of routine interfaces
1554
+
1555
+ These failures need to be checked. Current workaround is to have disabled these
1556
+ tests, but issuing reports on scipy-dev
1557
+
1558
+ """
1559
+ def get_n(self):
1560
+ """ Returns list of sample sizes to be used for comparison. """
1561
+ return [1000, 100, 10, 5]
1562
+
1563
+ def generate_xy_sample(self, n):
1564
+ # This routine generates numpy arrays and corresponding masked arrays
1565
+ # with the same data, but additional masked values
1566
+ np.random.seed(1234567)
1567
+ x = np.random.randn(n)
1568
+ y = x + np.random.randn(n)
1569
+ xm = np.full(len(x) + 5, 1e16)
1570
+ ym = np.full(len(y) + 5, 1e16)
1571
+ xm[0:len(x)] = x
1572
+ ym[0:len(y)] = y
1573
+ mask = xm > 9e15
1574
+ xm = np.ma.array(xm, mask=mask)
1575
+ ym = np.ma.array(ym, mask=mask)
1576
+ return x, y, xm, ym
1577
+
1578
+ def generate_xy_sample2D(self, n, nx):
1579
+ x = np.full((n, nx), np.nan)
1580
+ y = np.full((n, nx), np.nan)
1581
+ xm = np.full((n+5, nx), np.nan)
1582
+ ym = np.full((n+5, nx), np.nan)
1583
+
1584
+ for i in range(nx):
1585
+ x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)
1586
+
1587
+ xm[0:n, :] = x[0:n]
1588
+ ym[0:n, :] = y[0:n]
1589
+ xm = np.ma.array(xm, mask=np.isnan(xm))
1590
+ ym = np.ma.array(ym, mask=np.isnan(ym))
1591
+ return x, y, xm, ym
1592
+
1593
+ def test_linregress(self):
1594
+ for n in self.get_n():
1595
+ x, y, xm, ym = self.generate_xy_sample(n)
1596
+ result1 = stats.linregress(x, y)
1597
+ result2 = stats.mstats.linregress(xm, ym)
1598
+ assert_allclose(np.asarray(result1), np.asarray(result2))
1599
+
1600
+ def test_pearsonr(self):
1601
+ for n in self.get_n():
1602
+ x, y, xm, ym = self.generate_xy_sample(n)
1603
+ r, p = stats.pearsonr(x, y)
1604
+ rm, pm = stats.mstats.pearsonr(xm, ym)
1605
+
1606
+ assert_almost_equal(r, rm, decimal=14)
1607
+ assert_almost_equal(p, pm, decimal=14)
1608
+
1609
+ def test_spearmanr(self):
1610
+ for n in self.get_n():
1611
+ x, y, xm, ym = self.generate_xy_sample(n)
1612
+ r, p = stats.spearmanr(x, y)
1613
+ rm, pm = stats.mstats.spearmanr(xm, ym)
1614
+ assert_almost_equal(r, rm, 14)
1615
+ assert_almost_equal(p, pm, 14)
1616
+
1617
+ def test_spearmanr_backcompat_useties(self):
1618
+ # A regression test to ensure we don't break backwards compat
1619
+ # more than we have to (see gh-9204).
1620
+ x = np.arange(6)
1621
+ assert_raises(ValueError, mstats.spearmanr, x, x, False)
1622
+
1623
+ def test_gmean(self):
1624
+ for n in self.get_n():
1625
+ x, y, xm, ym = self.generate_xy_sample(n)
1626
+ r = stats.gmean(abs(x))
1627
+ rm = stats.mstats.gmean(abs(xm))
1628
+ assert_allclose(r, rm, rtol=1e-13)
1629
+
1630
+ r = stats.gmean(abs(y))
1631
+ rm = stats.mstats.gmean(abs(ym))
1632
+ assert_allclose(r, rm, rtol=1e-13)
1633
+
1634
+ def test_hmean(self):
1635
+ for n in self.get_n():
1636
+ x, y, xm, ym = self.generate_xy_sample(n)
1637
+
1638
+ r = stats.hmean(abs(x))
1639
+ rm = stats.mstats.hmean(abs(xm))
1640
+ assert_almost_equal(r, rm, 10)
1641
+
1642
+ r = stats.hmean(abs(y))
1643
+ rm = stats.mstats.hmean(abs(ym))
1644
+ assert_almost_equal(r, rm, 10)
1645
+
1646
+ def test_skew(self):
1647
+ for n in self.get_n():
1648
+ x, y, xm, ym = self.generate_xy_sample(n)
1649
+
1650
+ r = stats.skew(x)
1651
+ rm = stats.mstats.skew(xm)
1652
+ assert_almost_equal(r, rm, 10)
1653
+
1654
+ r = stats.skew(y)
1655
+ rm = stats.mstats.skew(ym)
1656
+ assert_almost_equal(r, rm, 10)
1657
+
1658
+ def test_moment(self):
1659
+ for n in self.get_n():
1660
+ x, y, xm, ym = self.generate_xy_sample(n)
1661
+
1662
+ r = stats.moment(x)
1663
+ rm = stats.mstats.moment(xm)
1664
+ assert_almost_equal(r, rm, 10)
1665
+
1666
+ r = stats.moment(y)
1667
+ rm = stats.mstats.moment(ym)
1668
+ assert_almost_equal(r, rm, 10)
1669
+
1670
+ def test_zscore(self):
1671
+ for n in self.get_n():
1672
+ x, y, xm, ym = self.generate_xy_sample(n)
1673
+
1674
+ # reference solution
1675
+ zx = (x - x.mean()) / x.std()
1676
+ zy = (y - y.mean()) / y.std()
1677
+
1678
+ # validate stats
1679
+ assert_allclose(stats.zscore(x), zx, rtol=1e-10)
1680
+ assert_allclose(stats.zscore(y), zy, rtol=1e-10)
1681
+
1682
+ # compare stats and mstats
1683
+ assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
1684
+ rtol=1e-10)
1685
+ assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
1686
+ rtol=1e-10)
1687
+
1688
+ def test_kurtosis(self):
1689
+ for n in self.get_n():
1690
+ x, y, xm, ym = self.generate_xy_sample(n)
1691
+ r = stats.kurtosis(x)
1692
+ rm = stats.mstats.kurtosis(xm)
1693
+ assert_almost_equal(r, rm, 10)
1694
+
1695
+ r = stats.kurtosis(y)
1696
+ rm = stats.mstats.kurtosis(ym)
1697
+ assert_almost_equal(r, rm, 10)
1698
+
1699
+ def test_sem(self):
1700
+ # example from stats.sem doc
1701
+ a = np.arange(20).reshape(5, 4)
1702
+ am = np.ma.array(a)
1703
+ r = stats.sem(a, ddof=1)
1704
+ rm = stats.mstats.sem(am, ddof=1)
1705
+
1706
+ assert_allclose(r, 2.82842712, atol=1e-5)
1707
+ assert_allclose(rm, 2.82842712, atol=1e-5)
1708
+
1709
+ for n in self.get_n():
1710
+ x, y, xm, ym = self.generate_xy_sample(n)
1711
+ assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
1712
+ stats.sem(x, axis=None, ddof=0), decimal=13)
1713
+ assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
1714
+ stats.sem(y, axis=None, ddof=0), decimal=13)
1715
+ assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
1716
+ stats.sem(x, axis=None, ddof=1), decimal=13)
1717
+ assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
1718
+ stats.sem(y, axis=None, ddof=1), decimal=13)
1719
+
1720
+ def test_describe(self):
1721
+ for n in self.get_n():
1722
+ x, y, xm, ym = self.generate_xy_sample(n)
1723
+ r = stats.describe(x, ddof=1)
1724
+ rm = stats.mstats.describe(xm, ddof=1)
1725
+ for ii in range(6):
1726
+ assert_almost_equal(np.asarray(r[ii]),
1727
+ np.asarray(rm[ii]),
1728
+ decimal=12)
1729
+
1730
+ def test_describe_result_attributes(self):
1731
+ actual = mstats.describe(np.arange(5))
1732
+ attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
1733
+ 'kurtosis')
1734
+ check_named_results(actual, attributes, ma=True)
1735
+
1736
+ def test_rankdata(self):
1737
+ for n in self.get_n():
1738
+ x, y, xm, ym = self.generate_xy_sample(n)
1739
+ r = stats.rankdata(x)
1740
+ rm = stats.mstats.rankdata(x)
1741
+ assert_allclose(r, rm)
1742
+
1743
+ def test_tmean(self):
1744
+ for n in self.get_n():
1745
+ x, y, xm, ym = self.generate_xy_sample(n)
1746
+ assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
1747
+ assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
1748
+
1749
+ def test_tmax(self):
1750
+ for n in self.get_n():
1751
+ x, y, xm, ym = self.generate_xy_sample(n)
1752
+ assert_almost_equal(stats.tmax(x,2.),
1753
+ stats.mstats.tmax(xm,2.), 10)
1754
+ assert_almost_equal(stats.tmax(y,2.),
1755
+ stats.mstats.tmax(ym,2.), 10)
1756
+
1757
+ assert_almost_equal(stats.tmax(x, upperlimit=3.),
1758
+ stats.mstats.tmax(xm, upperlimit=3.), 10)
1759
+ assert_almost_equal(stats.tmax(y, upperlimit=3.),
1760
+ stats.mstats.tmax(ym, upperlimit=3.), 10)
1761
+
1762
+ def test_tmin(self):
1763
+ for n in self.get_n():
1764
+ x, y, xm, ym = self.generate_xy_sample(n)
1765
+ assert_equal(stats.tmin(x), stats.mstats.tmin(xm))
1766
+ assert_equal(stats.tmin(y), stats.mstats.tmin(ym))
1767
+
1768
+ assert_almost_equal(stats.tmin(x, lowerlimit=-1.),
1769
+ stats.mstats.tmin(xm, lowerlimit=-1.), 10)
1770
+ assert_almost_equal(stats.tmin(y, lowerlimit=-1.),
1771
+ stats.mstats.tmin(ym, lowerlimit=-1.), 10)
1772
+
1773
+ def test_zmap(self):
1774
+ for n in self.get_n():
1775
+ x, y, xm, ym = self.generate_xy_sample(n)
1776
+ z = stats.zmap(x, y)
1777
+ zm = stats.mstats.zmap(xm, ym)
1778
+ assert_allclose(z, zm[0:len(z)], atol=1e-10)
1779
+
1780
+ def test_variation(self):
1781
+ for n in self.get_n():
1782
+ x, y, xm, ym = self.generate_xy_sample(n)
1783
+ assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
1784
+ decimal=12)
1785
+ assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
1786
+ decimal=12)
1787
+
1788
+ def test_tvar(self):
1789
+ for n in self.get_n():
1790
+ x, y, xm, ym = self.generate_xy_sample(n)
1791
+ assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
1792
+ decimal=12)
1793
+ assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
1794
+ decimal=12)
1795
+
1796
+ def test_trimboth(self):
1797
+ a = np.arange(20)
1798
+ b = stats.trimboth(a, 0.1)
1799
+ bm = stats.mstats.trimboth(a, 0.1)
1800
+ assert_allclose(np.sort(b), bm.data[~bm.mask])
1801
+
1802
+ def test_tsem(self):
1803
+ for n in self.get_n():
1804
+ x, y, xm, ym = self.generate_xy_sample(n)
1805
+ assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),
1806
+ decimal=14)
1807
+ assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),
1808
+ decimal=14)
1809
+ assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),
1810
+ stats.mstats.tsem(xm, limits=(-2., 2.)),
1811
+ decimal=14)
1812
+
1813
+ def test_skewtest(self):
1814
+ # this test is for 1D data
1815
+ for n in self.get_n():
1816
+ if n > 8:
1817
+ x, y, xm, ym = self.generate_xy_sample(n)
1818
+ r = stats.skewtest(x)
1819
+ rm = stats.mstats.skewtest(xm)
1820
+ assert_allclose(r, rm)
1821
+
1822
+ def test_skewtest_result_attributes(self):
1823
+ x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
1824
+ res = mstats.skewtest(x)
1825
+ attributes = ('statistic', 'pvalue')
1826
+ check_named_results(res, attributes, ma=True)
1827
+
1828
+ def test_skewtest_2D_notmasked(self):
1829
+ # a normal ndarray is passed to the masked function
1830
+ x = np.random.random((20, 2)) * 20.
1831
+ r = stats.skewtest(x)
1832
+ rm = stats.mstats.skewtest(x)
1833
+ assert_allclose(np.asarray(r), np.asarray(rm))
1834
+
1835
+ def test_skewtest_2D_WithMask(self):
1836
+ nx = 2
1837
+ for n in self.get_n():
1838
+ if n > 8:
1839
+ x, y, xm, ym = self.generate_xy_sample2D(n, nx)
1840
+ r = stats.skewtest(x)
1841
+ rm = stats.mstats.skewtest(xm)
1842
+
1843
+ assert_allclose(r[0][0], rm[0][0], rtol=1e-14)
1844
+ assert_allclose(r[0][1], rm[0][1], rtol=1e-14)
1845
+
1846
+ def test_normaltest(self):
1847
+ with np.errstate(over='raise'), suppress_warnings() as sup:
1848
+ sup.filter(UserWarning, "`kurtosistest` p-value may be inaccurate")
1849
+ sup.filter(UserWarning, "kurtosistest only valid for n>=20")
1850
+ for n in self.get_n():
1851
+ if n > 8:
1852
+ x, y, xm, ym = self.generate_xy_sample(n)
1853
+ r = stats.normaltest(x)
1854
+ rm = stats.mstats.normaltest(xm)
1855
+ assert_allclose(np.asarray(r), np.asarray(rm))
1856
+
1857
+ def test_find_repeats(self):
1858
+ x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float')
1859
+ tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float')
1860
+ mask = (tmp == 5.)
1861
+ xm = np.ma.array(tmp, mask=mask)
1862
+ x_orig, xm_orig = x.copy(), xm.copy()
1863
+
1864
+ r = stats.find_repeats(x)
1865
+ rm = stats.mstats.find_repeats(xm)
1866
+
1867
+ assert_equal(r, rm)
1868
+ assert_equal(x, x_orig)
1869
+ assert_equal(xm, xm_orig)
1870
+
1871
+ # This crazy behavior is expected by count_tied_groups, but is not
1872
+ # in the docstring...
1873
+ _, counts = stats.mstats.find_repeats([])
1874
+ assert_equal(counts, np.array(0, dtype=np.intp))
1875
+
1876
+ def test_kendalltau(self):
1877
+ for n in self.get_n():
1878
+ x, y, xm, ym = self.generate_xy_sample(n)
1879
+ r = stats.kendalltau(x, y)
1880
+ rm = stats.mstats.kendalltau(xm, ym)
1881
+ assert_almost_equal(r[0], rm[0], decimal=10)
1882
+ assert_almost_equal(r[1], rm[1], decimal=7)
1883
+
1884
+ def test_obrientransform(self):
1885
+ for n in self.get_n():
1886
+ x, y, xm, ym = self.generate_xy_sample(n)
1887
+ r = stats.obrientransform(x)
1888
+ rm = stats.mstats.obrientransform(xm)
1889
+ assert_almost_equal(r.T, rm[0:len(x)])
1890
+
1891
+ def test_ks_1samp(self):
1892
+ """Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays."""
1893
+ for mode in ['auto', 'exact', 'asymp']:
1894
+ with suppress_warnings():
1895
+ for alternative in ['less', 'greater', 'two-sided']:
1896
+ for n in self.get_n():
1897
+ x, y, xm, ym = self.generate_xy_sample(n)
1898
+ res1 = stats.ks_1samp(x, stats.norm.cdf,
1899
+ alternative=alternative, mode=mode)
1900
+ res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf,
1901
+ alternative=alternative, mode=mode)
1902
+ assert_equal(np.asarray(res1), np.asarray(res2))
1903
+ res3 = stats.ks_1samp(xm, stats.norm.cdf,
1904
+ alternative=alternative, mode=mode)
1905
+ assert_equal(np.asarray(res1), np.asarray(res3))
1906
+
1907
+ def test_kstest_1samp(self):
1908
+ """
1909
+ Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays.
1910
+ """
1911
+ for mode in ['auto', 'exact', 'asymp']:
1912
+ with suppress_warnings():
1913
+ for alternative in ['less', 'greater', 'two-sided']:
1914
+ for n in self.get_n():
1915
+ x, y, xm, ym = self.generate_xy_sample(n)
1916
+ res1 = stats.kstest(x, 'norm',
1917
+ alternative=alternative, mode=mode)
1918
+ res2 = stats.mstats.kstest(xm, 'norm',
1919
+ alternative=alternative, mode=mode)
1920
+ assert_equal(np.asarray(res1), np.asarray(res2))
1921
+ res3 = stats.kstest(xm, 'norm',
1922
+ alternative=alternative, mode=mode)
1923
+ assert_equal(np.asarray(res1), np.asarray(res3))
1924
+
1925
+ def test_ks_2samp(self):
1926
+ """Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays.
1927
+ gh-8431"""
1928
+ for mode in ['auto', 'exact', 'asymp']:
1929
+ with suppress_warnings() as sup:
1930
+ if mode in ['auto', 'exact']:
1931
+ message = "ks_2samp: Exact calculation unsuccessful."
1932
+ sup.filter(RuntimeWarning, message)
1933
+ for alternative in ['less', 'greater', 'two-sided']:
1934
+ for n in self.get_n():
1935
+ x, y, xm, ym = self.generate_xy_sample(n)
1936
+ res1 = stats.ks_2samp(x, y,
1937
+ alternative=alternative, mode=mode)
1938
+ res2 = stats.mstats.ks_2samp(xm, ym,
1939
+ alternative=alternative, mode=mode)
1940
+ assert_equal(np.asarray(res1), np.asarray(res2))
1941
+ res3 = stats.ks_2samp(xm, y,
1942
+ alternative=alternative, mode=mode)
1943
+ assert_equal(np.asarray(res1), np.asarray(res3))
1944
+
1945
+ def test_kstest_2samp(self):
1946
+ """
1947
+ Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays.
1948
+ """
1949
+ for mode in ['auto', 'exact', 'asymp']:
1950
+ with suppress_warnings() as sup:
1951
+ if mode in ['auto', 'exact']:
1952
+ message = "ks_2samp: Exact calculation unsuccessful."
1953
+ sup.filter(RuntimeWarning, message)
1954
+ for alternative in ['less', 'greater', 'two-sided']:
1955
+ for n in self.get_n():
1956
+ x, y, xm, ym = self.generate_xy_sample(n)
1957
+ res1 = stats.kstest(x, y,
1958
+ alternative=alternative, mode=mode)
1959
+ res2 = stats.mstats.kstest(xm, ym,
1960
+ alternative=alternative, mode=mode)
1961
+ assert_equal(np.asarray(res1), np.asarray(res2))
1962
+ res3 = stats.kstest(xm, y,
1963
+ alternative=alternative, mode=mode)
1964
+ assert_equal(np.asarray(res1), np.asarray(res3))
1965
+
1966
+
1967
+ class TestBrunnerMunzel:
1968
+ # Data from (Lumley, 1996)
1969
+ X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1,
1970
+ 1, 1, 1, 2, 4, 1, 1, np.nan])
1971
+ Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4])
1972
+ significant = 14
1973
+
1974
+ def test_brunnermunzel_one_sided(self):
1975
+ # Results are compared with R's lawstat package.
1976
+ u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='less')
1977
+ u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='greater')
1978
+ u3, p3 = mstats.brunnermunzel(self.X, self.Y, alternative='greater')
1979
+ u4, p4 = mstats.brunnermunzel(self.Y, self.X, alternative='less')
1980
+
1981
+ assert_almost_equal(p1, p2, decimal=self.significant)
1982
+ assert_almost_equal(p3, p4, decimal=self.significant)
1983
+ assert_(p1 != p3)
1984
+ assert_almost_equal(u1, 3.1374674823029505,
1985
+ decimal=self.significant)
1986
+ assert_almost_equal(u2, -3.1374674823029505,
1987
+ decimal=self.significant)
1988
+ assert_almost_equal(u3, 3.1374674823029505,
1989
+ decimal=self.significant)
1990
+ assert_almost_equal(u4, -3.1374674823029505,
1991
+ decimal=self.significant)
1992
+ assert_almost_equal(p1, 0.0028931043330757342,
1993
+ decimal=self.significant)
1994
+ assert_almost_equal(p3, 0.99710689566692423,
1995
+ decimal=self.significant)
1996
+
1997
+ def test_brunnermunzel_two_sided(self):
1998
+ # Results are compared with R's lawstat package.
1999
+ u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='two-sided')
2000
+ u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='two-sided')
2001
+
2002
+ assert_almost_equal(p1, p2, decimal=self.significant)
2003
+ assert_almost_equal(u1, 3.1374674823029505,
2004
+ decimal=self.significant)
2005
+ assert_almost_equal(u2, -3.1374674823029505,
2006
+ decimal=self.significant)
2007
+ assert_almost_equal(p1, 0.0057862086661515377,
2008
+ decimal=self.significant)
2009
+
2010
+ def test_brunnermunzel_default(self):
2011
+ # The default value for alternative is two-sided
2012
+ u1, p1 = mstats.brunnermunzel(self.X, self.Y)
2013
+ u2, p2 = mstats.brunnermunzel(self.Y, self.X)
2014
+
2015
+ assert_almost_equal(p1, p2, decimal=self.significant)
2016
+ assert_almost_equal(u1, 3.1374674823029505,
2017
+ decimal=self.significant)
2018
+ assert_almost_equal(u2, -3.1374674823029505,
2019
+ decimal=self.significant)
2020
+ assert_almost_equal(p1, 0.0057862086661515377,
2021
+ decimal=self.significant)
2022
+
2023
+ def test_brunnermunzel_alternative_error(self):
2024
+ alternative = "error"
2025
+ distribution = "t"
2026
+ assert_(alternative not in ["two-sided", "greater", "less"])
2027
+ assert_raises(ValueError,
2028
+ mstats.brunnermunzel,
2029
+ self.X,
2030
+ self.Y,
2031
+ alternative,
2032
+ distribution)
2033
+
2034
+ def test_brunnermunzel_distribution_norm(self):
2035
+ u1, p1 = mstats.brunnermunzel(self.X, self.Y, distribution="normal")
2036
+ u2, p2 = mstats.brunnermunzel(self.Y, self.X, distribution="normal")
2037
+ assert_almost_equal(p1, p2, decimal=self.significant)
2038
+ assert_almost_equal(u1, 3.1374674823029505,
2039
+ decimal=self.significant)
2040
+ assert_almost_equal(u2, -3.1374674823029505,
2041
+ decimal=self.significant)
2042
+ assert_almost_equal(p1, 0.0017041417600383024,
2043
+ decimal=self.significant)
2044
+
2045
+ def test_brunnermunzel_distribution_error(self):
2046
+ alternative = "two-sided"
2047
+ distribution = "error"
2048
+ assert_(alternative not in ["t", "normal"])
2049
+ assert_raises(ValueError,
2050
+ mstats.brunnermunzel,
2051
+ self.X,
2052
+ self.Y,
2053
+ alternative,
2054
+ distribution)
2055
+
2056
+ def test_brunnermunzel_empty_imput(self):
2057
+ u1, p1 = mstats.brunnermunzel(self.X, [])
2058
+ u2, p2 = mstats.brunnermunzel([], self.Y)
2059
+ u3, p3 = mstats.brunnermunzel([], [])
2060
+
2061
+ assert_(np.isnan(u1))
2062
+ assert_(np.isnan(p1))
2063
+ assert_(np.isnan(u2))
2064
+ assert_(np.isnan(p2))
2065
+ assert_(np.isnan(u3))
2066
+ assert_(np.isnan(p3))
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_multicomp.py ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+
3
+ import numpy as np
4
+ import pytest
5
+ from numpy.testing import assert_allclose
6
+
7
+ from scipy import stats
8
+ from scipy.stats._multicomp import _pvalue_dunnett, DunnettResult
9
+
10
+
11
+ class TestDunnett:
12
+ # For the following tests, p-values were computed using Matlab, e.g.
13
+ # sample = [18. 15. 18. 16. 17. 15. 14. 14. 14. 15. 15....
14
+ # 14. 15. 14. 22. 18. 21. 21. 10. 10. 11. 9....
15
+ # 25. 26. 17.5 16. 15.5 14.5 22. 22. 24. 22.5 29....
16
+ # 24.5 20. 18. 18.5 17.5 26.5 13. 16.5 13. 13. 13....
17
+ # 28. 27. 34. 31. 29. 27. 24. 23. 38. 36. 25....
18
+ # 38. 26. 22. 36. 27. 27. 32. 28. 31....
19
+ # 24. 27. 33. 32. 28. 19. 37. 31. 36. 36....
20
+ # 34. 38. 32. 38. 32....
21
+ # 26. 24. 26. 25. 29. 29.5 16.5 36. 44....
22
+ # 25. 27. 19....
23
+ # 25. 20....
24
+ # 28.];
25
+ # j = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ...
26
+ # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ...
27
+ # 0 0 0 0...
28
+ # 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1...
29
+ # 2 2 2 2 2 2 2 2 2...
30
+ # 3 3 3...
31
+ # 4 4...
32
+ # 5];
33
+ # [~, ~, stats] = anova1(sample, j, "off");
34
+ # [results, ~, ~, gnames] = multcompare(stats, ...
35
+ # "CriticalValueType", "dunnett", ...
36
+ # "Approximate", false);
37
+ # tbl = array2table(results, "VariableNames", ...
38
+ # ["Group", "Control Group", "Lower Limit", ...
39
+ # "Difference", "Upper Limit", "P-value"]);
40
+ # tbl.("Group") = gnames(tbl.("Group"));
41
+ # tbl.("Control Group") = gnames(tbl.("Control Group"))
42
+
43
+ # Matlab doesn't report the statistic, so the statistics were
44
+ # computed using R multcomp `glht`, e.g.:
45
+ # library(multcomp)
46
+ # options(digits=16)
47
+ # control < - c(18.0, 15.0, 18.0, 16.0, 17.0, 15.0, 14.0, 14.0, 14.0,
48
+ # 15.0, 15.0, 14.0, 15.0, 14.0, 22.0, 18.0, 21.0, 21.0,
49
+ # 10.0, 10.0, 11.0, 9.0, 25.0, 26.0, 17.5, 16.0, 15.5,
50
+ # 14.5, 22.0, 22.0, 24.0, 22.5, 29.0, 24.5, 20.0, 18.0,
51
+ # 18.5, 17.5, 26.5, 13.0, 16.5, 13.0, 13.0, 13.0, 28.0,
52
+ # 27.0, 34.0, 31.0, 29.0, 27.0, 24.0, 23.0, 38.0, 36.0,
53
+ # 25.0, 38.0, 26.0, 22.0, 36.0, 27.0, 27.0, 32.0, 28.0,
54
+ # 31.0)
55
+ # t < - c(24.0, 27.0, 33.0, 32.0, 28.0, 19.0, 37.0, 31.0, 36.0, 36.0,
56
+ # 34.0, 38.0, 32.0, 38.0, 32.0)
57
+ # w < - c(26.0, 24.0, 26.0, 25.0, 29.0, 29.5, 16.5, 36.0, 44.0)
58
+ # x < - c(25.0, 27.0, 19.0)
59
+ # y < - c(25.0, 20.0)
60
+ # z < - c(28.0)
61
+ #
62
+ # groups = factor(rep(c("control", "t", "w", "x", "y", "z"),
63
+ # times=c(length(control), length(t), length(w),
64
+ # length(x), length(y), length(z))))
65
+ # df < - data.frame(response=c(control, t, w, x, y, z),
66
+ # group=groups)
67
+ # model < - aov(response
68
+ # ~group, data = df)
69
+ # test < - glht(model=model,
70
+ # linfct=mcp(group="Dunnett"),
71
+ # alternative="g")
72
+ # summary(test)
73
+ # confint(test)
74
+ # p-values agreed with those produced by Matlab to at least atol=1e-3
75
+
76
+ # From Matlab's documentation on multcompare
77
+ samples_1 = [
78
+ [
79
+ 24.0, 27.0, 33.0, 32.0, 28.0, 19.0, 37.0, 31.0, 36.0, 36.0,
80
+ 34.0, 38.0, 32.0, 38.0, 32.0
81
+ ],
82
+ [26.0, 24.0, 26.0, 25.0, 29.0, 29.5, 16.5, 36.0, 44.0],
83
+ [25.0, 27.0, 19.0],
84
+ [25.0, 20.0],
85
+ [28.0]
86
+ ]
87
+ control_1 = [
88
+ 18.0, 15.0, 18.0, 16.0, 17.0, 15.0, 14.0, 14.0, 14.0, 15.0, 15.0,
89
+ 14.0, 15.0, 14.0, 22.0, 18.0, 21.0, 21.0, 10.0, 10.0, 11.0, 9.0,
90
+ 25.0, 26.0, 17.5, 16.0, 15.5, 14.5, 22.0, 22.0, 24.0, 22.5, 29.0,
91
+ 24.5, 20.0, 18.0, 18.5, 17.5, 26.5, 13.0, 16.5, 13.0, 13.0, 13.0,
92
+ 28.0, 27.0, 34.0, 31.0, 29.0, 27.0, 24.0, 23.0, 38.0, 36.0, 25.0,
93
+ 38.0, 26.0, 22.0, 36.0, 27.0, 27.0, 32.0, 28.0, 31.0
94
+ ]
95
+ pvalue_1 = [4.727e-06, 0.022346, 0.97912, 0.99953, 0.86579] # Matlab
96
+ # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
97
+ p_1_twosided = [1e-4, 0.02237, 0.97913, 0.99953, 0.86583]
98
+ p_1_greater = [1e-4, 0.011217, 0.768500, 0.896991, 0.577211]
99
+ p_1_less = [1, 1, 0.99660, 0.98398, .99953]
100
+ statistic_1 = [5.27356, 2.91270, 0.60831, 0.27002, 0.96637]
101
+ ci_1_twosided = [[5.3633917835622, 0.7296142201217, -8.3879817106607,
102
+ -11.9090753452911, -11.7655021543469],
103
+ [15.9709832164378, 13.8936496687672, 13.4556900439941,
104
+ 14.6434503452911, 25.4998771543469]]
105
+ ci_1_greater = [5.9036402398526, 1.4000632918725, -7.2754756323636,
106
+ -10.5567456382391, -9.8675629499576]
107
+ ci_1_less = [15.4306165948619, 13.2230539537359, 12.3429406339544,
108
+ 13.2908248513211, 23.6015228251660]
109
+ pvalues_1 = dict(twosided=p_1_twosided, less=p_1_less, greater=p_1_greater)
110
+ cis_1 = dict(twosided=ci_1_twosided, less=ci_1_less, greater=ci_1_greater)
111
+ case_1 = dict(samples=samples_1, control=control_1, statistic=statistic_1,
112
+ pvalues=pvalues_1, cis=cis_1)
113
+
114
+ # From Dunnett1955 comparing with R's DescTools: DunnettTest
115
+ samples_2 = [[9.76, 8.80, 7.68, 9.36], [12.80, 9.68, 12.16, 9.20, 10.55]]
116
+ control_2 = [7.40, 8.50, 7.20, 8.24, 9.84, 8.32]
117
+ pvalue_2 = [0.6201, 0.0058]
118
+ # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
119
+ p_2_twosided = [0.6201020, 0.0058254]
120
+ p_2_greater = [0.3249776, 0.0029139]
121
+ p_2_less = [0.91676, 0.99984]
122
+ statistic_2 = [0.85703, 3.69375]
123
+ ci_2_twosided = [[-1.2564116462124, 0.8396273539789],
124
+ [2.5564116462124, 4.4163726460211]]
125
+ ci_2_greater = [-0.9588591188156, 1.1187563667543]
126
+ ci_2_less = [2.2588591188156, 4.1372436332457]
127
+ pvalues_2 = dict(twosided=p_2_twosided, less=p_2_less, greater=p_2_greater)
128
+ cis_2 = dict(twosided=ci_2_twosided, less=ci_2_less, greater=ci_2_greater)
129
+ case_2 = dict(samples=samples_2, control=control_2, statistic=statistic_2,
130
+ pvalues=pvalues_2, cis=cis_2)
131
+
132
+ samples_3 = [[55, 64, 64], [55, 49, 52], [50, 44, 41]]
133
+ control_3 = [55, 47, 48]
134
+ pvalue_3 = [0.0364, 0.8966, 0.4091]
135
+ # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
136
+ p_3_twosided = [0.036407, 0.896539, 0.409295]
137
+ p_3_greater = [0.018277, 0.521109, 0.981892]
138
+ p_3_less = [0.99944, 0.90054, 0.20974]
139
+ statistic_3 = [3.09073, 0.56195, -1.40488]
140
+ ci_3_twosided = [[0.7529028025053, -8.2470971974947, -15.2470971974947],
141
+ [21.2470971974947, 12.2470971974947, 5.2470971974947]]
142
+ ci_3_greater = [2.4023682323149, -6.5976317676851, -13.5976317676851]
143
+ ci_3_less = [19.5984402363662, 10.5984402363662, 3.5984402363662]
144
+ pvalues_3 = dict(twosided=p_3_twosided, less=p_3_less, greater=p_3_greater)
145
+ cis_3 = dict(twosided=ci_3_twosided, less=ci_3_less, greater=ci_3_greater)
146
+ case_3 = dict(samples=samples_3, control=control_3, statistic=statistic_3,
147
+ pvalues=pvalues_3, cis=cis_3)
148
+
149
+ # From Thomson and Short,
150
+ # Mucociliary function in health, chronic obstructive airway disease,
151
+ # and asbestosis, Journal of Applied Physiology, 1969. Table 1
152
+ # Comparing with R's DescTools: DunnettTest
153
+ samples_4 = [[3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]]
154
+ control_4 = [2.9, 3.0, 2.5, 2.6, 3.2]
155
+ pvalue_4 = [0.5832, 0.9982]
156
+ # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
157
+ p_4_twosided = [0.58317, 0.99819]
158
+ p_4_greater = [0.30225, 0.69115]
159
+ p_4_less = [0.91929, 0.65212]
160
+ statistic_4 = [0.90875, -0.05007]
161
+ ci_4_twosided = [[-0.6898153448579, -1.0333456251632],
162
+ [1.4598153448579, 0.9933456251632]]
163
+ ci_4_greater = [-0.5186459268412, -0.8719655502147 ]
164
+ ci_4_less = [1.2886459268412, 0.8319655502147]
165
+ pvalues_4 = dict(twosided=p_4_twosided, less=p_4_less, greater=p_4_greater)
166
+ cis_4 = dict(twosided=ci_4_twosided, less=ci_4_less, greater=ci_4_greater)
167
+ case_4 = dict(samples=samples_4, control=control_4, statistic=statistic_4,
168
+ pvalues=pvalues_4, cis=cis_4)
169
+
170
+ @pytest.mark.parametrize(
171
+ 'rho, n_groups, df, statistic, pvalue, alternative',
172
+ [
173
+ # From Dunnett1955
174
+ # Tables 1a and 1b pages 1117-1118
175
+ (0.5, 1, 10, 1.81, 0.05, "greater"), # different than two-sided
176
+ (0.5, 3, 10, 2.34, 0.05, "greater"),
177
+ (0.5, 2, 30, 1.99, 0.05, "greater"),
178
+ (0.5, 5, 30, 2.33, 0.05, "greater"),
179
+ (0.5, 4, 12, 3.32, 0.01, "greater"),
180
+ (0.5, 7, 12, 3.56, 0.01, "greater"),
181
+ (0.5, 2, 60, 2.64, 0.01, "greater"),
182
+ (0.5, 4, 60, 2.87, 0.01, "greater"),
183
+ (0.5, 4, 60, [2.87, 2.21], [0.01, 0.05], "greater"),
184
+ # Tables 2a and 2b pages 1119-1120
185
+ (0.5, 1, 10, 2.23, 0.05, "two-sided"), # two-sided
186
+ (0.5, 3, 10, 2.81, 0.05, "two-sided"),
187
+ (0.5, 2, 30, 2.32, 0.05, "two-sided"),
188
+ (0.5, 3, 20, 2.57, 0.05, "two-sided"),
189
+ (0.5, 4, 12, 3.76, 0.01, "two-sided"),
190
+ (0.5, 7, 12, 4.08, 0.01, "two-sided"),
191
+ (0.5, 2, 60, 2.90, 0.01, "two-sided"),
192
+ (0.5, 4, 60, 3.14, 0.01, "two-sided"),
193
+ (0.5, 4, 60, [3.14, 2.55], [0.01, 0.05], "two-sided"),
194
+ ],
195
+ )
196
+ def test_critical_values(
197
+ self, rho, n_groups, df, statistic, pvalue, alternative
198
+ ):
199
+ rng = np.random.default_rng(165250594791731684851746311027739134893)
200
+ rho = np.full((n_groups, n_groups), rho)
201
+ np.fill_diagonal(rho, 1)
202
+
203
+ statistic = np.array(statistic)
204
+ res = _pvalue_dunnett(
205
+ rho=rho, df=df, statistic=statistic,
206
+ alternative=alternative,
207
+ rng=rng
208
+ )
209
+ assert_allclose(res, pvalue, atol=5e-3)
210
+
211
+ @pytest.mark.parametrize(
212
+ 'samples, control, pvalue, statistic',
213
+ [
214
+ (samples_1, control_1, pvalue_1, statistic_1),
215
+ (samples_2, control_2, pvalue_2, statistic_2),
216
+ (samples_3, control_3, pvalue_3, statistic_3),
217
+ (samples_4, control_4, pvalue_4, statistic_4),
218
+ ]
219
+ )
220
+ def test_basic(self, samples, control, pvalue, statistic):
221
+ rng = np.random.default_rng(11681140010308601919115036826969764808)
222
+
223
+ res = stats.dunnett(*samples, control=control, random_state=rng)
224
+
225
+ assert isinstance(res, DunnettResult)
226
+ assert_allclose(res.statistic, statistic, rtol=5e-5)
227
+ assert_allclose(res.pvalue, pvalue, rtol=1e-2, atol=1e-4)
228
+
229
+ @pytest.mark.parametrize(
230
+ 'alternative',
231
+ ['two-sided', 'less', 'greater']
232
+ )
233
+ def test_ttest_ind(self, alternative):
234
+ # check that `dunnett` agrees with `ttest_ind`
235
+ # when there are only two groups
236
+ rng = np.random.default_rng(114184017807316971636137493526995620351)
237
+
238
+ for _ in range(10):
239
+ sample = rng.integers(-100, 100, size=(10,))
240
+ control = rng.integers(-100, 100, size=(10,))
241
+
242
+ res = stats.dunnett(
243
+ sample, control=control,
244
+ alternative=alternative, random_state=rng
245
+ )
246
+ ref = stats.ttest_ind(
247
+ sample, control,
248
+ alternative=alternative, random_state=rng
249
+ )
250
+
251
+ assert_allclose(res.statistic, ref.statistic, rtol=1e-3, atol=1e-5)
252
+ assert_allclose(res.pvalue, ref.pvalue, rtol=1e-3, atol=1e-5)
253
+
254
+ @pytest.mark.parametrize(
255
+ 'alternative, pvalue',
256
+ [
257
+ ('less', [0, 1]),
258
+ ('greater', [1, 0]),
259
+ ('two-sided', [0, 0]),
260
+ ]
261
+ )
262
+ def test_alternatives(self, alternative, pvalue):
263
+ rng = np.random.default_rng(114184017807316971636137493526995620351)
264
+
265
+ # width of 20 and min diff between samples/control is 60
266
+ # and maximal diff would be 100
267
+ sample_less = rng.integers(0, 20, size=(10,))
268
+ control = rng.integers(80, 100, size=(10,))
269
+ sample_greater = rng.integers(160, 180, size=(10,))
270
+
271
+ res = stats.dunnett(
272
+ sample_less, sample_greater, control=control,
273
+ alternative=alternative, random_state=rng
274
+ )
275
+ assert_allclose(res.pvalue, pvalue, atol=1e-7)
276
+
277
+ ci = res.confidence_interval()
278
+ # two-sided is comparable for high/low
279
+ if alternative == 'less':
280
+ assert np.isneginf(ci.low).all()
281
+ assert -100 < ci.high[0] < -60
282
+ assert 60 < ci.high[1] < 100
283
+ elif alternative == 'greater':
284
+ assert -100 < ci.low[0] < -60
285
+ assert 60 < ci.low[1] < 100
286
+ assert np.isposinf(ci.high).all()
287
+ elif alternative == 'two-sided':
288
+ assert -100 < ci.low[0] < -60
289
+ assert 60 < ci.low[1] < 100
290
+ assert -100 < ci.high[0] < -60
291
+ assert 60 < ci.high[1] < 100
292
+
293
+ @pytest.mark.parametrize("case", [case_1, case_2, case_3, case_4])
294
+ @pytest.mark.parametrize("alternative", ['less', 'greater', 'two-sided'])
295
+ def test_against_R_multicomp_glht(self, case, alternative):
296
+ rng = np.random.default_rng(189117774084579816190295271136455278291)
297
+ samples = case['samples']
298
+ control = case['control']
299
+ alternatives = {'less': 'less', 'greater': 'greater',
300
+ 'two-sided': 'twosided'}
301
+ p_ref = case['pvalues'][alternative.replace('-', '')]
302
+
303
+ res = stats.dunnett(*samples, control=control, alternative=alternative,
304
+ random_state=rng)
305
+ # atol can't be tighter because R reports some pvalues as "< 1e-4"
306
+ assert_allclose(res.pvalue, p_ref, rtol=5e-3, atol=1e-4)
307
+
308
+ ci_ref = case['cis'][alternatives[alternative]]
309
+ if alternative == "greater":
310
+ ci_ref = [ci_ref, np.inf]
311
+ elif alternative == "less":
312
+ ci_ref = [-np.inf, ci_ref]
313
+ assert res._ci is None
314
+ assert res._ci_cl is None
315
+ ci = res.confidence_interval(confidence_level=0.95)
316
+ assert_allclose(ci.low, ci_ref[0], rtol=5e-3, atol=1e-5)
317
+ assert_allclose(ci.high, ci_ref[1], rtol=5e-3, atol=1e-5)
318
+
319
+ # re-run to use the cached value "is" to check id as same object
320
+ assert res._ci is ci
321
+ assert res._ci_cl == 0.95
322
+ ci_ = res.confidence_interval(confidence_level=0.95)
323
+ assert ci_ is ci
324
+
325
+ @pytest.mark.parametrize('alternative', ["two-sided", "less", "greater"])
326
+ def test_str(self, alternative):
327
+ rng = np.random.default_rng(189117774084579816190295271136455278291)
328
+
329
+ res = stats.dunnett(
330
+ *self.samples_3, control=self.control_3, alternative=alternative,
331
+ random_state=rng
332
+ )
333
+
334
+ # check some str output
335
+ res_str = str(res)
336
+ assert '(Sample 2 - Control)' in res_str
337
+ assert '95.0%' in res_str
338
+
339
+ if alternative == 'less':
340
+ assert '-inf' in res_str
341
+ assert '19.' in res_str
342
+ elif alternative == 'greater':
343
+ assert 'inf' in res_str
344
+ assert '-13.' in res_str
345
+ else:
346
+ assert 'inf' not in res_str
347
+ assert '21.' in res_str
348
+
349
+ def test_warnings(self):
350
+ rng = np.random.default_rng(189117774084579816190295271136455278291)
351
+
352
+ res = stats.dunnett(
353
+ *self.samples_3, control=self.control_3, random_state=rng
354
+ )
355
+ msg = r"Computation of the confidence interval did not converge"
356
+ with pytest.warns(UserWarning, match=msg):
357
+ res._allowance(tol=1e-5)
358
+
359
+ def test_raises(self):
360
+ samples, control = self.samples_3, self.control_3
361
+
362
+ # alternative
363
+ with pytest.raises(ValueError, match="alternative must be"):
364
+ stats.dunnett(*samples, control=control, alternative='bob')
365
+
366
+ # 2D for a sample
367
+ samples_ = copy.deepcopy(samples)
368
+ samples_[0] = [samples_[0]]
369
+ with pytest.raises(ValueError, match="must be 1D arrays"):
370
+ stats.dunnett(*samples_, control=control)
371
+
372
+ # 2D for control
373
+ control_ = copy.deepcopy(control)
374
+ control_ = [control_]
375
+ with pytest.raises(ValueError, match="must be 1D arrays"):
376
+ stats.dunnett(*samples, control=control_)
377
+
378
+ # No obs in a sample
379
+ samples_ = copy.deepcopy(samples)
380
+ samples_[1] = []
381
+ with pytest.raises(ValueError, match="at least 1 observation"):
382
+ stats.dunnett(*samples_, control=control)
383
+
384
+ # No obs in control
385
+ control_ = []
386
+ with pytest.raises(ValueError, match="at least 1 observation"):
387
+ stats.dunnett(*samples, control=control_)
388
+
389
+ res = stats.dunnett(*samples, control=control)
390
+ with pytest.raises(ValueError, match="Confidence level must"):
391
+ res.confidence_interval(confidence_level=3)
392
+
393
+ @pytest.mark.filterwarnings("ignore:Computation of the confidence")
394
+ @pytest.mark.parametrize('n_samples', [1, 2, 3])
395
+ def test_shapes(self, n_samples):
396
+ rng = np.random.default_rng(689448934110805334)
397
+ samples = rng.normal(size=(n_samples, 10))
398
+ control = rng.normal(size=10)
399
+ res = stats.dunnett(*samples, control=control, random_state=rng)
400
+ assert res.statistic.shape == (n_samples,)
401
+ assert res.pvalue.shape == (n_samples,)
402
+ ci = res.confidence_interval()
403
+ assert ci.low.shape == (n_samples,)
404
+ assert ci.high.shape == (n_samples,)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_odds_ratio.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import numpy as np
3
+ from numpy.testing import assert_equal, assert_allclose
4
+ from .._discrete_distns import nchypergeom_fisher, hypergeom
5
+ from scipy.stats._odds_ratio import odds_ratio
6
+ from .data.fisher_exact_results_from_r import data
7
+
8
+
9
+ class TestOddsRatio:
10
+
11
+ @pytest.mark.parametrize('parameters, rresult', data)
12
+ def test_results_from_r(self, parameters, rresult):
13
+ alternative = parameters.alternative.replace('.', '-')
14
+ result = odds_ratio(parameters.table)
15
+ # The results computed by R are not very accurate.
16
+ if result.statistic < 400:
17
+ or_rtol = 5e-4
18
+ ci_rtol = 2e-2
19
+ else:
20
+ or_rtol = 5e-2
21
+ ci_rtol = 1e-1
22
+ assert_allclose(result.statistic,
23
+ rresult.conditional_odds_ratio, rtol=or_rtol)
24
+ ci = result.confidence_interval(parameters.confidence_level,
25
+ alternative)
26
+ assert_allclose((ci.low, ci.high), rresult.conditional_odds_ratio_ci,
27
+ rtol=ci_rtol)
28
+
29
+ # Also do a self-check for the conditional odds ratio.
30
+ # With the computed conditional odds ratio as the noncentrality
31
+ # parameter of the noncentral hypergeometric distribution with
32
+ # parameters table.sum(), table[0].sum(), and table[:,0].sum() as
33
+ # total, ngood and nsample, respectively, the mean of the distribution
34
+ # should equal table[0, 0].
35
+ cor = result.statistic
36
+ table = np.array(parameters.table)
37
+ total = table.sum()
38
+ ngood = table[0].sum()
39
+ nsample = table[:, 0].sum()
40
+ # nchypergeom_fisher does not allow the edge cases where the
41
+ # noncentrality parameter is 0 or inf, so handle those values
42
+ # separately here.
43
+ if cor == 0:
44
+ nchg_mean = hypergeom.support(total, ngood, nsample)[0]
45
+ elif cor == np.inf:
46
+ nchg_mean = hypergeom.support(total, ngood, nsample)[1]
47
+ else:
48
+ nchg_mean = nchypergeom_fisher.mean(total, ngood, nsample, cor)
49
+ assert_allclose(nchg_mean, table[0, 0], rtol=1e-13)
50
+
51
+ # Check that the confidence interval is correct.
52
+ alpha = 1 - parameters.confidence_level
53
+ if alternative == 'two-sided':
54
+ if ci.low > 0:
55
+ sf = nchypergeom_fisher.sf(table[0, 0] - 1,
56
+ total, ngood, nsample, ci.low)
57
+ assert_allclose(sf, alpha/2, rtol=1e-11)
58
+ if np.isfinite(ci.high):
59
+ cdf = nchypergeom_fisher.cdf(table[0, 0],
60
+ total, ngood, nsample, ci.high)
61
+ assert_allclose(cdf, alpha/2, rtol=1e-11)
62
+ elif alternative == 'less':
63
+ if np.isfinite(ci.high):
64
+ cdf = nchypergeom_fisher.cdf(table[0, 0],
65
+ total, ngood, nsample, ci.high)
66
+ assert_allclose(cdf, alpha, rtol=1e-11)
67
+ else:
68
+ # alternative == 'greater'
69
+ if ci.low > 0:
70
+ sf = nchypergeom_fisher.sf(table[0, 0] - 1,
71
+ total, ngood, nsample, ci.low)
72
+ assert_allclose(sf, alpha, rtol=1e-11)
73
+
74
+ @pytest.mark.parametrize('table', [
75
+ [[0, 0], [5, 10]],
76
+ [[5, 10], [0, 0]],
77
+ [[0, 5], [0, 10]],
78
+ [[5, 0], [10, 0]],
79
+ ])
80
+ def test_row_or_col_zero(self, table):
81
+ result = odds_ratio(table)
82
+ assert_equal(result.statistic, np.nan)
83
+ ci = result.confidence_interval()
84
+ assert_equal((ci.low, ci.high), (0, np.inf))
85
+
86
+ @pytest.mark.parametrize("case",
87
+ [[0.95, 'two-sided', 0.4879913, 2.635883],
88
+ [0.90, 'two-sided', 0.5588516, 2.301663]])
89
+ def test_sample_odds_ratio_ci(self, case):
90
+ # Compare the sample odds ratio confidence interval to the R function
91
+ # oddsratio.wald from the epitools package, e.g.
92
+ # > library(epitools)
93
+ # > table = matrix(c(10, 20, 41, 93), nrow=2, ncol=2, byrow=TRUE)
94
+ # > result = oddsratio.wald(table)
95
+ # > result$measure
96
+ # odds ratio with 95% C.I.
97
+ # Predictor estimate lower upper
98
+ # Exposed1 1.000000 NA NA
99
+ # Exposed2 1.134146 0.4879913 2.635883
100
+
101
+ confidence_level, alternative, ref_low, ref_high = case
102
+ table = [[10, 20], [41, 93]]
103
+ result = odds_ratio(table, kind='sample')
104
+ assert_allclose(result.statistic, 1.134146, rtol=1e-6)
105
+ ci = result.confidence_interval(confidence_level, alternative)
106
+ assert_allclose([ci.low, ci.high], [ref_low, ref_high], rtol=1e-6)
107
+
108
+ @pytest.mark.slow
109
+ @pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
110
+ def test_sample_odds_ratio_one_sided_ci(self, alternative):
111
+ # can't find a good reference for one-sided CI, so bump up the sample
112
+ # size and compare against the conditional odds ratio CI
113
+ table = [[1000, 2000], [4100, 9300]]
114
+ res = odds_ratio(table, kind='sample')
115
+ ref = odds_ratio(table, kind='conditional')
116
+ assert_allclose(res.statistic, ref.statistic, atol=1e-5)
117
+ assert_allclose(res.confidence_interval(alternative=alternative),
118
+ ref.confidence_interval(alternative=alternative),
119
+ atol=2e-3)
120
+
121
+ @pytest.mark.parametrize('kind', ['sample', 'conditional'])
122
+ @pytest.mark.parametrize('bad_table', [123, "foo", [10, 11, 12]])
123
+ def test_invalid_table_shape(self, kind, bad_table):
124
+ with pytest.raises(ValueError, match="Invalid shape"):
125
+ odds_ratio(bad_table, kind=kind)
126
+
127
+ def test_invalid_table_type(self):
128
+ with pytest.raises(ValueError, match='must be an array of integers'):
129
+ odds_ratio([[1.0, 3.4], [5.0, 9.9]])
130
+
131
+ def test_negative_table_values(self):
132
+ with pytest.raises(ValueError, match='must be nonnegative'):
133
+ odds_ratio([[1, 2], [3, -4]])
134
+
135
+ def test_invalid_kind(self):
136
+ with pytest.raises(ValueError, match='`kind` must be'):
137
+ odds_ratio([[10, 20], [30, 14]], kind='magnetoreluctance')
138
+
139
+ def test_invalid_alternative(self):
140
+ result = odds_ratio([[5, 10], [2, 32]])
141
+ with pytest.raises(ValueError, match='`alternative` must be'):
142
+ result.confidence_interval(alternative='depleneration')
143
+
144
+ @pytest.mark.parametrize('level', [-0.5, 1.5])
145
+ def test_invalid_confidence_level(self, level):
146
+ result = odds_ratio([[5, 10], [2, 32]])
147
+ with pytest.raises(ValueError, match='must be between 0 and 1'):
148
+ result.confidence_interval(confidence_level=level)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_rank.py ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_equal, assert_array_equal
3
+ import pytest
4
+
5
+ from scipy.conftest import skip_xp_invalid_arg
6
+ from scipy.stats import rankdata, tiecorrect
7
+ from scipy._lib._util import np_long
8
+
9
+
10
+ class TestTieCorrect:
11
+
12
+ def test_empty(self):
13
+ """An empty array requires no correction, should return 1.0."""
14
+ ranks = np.array([], dtype=np.float64)
15
+ c = tiecorrect(ranks)
16
+ assert_equal(c, 1.0)
17
+
18
+ def test_one(self):
19
+ """A single element requires no correction, should return 1.0."""
20
+ ranks = np.array([1.0], dtype=np.float64)
21
+ c = tiecorrect(ranks)
22
+ assert_equal(c, 1.0)
23
+
24
+ def test_no_correction(self):
25
+ """Arrays with no ties require no correction."""
26
+ ranks = np.arange(2.0)
27
+ c = tiecorrect(ranks)
28
+ assert_equal(c, 1.0)
29
+ ranks = np.arange(3.0)
30
+ c = tiecorrect(ranks)
31
+ assert_equal(c, 1.0)
32
+
33
+ def test_basic(self):
34
+ """Check a few basic examples of the tie correction factor."""
35
+ # One tie of two elements
36
+ ranks = np.array([1.0, 2.5, 2.5])
37
+ c = tiecorrect(ranks)
38
+ T = 2.0
39
+ N = ranks.size
40
+ expected = 1.0 - (T**3 - T) / (N**3 - N)
41
+ assert_equal(c, expected)
42
+
43
+ # One tie of two elements (same as above, but tie is not at the end)
44
+ ranks = np.array([1.5, 1.5, 3.0])
45
+ c = tiecorrect(ranks)
46
+ T = 2.0
47
+ N = ranks.size
48
+ expected = 1.0 - (T**3 - T) / (N**3 - N)
49
+ assert_equal(c, expected)
50
+
51
+ # One tie of three elements
52
+ ranks = np.array([1.0, 3.0, 3.0, 3.0])
53
+ c = tiecorrect(ranks)
54
+ T = 3.0
55
+ N = ranks.size
56
+ expected = 1.0 - (T**3 - T) / (N**3 - N)
57
+ assert_equal(c, expected)
58
+
59
+ # Two ties, lengths 2 and 3.
60
+ ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
61
+ c = tiecorrect(ranks)
62
+ T1 = 2.0
63
+ T2 = 3.0
64
+ N = ranks.size
65
+ expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
66
+ assert_equal(c, expected)
67
+
68
+ def test_overflow(self):
69
+ ntie, k = 2000, 5
70
+ a = np.repeat(np.arange(k), ntie)
71
+ n = a.size # ntie * k
72
+ out = tiecorrect(rankdata(a))
73
+ assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
74
+
75
+
76
+ class TestRankData:
77
+
78
+ def test_empty(self):
79
+ """stats.rankdata([]) should return an empty array."""
80
+ a = np.array([], dtype=int)
81
+ r = rankdata(a)
82
+ assert_array_equal(r, np.array([], dtype=np.float64))
83
+ r = rankdata([])
84
+ assert_array_equal(r, np.array([], dtype=np.float64))
85
+
86
+ @pytest.mark.parametrize("shape", [(0, 1, 2)])
87
+ @pytest.mark.parametrize("axis", [None, *range(3)])
88
+ def test_empty_multidim(self, shape, axis):
89
+ a = np.empty(shape, dtype=int)
90
+ r = rankdata(a, axis=axis)
91
+ expected_shape = (0,) if axis is None else shape
92
+ assert_equal(r.shape, expected_shape)
93
+ assert_equal(r.dtype, np.float64)
94
+
95
+ def test_one(self):
96
+ """Check stats.rankdata with an array of length 1."""
97
+ data = [100]
98
+ a = np.array(data, dtype=int)
99
+ r = rankdata(a)
100
+ assert_array_equal(r, np.array([1.0], dtype=np.float64))
101
+ r = rankdata(data)
102
+ assert_array_equal(r, np.array([1.0], dtype=np.float64))
103
+
104
+ def test_basic(self):
105
+ """Basic tests of stats.rankdata."""
106
+ data = [100, 10, 50]
107
+ expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
108
+ a = np.array(data, dtype=int)
109
+ r = rankdata(a)
110
+ assert_array_equal(r, expected)
111
+ r = rankdata(data)
112
+ assert_array_equal(r, expected)
113
+
114
+ data = [40, 10, 30, 10, 50]
115
+ expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
116
+ a = np.array(data, dtype=int)
117
+ r = rankdata(a)
118
+ assert_array_equal(r, expected)
119
+ r = rankdata(data)
120
+ assert_array_equal(r, expected)
121
+
122
+ data = [20, 20, 20, 10, 10, 10]
123
+ expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
124
+ a = np.array(data, dtype=int)
125
+ r = rankdata(a)
126
+ assert_array_equal(r, expected)
127
+ r = rankdata(data)
128
+ assert_array_equal(r, expected)
129
+ # The docstring states explicitly that the argument is flattened.
130
+ a2d = a.reshape(2, 3)
131
+ r = rankdata(a2d)
132
+ assert_array_equal(r, expected)
133
+
134
+ @skip_xp_invalid_arg
135
+ def test_rankdata_object_string(self):
136
+
137
+ def min_rank(a):
138
+ return [1 + sum(i < j for i in a) for j in a]
139
+
140
+ def max_rank(a):
141
+ return [sum(i <= j for i in a) for j in a]
142
+
143
+ def ordinal_rank(a):
144
+ return min_rank([(x, i) for i, x in enumerate(a)])
145
+
146
+ def average_rank(a):
147
+ return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
148
+
149
+ def dense_rank(a):
150
+ b = np.unique(a)
151
+ return [1 + sum(i < j for i in b) for j in a]
152
+
153
+ rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
154
+ average=average_rank, dense=dense_rank)
155
+
156
+ def check_ranks(a):
157
+ for method in 'min', 'max', 'dense', 'ordinal', 'average':
158
+ out = rankdata(a, method=method)
159
+ assert_array_equal(out, rankf[method](a))
160
+
161
+ val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
162
+ check_ranks(np.random.choice(val, 200))
163
+ check_ranks(np.random.choice(val, 200).astype('object'))
164
+
165
+ val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
166
+ check_ranks(np.random.choice(val, 200).astype('object'))
167
+
168
+ def test_large_int(self):
169
+ data = np.array([2**60, 2**60+1], dtype=np.uint64)
170
+ r = rankdata(data)
171
+ assert_array_equal(r, [1.0, 2.0])
172
+
173
+ data = np.array([2**60, 2**60+1], dtype=np.int64)
174
+ r = rankdata(data)
175
+ assert_array_equal(r, [1.0, 2.0])
176
+
177
+ data = np.array([2**60, -2**60+1], dtype=np.int64)
178
+ r = rankdata(data)
179
+ assert_array_equal(r, [2.0, 1.0])
180
+
181
+ def test_big_tie(self):
182
+ for n in [10000, 100000, 1000000]:
183
+ data = np.ones(n, dtype=int)
184
+ r = rankdata(data)
185
+ expected_rank = 0.5 * (n + 1)
186
+ assert_array_equal(r, expected_rank * data,
187
+ "test failed with n=%d" % n)
188
+
189
+ def test_axis(self):
190
+ data = [[0, 2, 1],
191
+ [4, 2, 2]]
192
+ expected0 = [[1., 1.5, 1.],
193
+ [2., 1.5, 2.]]
194
+ r0 = rankdata(data, axis=0)
195
+ assert_array_equal(r0, expected0)
196
+ expected1 = [[1., 3., 2.],
197
+ [3., 1.5, 1.5]]
198
+ r1 = rankdata(data, axis=1)
199
+ assert_array_equal(r1, expected1)
200
+
201
+ methods = ["average", "min", "max", "dense", "ordinal"]
202
+ dtypes = [np.float64] + [np_long]*4
203
+
204
+ @pytest.mark.parametrize("axis", [0, 1])
205
+ @pytest.mark.parametrize("method, dtype", zip(methods, dtypes))
206
+ def test_size_0_axis(self, axis, method, dtype):
207
+ shape = (3, 0)
208
+ data = np.zeros(shape)
209
+ r = rankdata(data, method=method, axis=axis)
210
+ assert_equal(r.shape, shape)
211
+ assert_equal(r.dtype, dtype)
212
+
213
+ @pytest.mark.parametrize('axis', range(3))
214
+ @pytest.mark.parametrize('method', methods)
215
+ def test_nan_policy_omit_3d(self, axis, method):
216
+ shape = (20, 21, 22)
217
+ rng = np.random.RandomState(23983242)
218
+
219
+ a = rng.random(size=shape)
220
+ i = rng.random(size=shape) < 0.4
221
+ j = rng.random(size=shape) < 0.1
222
+ k = rng.random(size=shape) < 0.1
223
+ a[i] = np.nan
224
+ a[j] = -np.inf
225
+ a[k] - np.inf
226
+
227
+ def rank_1d_omit(a, method):
228
+ out = np.zeros_like(a)
229
+ i = np.isnan(a)
230
+ a_compressed = a[~i]
231
+ res = rankdata(a_compressed, method)
232
+ out[~i] = res
233
+ out[i] = np.nan
234
+ return out
235
+
236
+ def rank_omit(a, method, axis):
237
+ return np.apply_along_axis(lambda a: rank_1d_omit(a, method),
238
+ axis, a)
239
+
240
+ res = rankdata(a, method, axis=axis, nan_policy='omit')
241
+ res0 = rank_omit(a, method, axis=axis)
242
+
243
+ assert_array_equal(res, res0)
244
+
245
+ def test_nan_policy_2d_axis_none(self):
246
+ # 2 2d-array test with axis=None
247
+ data = [[0, np.nan, 3],
248
+ [4, 2, np.nan],
249
+ [1, 2, 2]]
250
+ assert_array_equal(rankdata(data, axis=None, nan_policy='omit'),
251
+ [1., np.nan, 6., 7., 4., np.nan, 2., 4., 4.])
252
+ assert_array_equal(rankdata(data, axis=None, nan_policy='propagate'),
253
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
254
+ np.nan, np.nan, np.nan])
255
+
256
+ def test_nan_policy_raise(self):
257
+ # 1 1d-array test
258
+ data = [0, 2, 3, -2, np.nan, np.nan]
259
+ with pytest.raises(ValueError, match="The input contains nan"):
260
+ rankdata(data, nan_policy='raise')
261
+
262
+ # 2 2d-array test
263
+ data = [[0, np.nan, 3],
264
+ [4, 2, np.nan],
265
+ [np.nan, 2, 2]]
266
+
267
+ with pytest.raises(ValueError, match="The input contains nan"):
268
+ rankdata(data, axis=0, nan_policy="raise")
269
+
270
+ with pytest.raises(ValueError, match="The input contains nan"):
271
+ rankdata(data, axis=1, nan_policy="raise")
272
+
273
+ def test_nan_policy_propagate(self):
274
+ # 1 1d-array test
275
+ data = [0, 2, 3, -2, np.nan, np.nan]
276
+ assert_array_equal(rankdata(data, nan_policy='propagate'),
277
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])
278
+
279
+ # 2 2d-array test
280
+ data = [[0, np.nan, 3],
281
+ [4, 2, np.nan],
282
+ [1, 2, 2]]
283
+ assert_array_equal(rankdata(data, axis=0, nan_policy='propagate'),
284
+ [[1, np.nan, np.nan],
285
+ [3, np.nan, np.nan],
286
+ [2, np.nan, np.nan]])
287
+ assert_array_equal(rankdata(data, axis=1, nan_policy='propagate'),
288
+ [[np.nan, np.nan, np.nan],
289
+ [np.nan, np.nan, np.nan],
290
+ [1, 2.5, 2.5]])
291
+
292
+
293
+ _cases = (
294
+ # values, method, expected
295
+ ([], 'average', []),
296
+ ([], 'min', []),
297
+ ([], 'max', []),
298
+ ([], 'dense', []),
299
+ ([], 'ordinal', []),
300
+ #
301
+ ([100], 'average', [1.0]),
302
+ ([100], 'min', [1.0]),
303
+ ([100], 'max', [1.0]),
304
+ ([100], 'dense', [1.0]),
305
+ ([100], 'ordinal', [1.0]),
306
+ #
307
+ ([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
308
+ ([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
309
+ ([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
310
+ ([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
311
+ ([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
312
+ #
313
+ ([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
314
+ ([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
315
+ ([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
316
+ ([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
317
+ ([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
318
+ #
319
+ ([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
320
+ ([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
321
+ ([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
322
+ ([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
323
+ ([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
324
+ #
325
+ ([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
326
+ ([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
327
+ ([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
328
+ ([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
329
+ ([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
330
+ #
331
+ ([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
332
+ )
333
+
334
+
335
+ def test_cases():
336
+ for values, method, expected in _cases:
337
+ r = rankdata(values, method=method)
338
+ assert_array_equal(r, expected)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_relative_risk.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import numpy as np
3
+ from numpy.testing import assert_allclose, assert_equal
4
+ from scipy.stats.contingency import relative_risk
5
+
6
+
7
+ # Test just the calculation of the relative risk, including edge
8
+ # cases that result in a relative risk of 0, inf or nan.
9
+ @pytest.mark.parametrize(
10
+ 'exposed_cases, exposed_total, control_cases, control_total, expected_rr',
11
+ [(1, 4, 3, 8, 0.25 / 0.375),
12
+ (0, 10, 5, 20, 0),
13
+ (0, 10, 0, 20, np.nan),
14
+ (5, 15, 0, 20, np.inf)]
15
+ )
16
+ def test_relative_risk(exposed_cases, exposed_total,
17
+ control_cases, control_total, expected_rr):
18
+ result = relative_risk(exposed_cases, exposed_total,
19
+ control_cases, control_total)
20
+ assert_allclose(result.relative_risk, expected_rr, rtol=1e-13)
21
+
22
+
23
+ def test_relative_risk_confidence_interval():
24
+ result = relative_risk(exposed_cases=16, exposed_total=128,
25
+ control_cases=24, control_total=256)
26
+ rr = result.relative_risk
27
+ ci = result.confidence_interval(confidence_level=0.95)
28
+ # The corresponding calculation in R using the epitools package.
29
+ #
30
+ # > library(epitools)
31
+ # > c <- matrix(c(232, 112, 24, 16), nrow=2)
32
+ # > result <- riskratio(c)
33
+ # > result$measure
34
+ # risk ratio with 95% C.I.
35
+ # Predictor estimate lower upper
36
+ # Exposed1 1.000000 NA NA
37
+ # Exposed2 1.333333 0.7347317 2.419628
38
+ #
39
+ # The last line is the result that we want.
40
+ assert_allclose(rr, 4/3)
41
+ assert_allclose((ci.low, ci.high), (0.7347317, 2.419628), rtol=5e-7)
42
+
43
+
44
+ def test_relative_risk_ci_conflevel0():
45
+ result = relative_risk(exposed_cases=4, exposed_total=12,
46
+ control_cases=5, control_total=30)
47
+ rr = result.relative_risk
48
+ assert_allclose(rr, 2.0, rtol=1e-14)
49
+ ci = result.confidence_interval(0)
50
+ assert_allclose((ci.low, ci.high), (2.0, 2.0), rtol=1e-12)
51
+
52
+
53
+ def test_relative_risk_ci_conflevel1():
54
+ result = relative_risk(exposed_cases=4, exposed_total=12,
55
+ control_cases=5, control_total=30)
56
+ ci = result.confidence_interval(1)
57
+ assert_equal((ci.low, ci.high), (0, np.inf))
58
+
59
+
60
+ def test_relative_risk_ci_edge_cases_00():
61
+ result = relative_risk(exposed_cases=0, exposed_total=12,
62
+ control_cases=0, control_total=30)
63
+ assert_equal(result.relative_risk, np.nan)
64
+ ci = result.confidence_interval()
65
+ assert_equal((ci.low, ci.high), (np.nan, np.nan))
66
+
67
+
68
+ def test_relative_risk_ci_edge_cases_01():
69
+ result = relative_risk(exposed_cases=0, exposed_total=12,
70
+ control_cases=1, control_total=30)
71
+ assert_equal(result.relative_risk, 0)
72
+ ci = result.confidence_interval()
73
+ assert_equal((ci.low, ci.high), (0.0, np.nan))
74
+
75
+
76
+ def test_relative_risk_ci_edge_cases_10():
77
+ result = relative_risk(exposed_cases=1, exposed_total=12,
78
+ control_cases=0, control_total=30)
79
+ assert_equal(result.relative_risk, np.inf)
80
+ ci = result.confidence_interval()
81
+ assert_equal((ci.low, ci.high), (np.nan, np.inf))
82
+
83
+
84
+ @pytest.mark.parametrize('ec, et, cc, ct', [(0, 0, 10, 20),
85
+ (-1, 10, 1, 5),
86
+ (1, 10, 0, 0),
87
+ (1, 10, -1, 4)])
88
+ def test_relative_risk_bad_value(ec, et, cc, ct):
89
+ with pytest.raises(ValueError, match="must be an integer not less than"):
90
+ relative_risk(ec, et, cc, ct)
91
+
92
+
93
+ def test_relative_risk_bad_type():
94
+ with pytest.raises(TypeError, match="must be an integer"):
95
+ relative_risk(1, 10, 2.0, 40)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_sampling.py ADDED
@@ -0,0 +1,1447 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+ import pickle
3
+ import pytest
4
+ from copy import deepcopy
5
+ import platform
6
+ import sys
7
+ import math
8
+ import numpy as np
9
+ from numpy.testing import assert_allclose, assert_equal, suppress_warnings
10
+ from scipy.stats.sampling import (
11
+ TransformedDensityRejection,
12
+ DiscreteAliasUrn,
13
+ DiscreteGuideTable,
14
+ NumericalInversePolynomial,
15
+ NumericalInverseHermite,
16
+ RatioUniforms,
17
+ SimpleRatioUniforms,
18
+ UNURANError
19
+ )
20
+ from pytest import raises as assert_raises
21
+ from scipy import stats
22
+ from scipy import special
23
+ from scipy.stats import chisquare, cramervonmises
24
+ from scipy.stats._distr_params import distdiscrete, distcont
25
+ from scipy._lib._util import check_random_state
26
+
27
+
28
+ # common test data: this data can be shared between all the tests.
29
+
30
+
31
+ # Normal distribution shared between all the continuous methods
32
class StandardNormal:
    """Standard normal distribution shared by the continuous-method tests.

    Supplies the ``pdf``/``dpdf``/``cdf`` triple that the UNU.RAN-based
    continuous generators require.
    """

    def pdf(self, x):
        # Normalized density; the normalization constant is needed for
        # NumericalInverseHermite.
        coef = 1. / np.sqrt(2. * np.pi)
        return coef * np.exp(-0.5 * x * x)

    def dpdf(self, x):
        # First derivative of the pdf: -x * pdf(x).
        coef = 1. / np.sqrt(2. * np.pi)
        return coef * -x * np.exp(-0.5 * x * x)

    def cdf(self, x):
        # Exact CDF via scipy.special's normal CDF.
        return special.ndtr(x)
42
+
43
+
44
# (method name, constructor kwargs) pairs covering every generator under test.
all_methods = [
    ("TransformedDensityRejection", {"dist": StandardNormal()}),
    ("DiscreteAliasUrn", {"dist": [0.02, 0.18, 0.8]}),
    ("DiscreteGuideTable", {"dist": [0.02, 0.18, 0.8]}),
    ("NumericalInversePolynomial", {"dist": StandardNormal()}),
    ("NumericalInverseHermite", {"dist": StandardNormal()}),
    ("SimpleRatioUniforms", {"dist": StandardNormal(), "mode": 0})
]

# Error message for float() applied to a list differs between interpreters.
if (sys.implementation.name == 'pypy'
        and sys.implementation.version < (7, 3, 10)):
    # changed in PyPy for v7.3.10
    floaterr = r"unsupported operand type for float\(\): 'list'"
else:
    floaterr = r"must be real number, not list"
# Make sure an internal error occurs in UNU.RAN when invalid callbacks are
# passed. Moreover, different generators throw different error messages.
# So, in case of an `UNURANError`, we do not validate the error message.
bad_pdfs_common = [
    # Negative PDF
    (lambda x: -x, UNURANError, r"..."),
    # Returning wrong type
    (lambda x: [], TypeError, floaterr),
    # Undefined name inside the function
    (lambda x: foo, NameError, r"name 'foo' is not defined"),  # type: ignore[name-defined]  # noqa: F821, E501
    # Infinite value returned => Overflow error.
    (lambda x: np.inf, UNURANError, r"..."),
    # NaN value => internal error in UNU.RAN
    (lambda x: np.nan, UNURANError, r"..."),
    # signature of PDF wrong
    (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
]


# same approach for dpdf
bad_dpdf_common = [
    # Infinite value returned.
    (lambda x: np.inf, UNURANError, r"..."),
    # NaN value => internal error in UNU.RAN
    (lambda x: np.nan, UNURANError, r"..."),
    # Returning wrong type
    (lambda x: [], TypeError, floaterr),
    # Undefined name inside the function
    (lambda x: foo, NameError, r"name 'foo' is not defined"),  # type: ignore[name-defined]  # noqa: F821, E501
    # signature of dPDF wrong
    (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
]


# same approach for logpdf
bad_logpdfs_common = [
    # Returning wrong type
    (lambda x: [], TypeError, floaterr),
    # Undefined name inside the function
    (lambda x: foo, NameError, r"name 'foo' is not defined"),  # type: ignore[name-defined]  # noqa: F821, E501
    # Infinite value returned => Overflow error.
    (lambda x: np.inf, UNURANError, r"..."),
    # NaN value => internal error in UNU.RAN
    (lambda x: np.nan, UNURANError, r"..."),
    # signature of logpdf wrong
    (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
]


# invalid probability vectors and the error messages they must produce
bad_pv_common = [
    ([], r"must contain at least one element"),
    ([[1.0, 0.0]], r"wrong number of dimensions \(expected 1, got 2\)"),
    ([0.2, 0.4, np.nan, 0.8], r"must contain only finite / non-nan values"),
    ([0.2, 0.4, np.inf, 0.8], r"must contain only finite / non-nan values"),
    ([0.0, 0.0], r"must contain at least one non-zero value"),
]


# size of the domains is incorrect
bad_sized_domains = [
    # > 2 elements in the domain
    ((1, 2, 3), ValueError, r"must be a length 2 tuple"),
    # empty domain
    ((), ValueError, r"must be a length 2 tuple")
]

# domain values are incorrect
bad_domains = [
    ((2, 1), UNURANError, r"left >= right"),
    ((1, 1), UNURANError, r"left >= right"),
]

# infinite and nan values present in domain.
inf_nan_domains = [
    # left >= right
    ((10, 10), UNURANError, r"left >= right"),
    ((np.inf, np.inf), UNURANError, r"left >= right"),
    ((-np.inf, -np.inf), UNURANError, r"left >= right"),
    ((np.inf, -np.inf), UNURANError, r"left >= right"),
    # Also include nans in some of the domains.
    ((-np.inf, np.nan), ValueError, r"only non-nan values"),
    ((np.nan, np.inf), ValueError, r"only non-nan values")
]

# `nan` values present in domain. Some distributions don't support
# infinite tails, so don't mix the nan values with infinities.
nan_domains = [
    ((0, np.nan), ValueError, r"only non-nan values"),
    ((np.nan, np.nan), ValueError, r"only non-nan values")
]
149
+
150
+
151
+ # all the methods should throw errors for nan, bad sized, and bad valued
152
+ # domains.
153
# all the methods should throw errors for nan, bad sized, and bad valued
# domains.
@pytest.mark.parametrize("domain, err, msg",
                         bad_domains + bad_sized_domains +
                         nan_domains)  # type: ignore[operator]
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_bad_domain(domain, err, msg, method, kwargs):
    """Every generator must reject malformed domains with the recorded
    error type and message."""
    generator_cls = getattr(stats.sampling, method)
    with pytest.raises(err, match=msg):
        generator_cls(domain=domain, **kwargs)
161
+
162
+
163
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_random_state(method, kwargs):
    """Identical seeds must reproduce identical streams regardless of how
    the seed is supplied (int, global state, RandomState, Generator)."""
    generator_cls = getattr(stats.sampling, method)

    # plain integer seed works on any NumPy version
    gen_a = generator_cls(**kwargs, random_state=123)
    gen_b = generator_cls(**kwargs, random_state=123)
    assert_equal(gen_a.rvs(100), gen_b.rvs(100))

    # seeding through the global NumPy state must match an explicit seed
    np.random.seed(123)
    rvs_global = generator_cls(**kwargs).rvs(100)
    np.random.seed(None)
    rvs_explicit = generator_cls(**kwargs, random_state=123).rvs(100)
    assert_equal(rvs_global, rvs_explicit)

    # Generator seed for new NumPy:
    # when a RandomState is given, it should take the bitgen_t
    # member of the class and create a Generator instance.
    legacy_state = np.random.RandomState(np.random.MT19937(123))
    new_generator = np.random.Generator(np.random.MT19937(123))
    gen_a = generator_cls(**kwargs, random_state=legacy_state)
    gen_b = generator_cls(**kwargs, random_state=new_generator)
    assert_equal(gen_a.rvs(100), gen_b.rvs(100))
190
+
191
+
192
def test_set_random_state():
    """``set_random_state`` must behave exactly like seeding at
    construction, and re-seeding mid-stream must restart the stream."""
    seeded = TransformedDensityRejection(StandardNormal(), random_state=123)
    reseeded = TransformedDensityRejection(StandardNormal())
    reseeded.set_random_state(123)
    assert_equal(seeded.rvs(100), reseeded.rvs(100))

    rng = TransformedDensityRejection(StandardNormal(), random_state=123)
    first_draw = rng.rvs(100)
    rng.set_random_state(123)
    second_draw = rng.rvs(100)
    assert_equal(first_draw, second_draw)
202
+
203
+
204
def test_threading_behaviour():
    """Test that the API is thread-safe.

    Verifies the lock mechanism and the use of ``PyErr_Occurred``: an
    exception raised inside a Python callback on one thread must surface
    on that thread with its own message, not leak across threads.
    """
    errors = {"err1": None, "err2": None}

    class Distribution:
        def __init__(self, pdf_msg):
            self.pdf_msg = pdf_msg

        def pdf(self, x):
            # deliberately blow up in a narrow window of the domain
            if 49.9 < x < 50.0:
                raise ValueError(self.pdf_msg)
            return x

        def dpdf(self, x):
            return 1

    def worker(key, msg, seed):
        # one generator per thread; sampling should eventually hit the
        # poisoned pdf window and raise this thread's message
        rng = TransformedDensityRejection(Distribution(msg),
                                          domain=(10, 100),
                                          random_state=seed)
        try:
            rng.rvs(100000)
        except ValueError as exc:
            errors[key] = exc.args[0]

    t1 = threading.Thread(target=worker, args=("err1", 'foo', 12))
    t2 = threading.Thread(target=worker, args=("err2", 'bar', 2))

    t1.start()
    t2.start()

    t1.join()
    t2.join()

    # each thread must have seen exactly its own error message
    assert errors['err1'] == 'foo'
    assert errors['err2'] == 'bar'
251
+
252
+
253
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_pickle(method, kwargs):
    """A generator must survive a pickle round-trip and keep producing
    the same stream as the original."""
    generator_cls = getattr(stats.sampling, method)
    original = generator_cls(**kwargs, random_state=123)
    restored = pickle.loads(pickle.dumps(original))
    assert_equal(original.rvs(100), restored.rvs(100))
260
+
261
+
262
@pytest.mark.parametrize("size", [None, 0, (0, ), 1, (10, 3), (2, 3, 4, 5),
                                  (0, 0), (0, 1)])
def test_rvs_size(size):
    """`rvs` lives on the shared base class, so checking the size
    handling on one method covers all of them."""
    rng = TransformedDensityRejection(StandardNormal())
    if size is None:
        # no size => a scalar sample
        assert np.isscalar(rng.rvs(size))
        return
    expected_shape = (size, ) if np.isscalar(size) else size
    assert rng.rvs(size).shape == expected_shape
274
+
275
+
276
def test_with_scipy_distribution():
    """The setup must work with SciPy's frozen (``rv_frozen``)
    distributions, including shifted/scaled and discrete ones."""
    urng = np.random.default_rng(0)
    u = np.linspace(0, 1, num=100)

    # plain standard normal
    dist = stats.norm()
    rng = NumericalInverseHermite(dist, random_state=urng)
    check_cont_samples(rng, dist, dist.stats())
    assert_allclose(dist.ppf(u), rng.ppf(u))

    # with `loc` and `scale`
    dist = stats.norm(loc=10., scale=5.)
    rng = NumericalInverseHermite(dist, random_state=urng)
    check_cont_samples(rng, dist, dist.stats())
    assert_allclose(dist.ppf(u), rng.ppf(u))

    # discrete distribution
    dist = stats.binom(10, 0.2)
    rng = DiscreteAliasUrn(dist, random_state=urng)
    low, high = dist.support()
    pv = dist.pmf(np.arange(low, high + 1))
    check_discr_samples(rng, pv, dist.stats())
295
+
296
+
297
def check_cont_samples(rng, dist, mv_ex, rtol=1e-7, atol=1e-1):
    """Shared sanity checks for continuous samplers.

    Parameters
    ----------
    rng : object with an ``rvs(size)`` method
        The generator under test.
    dist : object with a (possibly scalar-only) ``cdf`` method
        Reference distribution.
    mv_ex : tuple
        Exact (mean, variance) of the reference distribution.
    rtol, atol : float
        Tolerances for the sample-moment comparison.

    Raises
    ------
    AssertionError
        If the sample moments or the goodness-of-fit p-value are off.
    """
    rvs = rng.rvs(100000)
    mv = rvs.mean(), rvs.var()
    # test the moments only if the variance is finite
    if np.isfinite(mv_ex[1]):
        assert_allclose(mv, mv_ex, rtol=rtol, atol=atol)
    # Cramer-von Mises test for goodness-of-fit
    rvs = rng.rvs(500)
    # Fix: vectorize into a local instead of patching ``dist.cdf`` in
    # place — the previous code mutated the caller's distribution object.
    cdf = np.vectorize(dist.cdf)
    pval = cramervonmises(rvs, cdf).pvalue
    assert pval > 0.1
308
+
309
+
310
def check_discr_samples(rng, pv, mv_ex, rtol=1e-3, atol=1e-1):
    """Shared sanity checks for discrete samplers.

    Draws a large sample, compares its first two moments against the
    exact ones and runs a chi-squared goodness-of-fit test against the
    (normalized) probability vector ``pv``.
    """
    rvs = rng.rvs(100000)
    # first two sample moments must match the exact values
    sample_mv = rvs.mean(), rvs.var()
    assert_allclose(sample_mv, mv_ex, rtol=rtol, atol=atol)
    # normalize
    pv = pv / pv.sum()
    # chi-squared test for goodness-of-fit
    obs_freqs = np.zeros_like(pv)
    _, counts = np.unique(rvs, return_counts=True)
    relative = counts / counts.sum()
    obs_freqs[:relative.size] = relative
    pval = chisquare(obs_freqs, pv).pvalue
    assert pval > 0.1
324
+
325
+
326
def test_warning_center_not_in_domain():
    """UNU.RAN warns when the center — given explicitly or computed
    without the domain — lies outside the domain."""
    msg = "102 : center moved into domain of distribution"
    for extra_kwargs in ({"center": 0}, {}):
        with pytest.warns(RuntimeWarning, match=msg):
            NumericalInversePolynomial(StandardNormal(), domain=(3, 5),
                                       **extra_kwargs)
334
+
335
+
336
@pytest.mark.parametrize('method', ["SimpleRatioUniforms",
                                    "NumericalInversePolynomial",
                                    "TransformedDensityRejection"])
def test_error_mode_not_in_domain(method):
    """UNU.RAN raises an error if the mode is not in the domain.

    Unlike `center` (an approximation that is silently moved), `mode` is
    supposed to be the exact value, so an out-of-domain mode is a hard
    error rather than a warning.
    """
    generator_cls = getattr(stats.sampling, method)
    with pytest.raises(UNURANError, match="17 : mode not in domain"):
        generator_cls(StandardNormal(), mode=0, domain=(3, 5))
348
+
349
+
350
@pytest.mark.parametrize('method', ["NumericalInverseHermite",
                                    "NumericalInversePolynomial"])
class TestQRVS:
    """Tests for quasi-random variate sampling (``qrvs``) on the two
    inversion-based generators that support it."""

    def test_input_validation(self, method):
        # a non-QMCEngine `qmc_engine` must be rejected
        match = "`qmc_engine` must be an instance of..."
        with pytest.raises(ValueError, match=match):
            Method = getattr(stats.sampling, method)
            gen = Method(StandardNormal())
            gen.qrvs(qmc_engine=0)

        # issues with QMCEngines and old NumPy
        Method = getattr(stats.sampling, method)
        gen = Method(StandardNormal())

        # `d` conflicting with the engine's own dimension must be rejected
        match = "`d` must be consistent with dimension of `qmc_engine`."
        with pytest.raises(ValueError, match=match):
            gen.qrvs(d=3, qmc_engine=stats.qmc.Halton(2))

    qrngs = [None, stats.qmc.Sobol(1, seed=0), stats.qmc.Halton(3, seed=0)]
    # `size=None` should not add anything to the shape, `size=1` should
    sizes = [(None, tuple()), (1, (1,)), (4, (4,)),
             ((4,), (4,)), ((2, 4), (2, 4))]  # type: ignore
    # Neither `d=None` nor `d=1` should add anything to the shape
    ds = [(None, tuple()), (1, tuple()), (3, (3,))]

    @pytest.mark.parametrize('qrng', qrngs)
    @pytest.mark.parametrize('size_in, size_out', sizes)
    @pytest.mark.parametrize('d_in, d_out', ds)
    def test_QRVS_shape_consistency(self, qrng, size_in, size_out,
                                    d_in, d_out, method):
        # output shape must be `size_out + d_out` for every combination of
        # `size`, `d`, and engine dimension
        w32 = sys.platform == "win32" and platform.architecture()[0] == "32bit"
        if w32 and method == "NumericalInversePolynomial":
            pytest.xfail("NumericalInversePolynomial.qrvs fails for Win "
                         "32-bit")

        dist = StandardNormal()
        Method = getattr(stats.sampling, method)
        gen = Method(dist)

        # If d and qrng.d are inconsistent, an error is raised
        if d_in is not None and qrng is not None and qrng.d != d_in:
            match = "`d` must be consistent with dimension of `qmc_engine`."
            with pytest.raises(ValueError, match=match):
                gen.qrvs(size_in, d=d_in, qmc_engine=qrng)
            return

        # Sometimes d is really determined by qrng
        if d_in is None and qrng is not None and qrng.d != 1:
            d_out = (qrng.d,)

        shape_expected = size_out + d_out

        # keep an identically-seeded engine to reproduce the expected draw
        qrng2 = deepcopy(qrng)
        qrvs = gen.qrvs(size=size_in, d=d_in, qmc_engine=qrng)
        if size_in is not None:
            assert qrvs.shape == shape_expected

        if qrng2 is not None:
            # qrvs must equal the inverse-CDF transform of the raw QMC draw
            uniform = qrng2.random(np.prod(size_in) or 1)
            qrvs2 = stats.norm.ppf(uniform).reshape(shape_expected)
            assert_allclose(qrvs, qrvs2, atol=1e-12)

    def test_QRVS_size_tuple(self, method):
        # QMCEngine samples are always of shape (n, d). When `size` is a tuple,
        # we set `n = prod(size)` in the call to qmc_engine.random, transform
        # the sample, and reshape it to the final dimensions. When we reshape,
        # we need to be careful, because the _columns_ of the sample returned
        # by a QMCEngine are "independent"-ish, but the elements within the
        # columns are not. We need to make sure that this doesn't get mixed up
        # by reshaping: qrvs[..., i] should remain "independent"-ish of
        # qrvs[..., i+1], but the elements within qrvs[..., i] should be
        # transformed from the same low-discrepancy sequence.

        dist = StandardNormal()
        Method = getattr(stats.sampling, method)
        gen = Method(dist)

        size = (3, 4)
        d = 5
        qrng = stats.qmc.Halton(d, seed=0)
        qrng2 = stats.qmc.Halton(d, seed=0)

        uniform = qrng2.random(np.prod(size))

        qrvs = gen.qrvs(size=size, d=d, qmc_engine=qrng)
        qrvs2 = stats.norm.ppf(uniform)

        # column-by-column comparison against a manual transform/reshape
        for i in range(d):
            sample = qrvs[..., i]
            sample2 = qrvs2[:, i].reshape(size)
            assert_allclose(sample, sample2, atol=1e-12)
441
+
442
+
443
class TestTransformedDensityRejection:
    """Tests specific to the TransformedDensityRejection (TDR) generator.

    Fix applied: ``test_bad_c`` previously ignored its parametrized ``c``
    and always constructed the generator with ``c=-1.``, so four of the
    five invalid values were never exercised.  It now passes ``c=c``.
    """

    # Simple Custom Distribution: p(x) = 3/4 * (1 - x^2) on [-1, 1]
    class dist0:
        def pdf(self, x):
            return 3/4 * (1-x*x)

        def dpdf(self, x):
            return 3/4 * (-2*x)

        def cdf(self, x):
            return 3/4 * (x - x**3/3 + 2/3)

        def support(self):
            return -1, 1

    # Normal distribution with scale 0.1 (unnormalized pdf; the exact
    # moments mv1 = [0., 0.01] below confirm the intended variance).
    class dist1:
        def pdf(self, x):
            return stats.norm._pdf(x / 0.1)

        def dpdf(self, x):
            return -x / 0.01 * stats.norm._pdf(x / 0.1)

        def cdf(self, x):
            return stats.norm._cdf(x / 0.1)

    # pdf with piecewise linear function as transformed density
    # with T = -1/sqrt with shift. Taken from UNU.RAN test suite
    # (from file t_tdr_ps.c)
    class dist2:
        def __init__(self, shift):
            self.shift = shift

        def pdf(self, x):
            x -= self.shift
            y = 1. / (abs(x) + 1.)
            return 0.5 * y * y

        def dpdf(self, x):
            x -= self.shift
            y = 1. / (abs(x) + 1.)
            y = y * y * y
            return y if (x < 0.) else -y

        def cdf(self, x):
            x -= self.shift
            if x <= 0.:
                return 0.5 / (1. - x)
            else:
                return 1. - 0.5 / (1. + x)

    dists = [dist0(), dist1(), dist2(0.), dist2(10000.)]

    # exact mean and variance of the distributions in the list dists
    mv0 = [0., 4./15.]
    mv1 = [0., 0.01]
    mv2 = [0., np.inf]
    mv3 = [10000., np.inf]
    mvs = [mv0, mv1, mv2, mv3]

    @pytest.mark.parametrize("dist, mv_ex",
                             zip(dists, mvs))
    def test_basic(self, dist, mv_ex):
        # sampling from each reference distribution must reproduce its
        # exact moments and pass the goodness-of-fit check
        with suppress_warnings() as sup:
            # filter the warnings thrown by UNU.RAN
            sup.filter(RuntimeWarning)
            rng = TransformedDensityRejection(dist, random_state=42)
            check_cont_samples(rng, dist, mv_ex)

    # PDF 0 everywhere => bad construction points
    bad_pdfs = [(lambda x: 0, UNURANError, r"50 : bad construction points.")]
    bad_pdfs += bad_pdfs_common  # type: ignore[arg-type]

    @pytest.mark.parametrize("pdf, err, msg", bad_pdfs)
    def test_bad_pdf(self, pdf, err, msg):
        # invalid pdf callbacks must surface the recorded error
        class dist:
            pass
        dist.pdf = pdf
        dist.dpdf = lambda x: 1  # an arbitrary dPDF
        with pytest.raises(err, match=msg):
            TransformedDensityRejection(dist)

    @pytest.mark.parametrize("dpdf, err, msg", bad_dpdf_common)
    def test_bad_dpdf(self, dpdf, err, msg):
        # invalid dpdf callbacks must surface the recorded error
        class dist:
            pass
        dist.pdf = lambda x: x
        dist.dpdf = dpdf
        with pytest.raises(err, match=msg):
            TransformedDensityRejection(dist, domain=(1, 10))

    # test domains with inf + nan in them. need to write a custom test for
    # this because not all methods support infinite tails.
    @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
    def test_inf_nan_domains(self, domain, err, msg):
        with pytest.raises(err, match=msg):
            TransformedDensityRejection(StandardNormal(), domain=domain)

    @pytest.mark.parametrize("construction_points", [-1, 0, 0.1])
    def test_bad_construction_points_scalar(self, construction_points):
        # a scalar number of construction points must be a positive integer
        with pytest.raises(ValueError, match=r"`construction_points` must be "
                                             r"a positive integer."):
            TransformedDensityRejection(
                StandardNormal(), construction_points=construction_points
            )

    def test_bad_construction_points_array(self):
        # empty array
        construction_points = []
        with pytest.raises(ValueError, match=r"`construction_points` must "
                                             r"either be a "
                                             r"scalar or a non-empty array."):
            TransformedDensityRejection(
                StandardNormal(), construction_points=construction_points
            )

        # construction_points not monotonically increasing
        construction_points = [1, 1, 1, 1, 1, 1]
        with pytest.warns(RuntimeWarning, match=r"33 : starting points not "
                                                r"strictly monotonically "
                                                r"increasing"):
            TransformedDensityRejection(
                StandardNormal(), construction_points=construction_points
            )

        # construction_points containing nans
        construction_points = [np.nan, np.nan, np.nan]
        with pytest.raises(UNURANError, match=r"50 : bad construction "
                                              r"points."):
            TransformedDensityRejection(
                StandardNormal(), construction_points=construction_points
            )

        # construction_points out of domain
        construction_points = [-10, 10]
        with pytest.warns(RuntimeWarning, match=r"50 : starting point out of "
                                                r"domain"):
            TransformedDensityRejection(
                StandardNormal(), domain=(-3, 3),
                construction_points=construction_points
            )

    @pytest.mark.parametrize("c", [-1., np.nan, np.inf, 0.1, 1.])
    def test_bad_c(self, c):
        # Only c = -0.5 and c = 0. select valid TDR transformations.
        # FIX: pass the parametrized value (previously hard-coded ``c=-1.``,
        # which made the parametrization a no-op).
        msg = r"`c` must either be -0.5 or 0."
        with pytest.raises(ValueError, match=msg):
            TransformedDensityRejection(StandardNormal(), c=c)

    u = [np.linspace(0, 1, num=1000), [], [[]], [np.nan],
         [-np.inf, np.nan, np.inf], 0,
         [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]]

    @pytest.mark.parametrize("u", u)
    def test_ppf_hat(self, u):
        # Increase the `max_squeeze_hat_ratio` so the ppf_hat is more
        # accurate.
        rng = TransformedDensityRejection(StandardNormal(),
                                          max_squeeze_hat_ratio=0.9999)
        # Older versions of NumPy throw RuntimeWarnings for comparisons
        # with nan.
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in greater")
            sup.filter(RuntimeWarning, "invalid value encountered in "
                                       "greater_equal")
            sup.filter(RuntimeWarning, "invalid value encountered in less")
            sup.filter(RuntimeWarning, "invalid value encountered in "
                                       "less_equal")
            res = rng.ppf_hat(u)
            expected = stats.norm.ppf(u)
        assert_allclose(res, expected, rtol=1e-3, atol=1e-5)
        assert res.shape == expected.shape

    def test_bad_dist(self):
        # Empty distribution
        class dist:
            ...

        msg = r"`pdf` required but not found."
        with pytest.raises(ValueError, match=msg):
            TransformedDensityRejection(dist)

        # dPDF not present in dist
        class dist:
            pdf = lambda x: 1-x*x  # noqa: E731

        msg = r"`dpdf` required but not found."
        with pytest.raises(ValueError, match=msg):
            TransformedDensityRejection(dist)
631
+
632
+
633
class TestDiscreteAliasUrn:
    """Tests for the DiscreteAliasUrn (DAU) generator."""

    # DAU fails on these probably because of large domains and small
    # computation errors in PMF. Mean/SD match but chi-squared test fails.
    basic_fail_dists = {
        'nchypergeom_fisher',  # numerical errors on tails
        'nchypergeom_wallenius',  # numerical errors on tails
        'randint'  # fails on 32-bit ubuntu
    }

    @pytest.mark.parametrize("distname, params", distdiscrete)
    def test_basic(self, distname, params):
        # sample from every supported SciPy discrete distribution and
        # compare moments + frequencies against the exact pmf
        if distname in self.basic_fail_dists:
            msg = ("DAU fails on these probably because of large domains "
                   "and small computation errors in PMF.")
            pytest.skip(msg)
        if not isinstance(distname, str):
            dist = distname
        else:
            dist = getattr(stats, distname)
        dist = dist(*params)
        domain = dist.support()
        if not np.isfinite(domain[1] - domain[0]):
            # DAU only works with finite domain. So, skip the distributions
            # with infinite tails.
            pytest.skip("DAU only works with a finite domain.")
        k = np.arange(domain[0], domain[1]+1)
        pv = dist.pmf(k)
        mv_ex = dist.stats('mv')
        rng = DiscreteAliasUrn(dist, random_state=42)
        check_discr_samples(rng, pv, mv_ex)

    # Can't use bad_pmf_common here as we evaluate PMF early on to avoid
    # unhelpful errors from UNU.RAN.
    bad_pmf = [
        # inf returned
        (lambda x: np.inf, ValueError,
         r"must contain only finite / non-nan values"),
        # nan returned
        (lambda x: np.nan, ValueError,
         r"must contain only finite / non-nan values"),
        # all zeros
        (lambda x: 0.0, ValueError,
         r"must contain at least one non-zero value"),
        # Undefined name inside the function
        (lambda x: foo, NameError,  # type: ignore[name-defined]  # noqa: F821
         r"name 'foo' is not defined"),
        # Returning wrong type.
        (lambda x: [], ValueError,
         r"setting an array element with a sequence."),
        # probabilities < 0
        (lambda x: -x, UNURANError,
         r"50 : probability < 0"),
        # signature of PMF wrong
        (lambda: 1.0, TypeError,
         r"takes 0 positional arguments but 1 was given")
    ]

    @pytest.mark.parametrize("pmf, err, msg", bad_pmf)
    def test_bad_pmf(self, pmf, err, msg):
        # invalid pmf callbacks must surface the recorded error
        class dist:
            pass
        dist.pmf = pmf
        with pytest.raises(err, match=msg):
            DiscreteAliasUrn(dist, domain=(1, 10))

    @pytest.mark.parametrize("pv", [[0.18, 0.02, 0.8],
                                    [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
    def test_sampling_with_pv(self, pv):
        # a raw probability vector (possibly unnormalized) must work too
        pv = np.asarray(pv, dtype=np.float64)
        rng = DiscreteAliasUrn(pv, random_state=123)
        rng.rvs(100_000)
        pv = pv / pv.sum()
        variates = np.arange(0, len(pv))
        # test if the first few moments match
        m_expected = np.average(variates, weights=pv)
        v_expected = np.average((variates - m_expected) ** 2, weights=pv)
        mv_expected = m_expected, v_expected
        check_discr_samples(rng, pv, mv_expected)

    @pytest.mark.parametrize("pv, msg", bad_pv_common)
    def test_bad_pv(self, pv, msg):
        with pytest.raises(ValueError, match=msg):
            DiscreteAliasUrn(pv)

    # DAU doesn't support infinite tails. So, it should throw an error when
    # inf is present in the domain.
    inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
                  (0, np.inf), (-np.inf, 0)]

    @pytest.mark.parametrize("domain", inf_domain)
    def test_inf_domain(self, domain):
        with pytest.raises(ValueError, match=r"must be finite"):
            DiscreteAliasUrn(stats.binom(10, 0.2), domain=domain)

    def test_bad_urn_factor(self):
        # a non-positive urn factor triggers a UNU.RAN warning
        with pytest.warns(RuntimeWarning, match=r"relative urn size < 1."):
            DiscreteAliasUrn([0.5, 0.5], urn_factor=-1)

    def test_bad_args(self):
        # a pmf without a domain cannot be tabulated
        msg = (r"`domain` must be provided when the "
               r"probability vector is not available.")

        class dist:
            def pmf(self, x):
                return x

        with pytest.raises(ValueError, match=msg):
            DiscreteAliasUrn(dist)

    def test_gh19359(self):
        # regression test: large probability vector (gh-19359)
        pv = special.softmax(np.ones((1533,)))
        rng = DiscreteAliasUrn(pv, random_state=42)
        # check the correctness
        check_discr_samples(rng, pv, (1532 / 2, (1532**2 - 1) / 12),
                            rtol=5e-3)
748
+
749
+
750
+ class TestNumericalInversePolynomial:
751
+ # Simple Custom Distribution
752
+ class dist0:
753
+ def pdf(self, x):
754
+ return 3/4 * (1-x*x)
755
+
756
+ def cdf(self, x):
757
+ return 3/4 * (x - x**3/3 + 2/3)
758
+
759
+ def support(self):
760
+ return -1, 1
761
+
762
+ # Standard Normal Distribution
763
+ class dist1:
764
+ def pdf(self, x):
765
+ return stats.norm._pdf(x / 0.1)
766
+
767
+ def cdf(self, x):
768
+ return stats.norm._cdf(x / 0.1)
769
+
770
+ # Sin 2 distribution
771
+ # / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 1
772
+ # f(x) = <
773
+ # \ 0 otherwise
774
+ # Taken from UNU.RAN test suite (from file t_pinv.c)
775
+ class dist2:
776
+ def pdf(self, x):
777
+ return 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))
778
+
779
+ def cdf(self, x):
780
+ return (0.05*(x + 1) +
781
+ 0.9*(1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) /
782
+ (4.*np.pi))
783
+
784
+ def support(self):
785
+ return -1, 1
786
+
787
+ # Sin 10 distribution
788
+ # / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 5
789
+ # f(x) = <
790
+ # \ 0 otherwise
791
+ # Taken from UNU.RAN test suite (from file t_pinv.c)
792
+ class dist3:
793
+ def pdf(self, x):
794
+ return 0.2 * (0.05 + 0.45 * (1 + np.sin(2*np.pi*x)))
795
+
796
+ def cdf(self, x):
797
+ return x/10. + 0.5 + 0.09/(2*np.pi) * (np.cos(10*np.pi) -
798
+ np.cos(2*np.pi*x))
799
+
800
+ def support(self):
801
+ return -5, 5
802
+
803
+ dists = [dist0(), dist1(), dist2(), dist3()]
804
+
805
+ # exact mean and variance of the distributions in the list dists
806
+ mv0 = [0., 4./15.]
807
+ mv1 = [0., 0.01]
808
+ mv2 = [-0.45/np.pi, 2/3*0.5 - 0.45**2/np.pi**2]
809
+ mv3 = [-0.45/np.pi, 0.2 * 250/3 * 0.5 - 0.45**2/np.pi**2]
810
+ mvs = [mv0, mv1, mv2, mv3]
811
+
812
+ @pytest.mark.parametrize("dist, mv_ex",
813
+ zip(dists, mvs))
814
+ def test_basic(self, dist, mv_ex):
815
+ rng = NumericalInversePolynomial(dist, random_state=42)
816
+ check_cont_samples(rng, dist, mv_ex)
817
+
818
+ @pytest.mark.xslow
819
+ @pytest.mark.parametrize("distname, params", distcont)
820
+ def test_basic_all_scipy_dists(self, distname, params):
821
+
822
+ very_slow_dists = ['anglit', 'gausshyper', 'kappa4',
823
+ 'ksone', 'kstwo', 'levy_l',
824
+ 'levy_stable', 'studentized_range',
825
+ 'trapezoid', 'triang', 'vonmises']
826
+ # for these distributions, some assertions fail due to minor
827
+ # numerical differences. They can be avoided either by changing
828
+ # the seed or by increasing the u_resolution.
829
+ fail_dists = ['chi2', 'fatiguelife', 'gibrat',
830
+ 'halfgennorm', 'lognorm', 'ncf',
831
+ 'ncx2', 'pareto', 't']
832
+ # for these distributions, skip the check for agreement between sample
833
+ # moments and true moments. We cannot expect them to pass due to the
834
+ # high variance of sample moments.
835
+ skip_sample_moment_check = ['rel_breitwigner']
836
+
837
+ if distname in very_slow_dists:
838
+ pytest.skip(f"PINV too slow for {distname}")
839
+ if distname in fail_dists:
840
+ pytest.skip(f"PINV fails for {distname}")
841
+ dist = (getattr(stats, distname)
842
+ if isinstance(distname, str)
843
+ else distname)
844
+ dist = dist(*params)
845
+ with suppress_warnings() as sup:
846
+ sup.filter(RuntimeWarning)
847
+ rng = NumericalInversePolynomial(dist, random_state=42)
848
+ if distname in skip_sample_moment_check:
849
+ return
850
+ check_cont_samples(rng, dist, [dist.mean(), dist.var()])
851
+
852
+ @pytest.mark.parametrize("pdf, err, msg", bad_pdfs_common)
853
+ def test_bad_pdf(self, pdf, err, msg):
854
+ class dist:
855
+ pass
856
+ dist.pdf = pdf
857
+ with pytest.raises(err, match=msg):
858
+ NumericalInversePolynomial(dist, domain=[0, 5])
859
+
860
+ @pytest.mark.parametrize("logpdf, err, msg", bad_logpdfs_common)
861
+ def test_bad_logpdf(self, logpdf, err, msg):
862
+ class dist:
863
+ pass
864
+ dist.logpdf = logpdf
865
+ with pytest.raises(err, match=msg):
866
+ NumericalInversePolynomial(dist, domain=[0, 5])
867
+
868
+ # test domains with inf + nan in them. need to write a custom test for
869
+ # this because not all methods support infinite tails.
870
+ @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
871
+ def test_inf_nan_domains(self, domain, err, msg):
872
+ with pytest.raises(err, match=msg):
873
+ NumericalInversePolynomial(StandardNormal(), domain=domain)
874
+
875
+ u = [
876
+ # test if quantile 0 and 1 return -inf and inf respectively and check
877
+ # the correctness of the PPF for equidistant points between 0 and 1.
878
+ np.linspace(0, 1, num=10000),
879
+ # test the PPF method for empty arrays
880
+ [], [[]],
881
+ # test if nans and infs return nan result.
882
+ [np.nan], [-np.inf, np.nan, np.inf],
883
+ # test if a scalar is returned for a scalar input.
884
+ 0,
885
+ # test for arrays with nans, values greater than 1 and less than 0,
886
+ # and some valid values.
887
+ [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
888
+ ]
889
+
890
+ @pytest.mark.parametrize("u", u)
891
+ def test_ppf(self, u):
892
+ dist = StandardNormal()
893
+ rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
894
+ # Older versions of NumPy throw RuntimeWarnings for comparisons
895
+ # with nan.
896
+ with suppress_warnings() as sup:
897
+ sup.filter(RuntimeWarning, "invalid value encountered in greater")
898
+ sup.filter(RuntimeWarning, "invalid value encountered in "
899
+ "greater_equal")
900
+ sup.filter(RuntimeWarning, "invalid value encountered in less")
901
+ sup.filter(RuntimeWarning, "invalid value encountered in "
902
+ "less_equal")
903
+ res = rng.ppf(u)
904
+ expected = stats.norm.ppf(u)
905
+ assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
906
+ assert res.shape == expected.shape
907
+
908
+ x = [np.linspace(-10, 10, num=10000), [], [[]], [np.nan],
909
+ [-np.inf, np.nan, np.inf], 0,
910
+ [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-np.inf, 3, 4]]]
911
+
912
+ @pytest.mark.parametrize("x", x)
913
+ def test_cdf(self, x):
914
+ dist = StandardNormal()
915
+ rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
916
+ # Older versions of NumPy throw RuntimeWarnings for comparisons
917
+ # with nan.
918
+ with suppress_warnings() as sup:
919
+ sup.filter(RuntimeWarning, "invalid value encountered in greater")
920
+ sup.filter(RuntimeWarning, "invalid value encountered in "
921
+ "greater_equal")
922
+ sup.filter(RuntimeWarning, "invalid value encountered in less")
923
+ sup.filter(RuntimeWarning, "invalid value encountered in "
924
+ "less_equal")
925
+ res = rng.cdf(x)
926
+ expected = stats.norm.cdf(x)
927
+ assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
928
+ assert res.shape == expected.shape
929
+
930
+ @pytest.mark.slow
931
+ def test_u_error(self):
932
+ dist = StandardNormal()
933
+ rng = NumericalInversePolynomial(dist, u_resolution=1e-10)
934
+ max_error, mae = rng.u_error()
935
+ assert max_error < 1e-10
936
+ assert mae <= max_error
937
+ rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
938
+ max_error, mae = rng.u_error()
939
+ assert max_error < 1e-14
940
+ assert mae <= max_error
941
+
942
+ bad_orders = [1, 4.5, 20, np.inf, np.nan]
943
+ bad_u_resolution = [1e-20, 1e-1, np.inf, np.nan]
944
+
945
+ @pytest.mark.parametrize("order", bad_orders)
946
+ def test_bad_orders(self, order):
947
+ dist = StandardNormal()
948
+
949
+ msg = r"`order` must be an integer in the range \[3, 17\]."
950
+ with pytest.raises(ValueError, match=msg):
951
+ NumericalInversePolynomial(dist, order=order)
952
+
953
+ @pytest.mark.parametrize("u_resolution", bad_u_resolution)
954
+ def test_bad_u_resolution(self, u_resolution):
955
+ msg = r"`u_resolution` must be between 1e-15 and 1e-5."
956
+ with pytest.raises(ValueError, match=msg):
957
+ NumericalInversePolynomial(StandardNormal(),
958
+ u_resolution=u_resolution)
959
+
960
+ def test_bad_args(self):
961
+
962
+ class BadDist:
963
+ def cdf(self, x):
964
+ return stats.norm._cdf(x)
965
+
966
+ dist = BadDist()
967
+ msg = r"Either of the methods `pdf` or `logpdf` must be specified"
968
+ with pytest.raises(ValueError, match=msg):
969
+ rng = NumericalInversePolynomial(dist)
970
+
971
+ dist = StandardNormal()
972
+ rng = NumericalInversePolynomial(dist)
973
+ msg = r"`sample_size` must be greater than or equal to 1000."
974
+ with pytest.raises(ValueError, match=msg):
975
+ rng.u_error(10)
976
+
977
+ class Distribution:
978
+ def pdf(self, x):
979
+ return np.exp(-0.5 * x*x)
980
+
981
+ dist = Distribution()
982
+ rng = NumericalInversePolynomial(dist)
983
+ msg = r"Exact CDF required but not found."
984
+ with pytest.raises(ValueError, match=msg):
985
+ rng.u_error()
986
+
987
+ def test_logpdf_pdf_consistency(self):
988
+ # 1. check that PINV works with pdf and logpdf only
989
+ # 2. check that generated ppf is the same (up to a small tolerance)
990
+
991
+ class MyDist:
992
+ pass
993
+
994
+ # create generator from dist with only pdf
995
+ dist_pdf = MyDist()
996
+ dist_pdf.pdf = lambda x: math.exp(-x*x/2)
997
+ rng1 = NumericalInversePolynomial(dist_pdf)
998
+
999
+ # create dist with only logpdf
1000
+ dist_logpdf = MyDist()
1001
+ dist_logpdf.logpdf = lambda x: -x*x/2
1002
+ rng2 = NumericalInversePolynomial(dist_logpdf)
1003
+
1004
+ q = np.linspace(1e-5, 1-1e-5, num=100)
1005
+ assert_allclose(rng1.ppf(q), rng2.ppf(q))
1006
+
1007
+
1008
+ class TestNumericalInverseHermite:
1009
+ # / (1 +sin(2 Pi x))/2 if |x| <= 1
1010
+ # f(x) = <
1011
+ # \ 0 otherwise
1012
+ # Taken from UNU.RAN test suite (from file t_hinv.c)
1013
+ class dist0:
1014
+ def pdf(self, x):
1015
+ return 0.5*(1. + np.sin(2.*np.pi*x))
1016
+
1017
+ def dpdf(self, x):
1018
+ return np.pi*np.cos(2.*np.pi*x)
1019
+
1020
+ def cdf(self, x):
1021
+ return (1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) / (4.*np.pi)
1022
+
1023
+ def support(self):
1024
+ return -1, 1
1025
+
1026
+ # / Max(sin(2 Pi x)),0)Pi/2 if -1 < x <0.5
1027
+ # f(x) = <
1028
+ # \ 0 otherwise
1029
+ # Taken from UNU.RAN test suite (from file t_hinv.c)
1030
+ class dist1:
1031
+ def pdf(self, x):
1032
+ if (x <= -0.5):
1033
+ return np.sin((2. * np.pi) * x) * 0.5 * np.pi
1034
+ if (x < 0.):
1035
+ return 0.
1036
+ if (x <= 0.5):
1037
+ return np.sin((2. * np.pi) * x) * 0.5 * np.pi
1038
+
1039
+ def dpdf(self, x):
1040
+ if (x <= -0.5):
1041
+ return np.cos((2. * np.pi) * x) * np.pi * np.pi
1042
+ if (x < 0.):
1043
+ return 0.
1044
+ if (x <= 0.5):
1045
+ return np.cos((2. * np.pi) * x) * np.pi * np.pi
1046
+
1047
+ def cdf(self, x):
1048
+ if (x <= -0.5):
1049
+ return 0.25 * (1 - np.cos((2. * np.pi) * x))
1050
+ if (x < 0.):
1051
+ return 0.5
1052
+ if (x <= 0.5):
1053
+ return 0.75 - 0.25 * np.cos((2. * np.pi) * x)
1054
+
1055
+ def support(self):
1056
+ return -1, 0.5
1057
+
1058
+ dists = [dist0(), dist1()]
1059
+
1060
+ # exact mean and variance of the distributions in the list dists
1061
+ mv0 = [-1/(2*np.pi), 1/3 - 1/(4*np.pi*np.pi)]
1062
+ mv1 = [-1/4, 3/8-1/(2*np.pi*np.pi) - 1/16]
1063
+ mvs = [mv0, mv1]
1064
+
1065
+ @pytest.mark.parametrize("dist, mv_ex",
1066
+ zip(dists, mvs))
1067
+ @pytest.mark.parametrize("order", [3, 5])
1068
+ def test_basic(self, dist, mv_ex, order):
1069
+ rng = NumericalInverseHermite(dist, order=order, random_state=42)
1070
+ check_cont_samples(rng, dist, mv_ex)
1071
+
1072
+ # test domains with inf + nan in them. need to write a custom test for
1073
+ # this because not all methods support infinite tails.
1074
+ @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
1075
+ def test_inf_nan_domains(self, domain, err, msg):
1076
+ with pytest.raises(err, match=msg):
1077
+ NumericalInverseHermite(StandardNormal(), domain=domain)
1078
+
1079
+ def basic_test_all_scipy_dists(self, distname, shapes):
1080
+ slow_dists = {'ksone', 'kstwo', 'levy_stable', 'skewnorm'}
1081
+ fail_dists = {'beta', 'gausshyper', 'geninvgauss', 'ncf', 'nct',
1082
+ 'norminvgauss', 'genhyperbolic', 'studentized_range',
1083
+ 'vonmises', 'kappa4', 'invgauss', 'wald'}
1084
+
1085
+ if distname in slow_dists:
1086
+ pytest.skip("Distribution is too slow")
1087
+ if distname in fail_dists:
1088
+ # specific reasons documented in gh-13319
1089
+ # https://github.com/scipy/scipy/pull/13319#discussion_r626188955
1090
+ pytest.xfail("Fails - usually due to inaccurate CDF/PDF")
1091
+
1092
+ np.random.seed(0)
1093
+
1094
+ dist = getattr(stats, distname)(*shapes)
1095
+ fni = NumericalInverseHermite(dist)
1096
+
1097
+ x = np.random.rand(10)
1098
+ p_tol = np.max(np.abs(dist.ppf(x)-fni.ppf(x))/np.abs(dist.ppf(x)))
1099
+ u_tol = np.max(np.abs(dist.cdf(fni.ppf(x)) - x))
1100
+
1101
+ assert p_tol < 1e-8
1102
+ assert u_tol < 1e-12
1103
+
1104
+ @pytest.mark.filterwarnings('ignore::RuntimeWarning')
1105
+ @pytest.mark.xslow
1106
+ @pytest.mark.parametrize(("distname", "shapes"), distcont)
1107
+ def test_basic_all_scipy_dists(self, distname, shapes):
1108
+ # if distname == "truncnorm":
1109
+ # pytest.skip("Tested separately")
1110
+ self.basic_test_all_scipy_dists(distname, shapes)
1111
+
1112
+ @pytest.mark.filterwarnings('ignore::RuntimeWarning')
1113
+ def test_basic_truncnorm_gh17155(self):
1114
+ self.basic_test_all_scipy_dists("truncnorm", (0.1, 2))
1115
+
1116
+ def test_input_validation(self):
1117
+ match = r"`order` must be either 1, 3, or 5."
1118
+ with pytest.raises(ValueError, match=match):
1119
+ NumericalInverseHermite(StandardNormal(), order=2)
1120
+
1121
+ match = "`cdf` required but not found"
1122
+ with pytest.raises(ValueError, match=match):
1123
+ NumericalInverseHermite("norm")
1124
+
1125
+ match = "could not convert string to float"
1126
+ with pytest.raises(ValueError, match=match):
1127
+ NumericalInverseHermite(StandardNormal(),
1128
+ u_resolution='ekki')
1129
+
1130
+ rngs = [None, 0, np.random.RandomState(0)]
1131
+ rngs.append(np.random.default_rng(0)) # type: ignore
1132
+ sizes = [(None, tuple()), (8, (8,)), ((4, 5, 6), (4, 5, 6))]
1133
+
1134
+ @pytest.mark.parametrize('rng', rngs)
1135
+ @pytest.mark.parametrize('size_in, size_out', sizes)
1136
+ def test_RVS(self, rng, size_in, size_out):
1137
+ dist = StandardNormal()
1138
+ fni = NumericalInverseHermite(dist)
1139
+
1140
+ rng2 = deepcopy(rng)
1141
+ rvs = fni.rvs(size=size_in, random_state=rng)
1142
+ if size_in is not None:
1143
+ assert rvs.shape == size_out
1144
+
1145
+ if rng2 is not None:
1146
+ rng2 = check_random_state(rng2)
1147
+ uniform = rng2.uniform(size=size_in)
1148
+ rvs2 = stats.norm.ppf(uniform)
1149
+ assert_allclose(rvs, rvs2)
1150
+
1151
+ def test_inaccurate_CDF(self):
1152
+ # CDF function with inaccurate tail cannot be inverted; see gh-13319
1153
+ # https://github.com/scipy/scipy/pull/13319#discussion_r626188955
1154
+ shapes = (2.3098496451481823, 0.6268795430096368)
1155
+ match = ("98 : one or more intervals very short; possibly due to "
1156
+ "numerical problems with a pole or very flat tail")
1157
+
1158
+ # fails with default tol
1159
+ with pytest.warns(RuntimeWarning, match=match):
1160
+ NumericalInverseHermite(stats.beta(*shapes))
1161
+
1162
+ # no error with coarser tol
1163
+ NumericalInverseHermite(stats.beta(*shapes), u_resolution=1e-8)
1164
+
1165
+ def test_custom_distribution(self):
1166
+ dist1 = StandardNormal()
1167
+ fni1 = NumericalInverseHermite(dist1)
1168
+
1169
+ dist2 = stats.norm()
1170
+ fni2 = NumericalInverseHermite(dist2)
1171
+
1172
+ assert_allclose(fni1.rvs(random_state=0), fni2.rvs(random_state=0))
1173
+
1174
+ u = [
1175
+ # check the correctness of the PPF for equidistant points between
1176
+ # 0.02 and 0.98.
1177
+ np.linspace(0., 1., num=10000),
1178
+ # test the PPF method for empty arrays
1179
+ [], [[]],
1180
+ # test if nans and infs return nan result.
1181
+ [np.nan], [-np.inf, np.nan, np.inf],
1182
+ # test if a scalar is returned for a scalar input.
1183
+ 0,
1184
+ # test for arrays with nans, values greater than 1 and less than 0,
1185
+ # and some valid values.
1186
+ [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
1187
+ ]
1188
+
1189
+ @pytest.mark.parametrize("u", u)
1190
+ def test_ppf(self, u):
1191
+ dist = StandardNormal()
1192
+ rng = NumericalInverseHermite(dist, u_resolution=1e-12)
1193
+ # Older versions of NumPy throw RuntimeWarnings for comparisons
1194
+ # with nan.
1195
+ with suppress_warnings() as sup:
1196
+ sup.filter(RuntimeWarning, "invalid value encountered in greater")
1197
+ sup.filter(RuntimeWarning, "invalid value encountered in "
1198
+ "greater_equal")
1199
+ sup.filter(RuntimeWarning, "invalid value encountered in less")
1200
+ sup.filter(RuntimeWarning, "invalid value encountered in "
1201
+ "less_equal")
1202
+ res = rng.ppf(u)
1203
+ expected = stats.norm.ppf(u)
1204
+ assert_allclose(res, expected, rtol=1e-9, atol=3e-10)
1205
+ assert res.shape == expected.shape
1206
+
1207
+ @pytest.mark.slow
1208
+ def test_u_error(self):
1209
+ dist = StandardNormal()
1210
+ rng = NumericalInverseHermite(dist, u_resolution=1e-10)
1211
+ max_error, mae = rng.u_error()
1212
+ assert max_error < 1e-10
1213
+ assert mae <= max_error
1214
+ with suppress_warnings() as sup:
1215
+ # ignore warning about u-resolution being too small.
1216
+ sup.filter(RuntimeWarning)
1217
+ rng = NumericalInverseHermite(dist, u_resolution=1e-14)
1218
+ max_error, mae = rng.u_error()
1219
+ assert max_error < 1e-14
1220
+ assert mae <= max_error
1221
+
1222
+
1223
+ class TestDiscreteGuideTable:
1224
+ basic_fail_dists = {
1225
+ 'nchypergeom_fisher', # numerical errors on tails
1226
+ 'nchypergeom_wallenius', # numerical errors on tails
1227
+ 'randint' # fails on 32-bit ubuntu
1228
+ }
1229
+
1230
+ def test_guide_factor_gt3_raises_warning(self):
1231
+ pv = [0.1, 0.3, 0.6]
1232
+ urng = np.random.default_rng()
1233
+ with pytest.warns(RuntimeWarning):
1234
+ DiscreteGuideTable(pv, random_state=urng, guide_factor=7)
1235
+
1236
+ def test_guide_factor_zero_raises_warning(self):
1237
+ pv = [0.1, 0.3, 0.6]
1238
+ urng = np.random.default_rng()
1239
+ with pytest.warns(RuntimeWarning):
1240
+ DiscreteGuideTable(pv, random_state=urng, guide_factor=0)
1241
+
1242
+ def test_negative_guide_factor_raises_warning(self):
1243
+ # This occurs from the UNU.RAN wrapper automatically.
1244
+ # however it already gives a useful warning
1245
+ # Here we just test that a warning is raised.
1246
+ pv = [0.1, 0.3, 0.6]
1247
+ urng = np.random.default_rng()
1248
+ with pytest.warns(RuntimeWarning):
1249
+ DiscreteGuideTable(pv, random_state=urng, guide_factor=-1)
1250
+
1251
+ @pytest.mark.parametrize("distname, params", distdiscrete)
1252
+ def test_basic(self, distname, params):
1253
+ if distname in self.basic_fail_dists:
1254
+ msg = ("DGT fails on these probably because of large domains "
1255
+ "and small computation errors in PMF.")
1256
+ pytest.skip(msg)
1257
+
1258
+ if not isinstance(distname, str):
1259
+ dist = distname
1260
+ else:
1261
+ dist = getattr(stats, distname)
1262
+
1263
+ dist = dist(*params)
1264
+ domain = dist.support()
1265
+
1266
+ if not np.isfinite(domain[1] - domain[0]):
1267
+ # DGT only works with finite domain. So, skip the distributions
1268
+ # with infinite tails.
1269
+ pytest.skip("DGT only works with a finite domain.")
1270
+
1271
+ k = np.arange(domain[0], domain[1]+1)
1272
+ pv = dist.pmf(k)
1273
+ mv_ex = dist.stats('mv')
1274
+ rng = DiscreteGuideTable(dist, random_state=42)
1275
+ check_discr_samples(rng, pv, mv_ex)
1276
+
1277
+ u = [
1278
+ # the correctness of the PPF for equidistant points between 0 and 1.
1279
+ np.linspace(0, 1, num=10000),
1280
+ # test the PPF method for empty arrays
1281
+ [], [[]],
1282
+ # test if nans and infs return nan result.
1283
+ [np.nan], [-np.inf, np.nan, np.inf],
1284
+ # test if a scalar is returned for a scalar input.
1285
+ 0,
1286
+ # test for arrays with nans, values greater than 1 and less than 0,
1287
+ # and some valid values.
1288
+ [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
1289
+ ]
1290
+
1291
+ @pytest.mark.parametrize('u', u)
1292
+ def test_ppf(self, u):
1293
+ n, p = 4, 0.1
1294
+ dist = stats.binom(n, p)
1295
+ rng = DiscreteGuideTable(dist, random_state=42)
1296
+
1297
+ # Older versions of NumPy throw RuntimeWarnings for comparisons
1298
+ # with nan.
1299
+ with suppress_warnings() as sup:
1300
+ sup.filter(RuntimeWarning, "invalid value encountered in greater")
1301
+ sup.filter(RuntimeWarning, "invalid value encountered in "
1302
+ "greater_equal")
1303
+ sup.filter(RuntimeWarning, "invalid value encountered in less")
1304
+ sup.filter(RuntimeWarning, "invalid value encountered in "
1305
+ "less_equal")
1306
+
1307
+ res = rng.ppf(u)
1308
+ expected = stats.binom.ppf(u, n, p)
1309
+ assert_equal(res.shape, expected.shape)
1310
+ assert_equal(res, expected)
1311
+
1312
+ @pytest.mark.parametrize("pv, msg", bad_pv_common)
1313
+ def test_bad_pv(self, pv, msg):
1314
+ with pytest.raises(ValueError, match=msg):
1315
+ DiscreteGuideTable(pv)
1316
+
1317
+ # DGT doesn't support infinite tails. So, it should throw an error when
1318
+ # inf is present in the domain.
1319
+ inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
1320
+ (0, np.inf), (-np.inf, 0)]
1321
+
1322
+ @pytest.mark.parametrize("domain", inf_domain)
1323
+ def test_inf_domain(self, domain):
1324
+ with pytest.raises(ValueError, match=r"must be finite"):
1325
+ DiscreteGuideTable(stats.binom(10, 0.2), domain=domain)
1326
+
1327
+
1328
+ class TestSimpleRatioUniforms:
1329
+ # pdf with piecewise linear function as transformed density
1330
+ # with T = -1/sqrt with shift. Taken from UNU.RAN test suite
1331
+ # (from file t_srou.c)
1332
+ class dist:
1333
+ def __init__(self, shift):
1334
+ self.shift = shift
1335
+ self.mode = shift
1336
+
1337
+ def pdf(self, x):
1338
+ x -= self.shift
1339
+ y = 1. / (abs(x) + 1.)
1340
+ return 0.5 * y * y
1341
+
1342
+ def cdf(self, x):
1343
+ x -= self.shift
1344
+ if x <= 0.:
1345
+ return 0.5 / (1. - x)
1346
+ else:
1347
+ return 1. - 0.5 / (1. + x)
1348
+
1349
+ dists = [dist(0.), dist(10000.)]
1350
+
1351
+ # exact mean and variance of the distributions in the list dists
1352
+ mv1 = [0., np.inf]
1353
+ mv2 = [10000., np.inf]
1354
+ mvs = [mv1, mv2]
1355
+
1356
+ @pytest.mark.parametrize("dist, mv_ex",
1357
+ zip(dists, mvs))
1358
+ def test_basic(self, dist, mv_ex):
1359
+ rng = SimpleRatioUniforms(dist, mode=dist.mode, random_state=42)
1360
+ check_cont_samples(rng, dist, mv_ex)
1361
+ rng = SimpleRatioUniforms(dist, mode=dist.mode,
1362
+ cdf_at_mode=dist.cdf(dist.mode),
1363
+ random_state=42)
1364
+ check_cont_samples(rng, dist, mv_ex)
1365
+
1366
+ # test domains with inf + nan in them. need to write a custom test for
1367
+ # this because not all methods support infinite tails.
1368
+ @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
1369
+ def test_inf_nan_domains(self, domain, err, msg):
1370
+ with pytest.raises(err, match=msg):
1371
+ SimpleRatioUniforms(StandardNormal(), domain=domain)
1372
+
1373
+ def test_bad_args(self):
1374
+ # pdf_area < 0
1375
+ with pytest.raises(ValueError, match=r"`pdf_area` must be > 0"):
1376
+ SimpleRatioUniforms(StandardNormal(), mode=0, pdf_area=-1)
1377
+
1378
+
1379
+ class TestRatioUniforms:
1380
+ """ Tests for rvs_ratio_uniforms.
1381
+ """
1382
+
1383
+ def test_rv_generation(self):
1384
+ # use KS test to check distribution of rvs
1385
+ # normal distribution
1386
+ f = stats.norm.pdf
1387
+ v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
1388
+ u = np.sqrt(f(0))
1389
+ gen = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12345)
1390
+ assert_equal(stats.kstest(gen.rvs(2500), 'norm')[1] > 0.25, True)
1391
+
1392
+ # exponential distribution
1393
+ gen = RatioUniforms(lambda x: np.exp(-x), umax=1,
1394
+ vmin=0, vmax=2*np.exp(-1), random_state=12345)
1395
+ assert_equal(stats.kstest(gen.rvs(1000), 'expon')[1] > 0.25, True)
1396
+
1397
+ def test_shape(self):
1398
+ # test shape of return value depending on size parameter
1399
+ f = stats.norm.pdf
1400
+ v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
1401
+ u = np.sqrt(f(0))
1402
+
1403
+ gen1 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
1404
+ gen2 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
1405
+ gen3 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
1406
+ r1, r2, r3 = gen1.rvs(3), gen2.rvs((3,)), gen3.rvs((3, 1))
1407
+ assert_equal(r1, r2)
1408
+ assert_equal(r2, r3.flatten())
1409
+ assert_equal(r1.shape, (3,))
1410
+ assert_equal(r3.shape, (3, 1))
1411
+
1412
+ gen4 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12)
1413
+ gen5 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12)
1414
+ r4, r5 = gen4.rvs(size=(3, 3, 3)), gen5.rvs(size=27)
1415
+ assert_equal(r4.flatten(), r5)
1416
+ assert_equal(r4.shape, (3, 3, 3))
1417
+
1418
+ gen6 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
1419
+ gen7 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
1420
+ gen8 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234)
1421
+ r6, r7, r8 = gen6.rvs(), gen7.rvs(1), gen8.rvs((1,))
1422
+ assert_equal(r6, r7)
1423
+ assert_equal(r7, r8)
1424
+
1425
+ def test_random_state(self):
1426
+ f = stats.norm.pdf
1427
+ v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
1428
+ umax = np.sqrt(f(0))
1429
+ gen1 = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=1234)
1430
+ r1 = gen1.rvs(10)
1431
+ np.random.seed(1234)
1432
+ gen2 = RatioUniforms(f, umax=umax, vmin=-v, vmax=v)
1433
+ r2 = gen2.rvs(10)
1434
+ assert_equal(r1, r2)
1435
+
1436
+ def test_exceptions(self):
1437
+ f = stats.norm.pdf
1438
+ # need vmin < vmax
1439
+ with assert_raises(ValueError, match="vmin must be smaller than vmax"):
1440
+ RatioUniforms(pdf=f, umax=1, vmin=3, vmax=1)
1441
+ with assert_raises(ValueError, match="vmin must be smaller than vmax"):
1442
+ RatioUniforms(pdf=f, umax=1, vmin=1, vmax=1)
1443
+ # need umax > 0
1444
+ with assert_raises(ValueError, match="umax must be positive"):
1445
+ RatioUniforms(pdf=f, umax=-1, vmin=1, vmax=3)
1446
+ with assert_raises(ValueError, match="umax must be positive"):
1447
+ RatioUniforms(pdf=f, umax=0, vmin=1, vmax=3)
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_sensitivity_analysis.py ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_allclose, assert_array_less
3
+ import pytest
4
+
5
+ from scipy import stats
6
+ from scipy.stats import sobol_indices
7
+ from scipy.stats._resampling import BootstrapResult
8
+ from scipy.stats._sensitivity_analysis import (
9
+ BootstrapSobolResult, f_ishigami, sample_AB, sample_A_B
10
+ )
11
+
12
+
13
+ @pytest.fixture(scope='session')
14
+ def ishigami_ref_indices():
15
+ """Reference values for Ishigami from Saltelli2007.
16
+
17
+ Chapter 4, exercise 5 pages 179-182.
18
+ """
19
+ a = 7.
20
+ b = 0.1
21
+
22
+ var = 0.5 + a**2/8 + b*np.pi**4/5 + b**2*np.pi**8/18
23
+ v1 = 0.5 + b*np.pi**4/5 + b**2*np.pi**8/50
24
+ v2 = a**2/8
25
+ v3 = 0
26
+ v12 = 0
27
+ # v13: mistake in the book, see other derivations e.g. in 10.1002/nme.4856
28
+ v13 = b**2*np.pi**8*8/225
29
+ v23 = 0
30
+
31
+ s_first = np.array([v1, v2, v3])/var
32
+ s_second = np.array([
33
+ [0., 0., v13],
34
+ [v12, 0., v23],
35
+ [v13, v23, 0.]
36
+ ])/var
37
+ s_total = s_first + s_second.sum(axis=1)
38
+
39
+ return s_first, s_total
40
+
41
+
42
+ def f_ishigami_vec(x):
43
+ """Output of shape (2, n)."""
44
+ res = f_ishigami(x)
45
+ return res, res
46
+
47
+
48
+ class TestSobolIndices:
49
+
50
+ dists = [
51
+ stats.uniform(loc=-np.pi, scale=2*np.pi) # type: ignore[attr-defined]
52
+ ] * 3
53
+
54
+ def test_sample_AB(self):
55
+ # (d, n)
56
+ A = np.array(
57
+ [[1, 4, 7, 10],
58
+ [2, 5, 8, 11],
59
+ [3, 6, 9, 12]]
60
+ )
61
+ B = A + 100
62
+ # (d, d, n)
63
+ ref = np.array(
64
+ [[[101, 104, 107, 110],
65
+ [2, 5, 8, 11],
66
+ [3, 6, 9, 12]],
67
+ [[1, 4, 7, 10],
68
+ [102, 105, 108, 111],
69
+ [3, 6, 9, 12]],
70
+ [[1, 4, 7, 10],
71
+ [2, 5, 8, 11],
72
+ [103, 106, 109, 112]]]
73
+ )
74
+ AB = sample_AB(A=A, B=B)
75
+ assert_allclose(AB, ref)
76
+
77
+ @pytest.mark.xslow
78
+ @pytest.mark.xfail_on_32bit("Can't create large array for test")
79
+ @pytest.mark.parametrize(
80
+ 'func',
81
+ [f_ishigami, pytest.param(f_ishigami_vec, marks=pytest.mark.slow)],
82
+ ids=['scalar', 'vector']
83
+ )
84
+ def test_ishigami(self, ishigami_ref_indices, func):
85
+ rng = np.random.default_rng(28631265345463262246170309650372465332)
86
+ res = sobol_indices(
87
+ func=func, n=4096,
88
+ dists=self.dists,
89
+ random_state=rng
90
+ )
91
+
92
+ if func.__name__ == 'f_ishigami_vec':
93
+ ishigami_ref_indices = [
94
+ [ishigami_ref_indices[0], ishigami_ref_indices[0]],
95
+ [ishigami_ref_indices[1], ishigami_ref_indices[1]]
96
+ ]
97
+
98
+ assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)
99
+ assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2)
100
+
101
+ assert res._bootstrap_result is None
102
+ bootstrap_res = res.bootstrap(n_resamples=99)
103
+ assert isinstance(bootstrap_res, BootstrapSobolResult)
104
+ assert isinstance(res._bootstrap_result, BootstrapResult)
105
+
106
+ assert res._bootstrap_result.confidence_interval.low.shape[0] == 2
107
+ assert res._bootstrap_result.confidence_interval.low[1].shape \
108
+ == res.first_order.shape
109
+
110
+ assert bootstrap_res.first_order.confidence_interval.low.shape \
111
+ == res.first_order.shape
112
+ assert bootstrap_res.total_order.confidence_interval.low.shape \
113
+ == res.total_order.shape
114
+
115
+ assert_array_less(
116
+ bootstrap_res.first_order.confidence_interval.low, res.first_order
117
+ )
118
+ assert_array_less(
119
+ res.first_order, bootstrap_res.first_order.confidence_interval.high
120
+ )
121
+ assert_array_less(
122
+ bootstrap_res.total_order.confidence_interval.low, res.total_order
123
+ )
124
+ assert_array_less(
125
+ res.total_order, bootstrap_res.total_order.confidence_interval.high
126
+ )
127
+
128
+ # call again to use previous results and change a param
129
+ assert isinstance(
130
+ res.bootstrap(confidence_level=0.9, n_resamples=99),
131
+ BootstrapSobolResult
132
+ )
133
+ assert isinstance(res._bootstrap_result, BootstrapResult)
134
+
135
+ def test_func_dict(self, ishigami_ref_indices):
136
+ rng = np.random.default_rng(28631265345463262246170309650372465332)
137
+ n = 4096
138
+ dists = [
139
+ stats.uniform(loc=-np.pi, scale=2*np.pi),
140
+ stats.uniform(loc=-np.pi, scale=2*np.pi),
141
+ stats.uniform(loc=-np.pi, scale=2*np.pi)
142
+ ]
143
+
144
+ A, B = sample_A_B(n=n, dists=dists, random_state=rng)
145
+ AB = sample_AB(A=A, B=B)
146
+
147
+ func = {
148
+ 'f_A': f_ishigami(A).reshape(1, -1),
149
+ 'f_B': f_ishigami(B).reshape(1, -1),
150
+ 'f_AB': f_ishigami(AB).reshape((3, 1, -1))
151
+ }
152
+
153
+ res = sobol_indices(
154
+ func=func, n=n,
155
+ dists=dists,
156
+ random_state=rng
157
+ )
158
+ assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)
159
+
160
+ res = sobol_indices(
161
+ func=func, n=n,
162
+ random_state=rng
163
+ )
164
+ assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)
165
+
166
+ def test_method(self, ishigami_ref_indices):
167
+ def jansen_sobol(f_A, f_B, f_AB):
168
+ """Jansen for S and Sobol' for St.
169
+
170
+ From Saltelli2010, table 2 formulations (c) and (e)."""
171
+ var = np.var([f_A, f_B], axis=(0, -1))
172
+
173
+ s = (var - 0.5*np.mean((f_B - f_AB)**2, axis=-1)) / var
174
+ st = np.mean(f_A*(f_A - f_AB), axis=-1) / var
175
+
176
+ return s.T, st.T
177
+
178
+ rng = np.random.default_rng(28631265345463262246170309650372465332)
179
+ res = sobol_indices(
180
+ func=f_ishigami, n=4096,
181
+ dists=self.dists,
182
+ method=jansen_sobol,
183
+ random_state=rng
184
+ )
185
+
186
+ assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)
187
+ assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2)
188
+
189
+ def jansen_sobol_typed(
190
+ f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray
191
+ ) -> tuple[np.ndarray, np.ndarray]:
192
+ return jansen_sobol(f_A, f_B, f_AB)
193
+
194
+ _ = sobol_indices(
195
+ func=f_ishigami, n=8,
196
+ dists=self.dists,
197
+ method=jansen_sobol_typed,
198
+ random_state=rng
199
+ )
200
+
201
+ def test_normalization(self, ishigami_ref_indices):
202
+ rng = np.random.default_rng(28631265345463262246170309650372465332)
203
+ res = sobol_indices(
204
+ func=lambda x: f_ishigami(x) + 1000, n=4096,
205
+ dists=self.dists,
206
+ random_state=rng
207
+ )
208
+
209
+ assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)
210
+ assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2)
211
+
212
+ def test_constant_function(self, ishigami_ref_indices):
213
+
214
+ def f_ishigami_vec_const(x):
215
+ """Output of shape (3, n)."""
216
+ res = f_ishigami(x)
217
+ return res, res * 0 + 10, res
218
+
219
+ rng = np.random.default_rng(28631265345463262246170309650372465332)
220
+ res = sobol_indices(
221
+ func=f_ishigami_vec_const, n=4096,
222
+ dists=self.dists,
223
+ random_state=rng
224
+ )
225
+
226
+ ishigami_vec_indices = [
227
+ [ishigami_ref_indices[0], [0, 0, 0], ishigami_ref_indices[0]],
228
+ [ishigami_ref_indices[1], [0, 0, 0], ishigami_ref_indices[1]]
229
+ ]
230
+
231
+ assert_allclose(res.first_order, ishigami_vec_indices[0], atol=1e-2)
232
+ assert_allclose(res.total_order, ishigami_vec_indices[1], atol=1e-2)
233
+
234
+ @pytest.mark.xfail_on_32bit("Can't create large array for test")
235
+ def test_more_converged(self, ishigami_ref_indices):
236
+ rng = np.random.default_rng(28631265345463262246170309650372465332)
237
+ res = sobol_indices(
238
+ func=f_ishigami, n=2**19, # 524288
239
+ dists=self.dists,
240
+ random_state=rng
241
+ )
242
+
243
+ assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-4)
244
+ assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-4)
245
+
246
+ def test_raises(self):
247
+
248
+ message = r"Each distribution in `dists` must have method `ppf`"
249
+ with pytest.raises(ValueError, match=message):
250
+ sobol_indices(n=0, func=f_ishigami, dists="uniform")
251
+
252
+ with pytest.raises(ValueError, match=message):
253
+ sobol_indices(n=0, func=f_ishigami, dists=[lambda x: x])
254
+
255
+ message = r"The balance properties of Sobol'"
256
+ with pytest.raises(ValueError, match=message):
257
+ sobol_indices(n=7, func=f_ishigami, dists=[stats.uniform()])
258
+
259
+ with pytest.raises(ValueError, match=message):
260
+ sobol_indices(n=4.1, func=f_ishigami, dists=[stats.uniform()])
261
+
262
+ message = r"'toto' is not a valid 'method'"
263
+ with pytest.raises(ValueError, match=message):
264
+ sobol_indices(n=0, func=f_ishigami, method='toto')
265
+
266
+ message = r"must have the following signature"
267
+ with pytest.raises(ValueError, match=message):
268
+ sobol_indices(n=0, func=f_ishigami, method=lambda x: x)
269
+
270
+ message = r"'dists' must be defined when 'func' is a callable"
271
+ with pytest.raises(ValueError, match=message):
272
+ sobol_indices(n=0, func=f_ishigami)
273
+
274
+ def func_wrong_shape_output(x):
275
+ return x.reshape(-1, 1)
276
+
277
+ message = r"'func' output should have a shape"
278
+ with pytest.raises(ValueError, match=message):
279
+ sobol_indices(
280
+ n=2, func=func_wrong_shape_output, dists=[stats.uniform()]
281
+ )
282
+
283
+ message = r"When 'func' is a dictionary"
284
+ with pytest.raises(ValueError, match=message):
285
+ sobol_indices(
286
+ n=2, func={'f_A': [], 'f_AB': []}, dists=[stats.uniform()]
287
+ )
288
+
289
+ with pytest.raises(ValueError, match=message):
290
+ # f_B malformed
291
+ sobol_indices(
292
+ n=2,
293
+ func={'f_A': [1, 2], 'f_B': [3], 'f_AB': [5, 6, 7, 8]},
294
+ )
295
+
296
+ with pytest.raises(ValueError, match=message):
297
+ # f_AB malformed
298
+ sobol_indices(
299
+ n=2,
300
+ func={'f_A': [1, 2], 'f_B': [3, 4], 'f_AB': [5, 6, 7]},
301
+ )
parrot/lib/python3.10/site-packages/scipy/stats/tests/test_survival.py ADDED
@@ -0,0 +1,470 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import numpy as np
3
+ from numpy.testing import assert_equal, assert_allclose
4
+ from scipy import stats
5
+ from scipy.stats import _survival
6
+
7
+
8
+ def _kaplan_meier_reference(times, censored):
9
+ # This is a very straightforward implementation of the Kaplan-Meier
10
+ # estimator that does almost everything differently from the implementation
11
+ # in stats.ecdf.
12
+
13
+ # Begin by sorting the raw data. Note that the order of death and loss
14
+ # at a given time matters: death happens first. See [2] page 461:
15
+ # "These conventions may be paraphrased by saying that deaths recorded as
16
+ # of an age t are treated as if they occurred slightly before t, and losses
17
+ # recorded as of an age t are treated as occurring slightly after t."
18
+ # We implement this by sorting the data first by time, then by `censored`,
19
+ # (which is 0 when there is a death and 1 when there is only a loss).
20
+ dtype = [('time', float), ('censored', int)]
21
+ data = np.array([(t, d) for t, d in zip(times, censored)], dtype=dtype)
22
+ data = np.sort(data, order=('time', 'censored'))
23
+ times = data['time']
24
+ died = np.logical_not(data['censored'])
25
+
26
+ m = times.size
27
+ n = np.arange(m, 0, -1) # number at risk
28
+ sf = np.cumprod((n - died) / n)
29
+
30
+ # Find the indices of the *last* occurrence of unique times. The
31
+ # corresponding entries of `times` and `sf` are what we want.
32
+ _, indices = np.unique(times[::-1], return_index=True)
33
+ ref_times = times[-indices - 1]
34
+ ref_sf = sf[-indices - 1]
35
+ return ref_times, ref_sf
36
+
37
+
38
class TestSurvival:
    """Tests for `stats.ecdf` with uncensored and right-censored data.

    Reference survival functions and confidence intervals come from textbook
    examples, Matlab's `ecdf`, R's `survival::survfit`, and Mathematica's
    `SurvivalModelFit` (URLs/scripts quoted inline below).
    """

    @staticmethod
    def get_random_sample(rng, n_unique):
        """Generate a random right-censored sample with tied observations."""
        # generate random sample
        unique_times = rng.random(n_unique)
        # convert to `np.int32` to resolve `np.repeat` failure in 32-bit CI
        repeats = rng.integers(1, 4, n_unique).astype(np.int32)
        times = rng.permuted(np.repeat(unique_times, repeats))
        censored = rng.random(size=times.size) > rng.random()
        sample = stats.CensoredData.right_censored(times, censored)
        return sample, times, censored

    def test_input_validation(self):
        """Check the error messages raised for invalid `ecdf` inputs."""
        message = '`sample` must be a one-dimensional sequence.'
        with pytest.raises(ValueError, match=message):
            stats.ecdf([[1]])
        with pytest.raises(ValueError, match=message):
            stats.ecdf(1)

        message = '`sample` must not contain nan'
        with pytest.raises(ValueError, match=message):
            stats.ecdf([np.nan])

        message = 'Currently, only uncensored and right-censored data...'
        with pytest.raises(NotImplementedError, match=message):
            stats.ecdf(stats.CensoredData.left_censored([1], censored=[True]))

        message = 'method` must be one of...'
        res = stats.ecdf([1, 2, 3])
        with pytest.raises(ValueError, match=message):
            res.cdf.confidence_interval(method='ekki-ekki')
        with pytest.raises(ValueError, match=message):
            res.sf.confidence_interval(method='shrubbery')

        message = 'confidence_level` must be a scalar between 0 and 1'
        with pytest.raises(ValueError, match=message):
            res.cdf.confidence_interval(-1)
        with pytest.raises(ValueError, match=message):
            res.sf.confidence_interval([0.5, 0.6])

        message = 'The confidence interval is undefined at some observations.'
        with pytest.warns(RuntimeWarning, match=message):
            ci = res.cdf.confidence_interval()

        message = 'Confidence interval bounds do not implement...'
        with pytest.raises(NotImplementedError, match=message):
            ci.low.confidence_interval()
        with pytest.raises(NotImplementedError, match=message):
            ci.high.confidence_interval()

    def test_edge_cases(self):
        """`ecdf` of an empty sample and of a single observation."""
        res = stats.ecdf([])
        assert_equal(res.cdf.quantiles, [])
        assert_equal(res.cdf.probabilities, [])

        res = stats.ecdf([1])
        assert_equal(res.cdf.quantiles, [1])
        assert_equal(res.cdf.probabilities, [1])

    def test_unique(self):
        # Example with unique observations; `stats.ecdf` ref. [1] page 80
        sample = [6.23, 5.58, 7.06, 6.42, 5.20]
        res = stats.ecdf(sample)
        ref_x = np.sort(np.unique(sample))
        ref_cdf = np.arange(1, 6) / 5
        ref_sf = 1 - ref_cdf
        assert_equal(res.cdf.quantiles, ref_x)
        assert_equal(res.cdf.probabilities, ref_cdf)
        assert_equal(res.sf.quantiles, ref_x)
        assert_equal(res.sf.probabilities, ref_sf)

    def test_nonunique(self):
        # Example with non-unique observations; `stats.ecdf` ref. [1] page 82
        sample = [0, 2, 1, 2, 3, 4]
        res = stats.ecdf(sample)
        ref_x = np.sort(np.unique(sample))
        ref_cdf = np.array([1/6, 2/6, 4/6, 5/6, 1])
        ref_sf = 1 - ref_cdf
        assert_equal(res.cdf.quantiles, ref_x)
        assert_equal(res.cdf.probabilities, ref_cdf)
        assert_equal(res.sf.quantiles, ref_x)
        assert_equal(res.sf.probabilities, ref_sf)

    def test_evaluate_methods(self):
        # Test CDF and SF `evaluate` methods
        rng = np.random.default_rng(1162729143302572461)
        sample, _, _ = self.get_random_sample(rng, 15)
        res = stats.ecdf(sample)
        x = res.cdf.quantiles
        xr = x + np.diff(x, append=x[-1]+1)/2  # right shifted points

        assert_equal(res.cdf.evaluate(x), res.cdf.probabilities)
        assert_equal(res.cdf.evaluate(xr), res.cdf.probabilities)
        assert_equal(res.cdf.evaluate(x[0]-1), 0)  # CDF starts at 0
        assert_equal(res.cdf.evaluate([-np.inf, np.inf]), [0, 1])

        assert_equal(res.sf.evaluate(x), res.sf.probabilities)
        assert_equal(res.sf.evaluate(xr), res.sf.probabilities)
        assert_equal(res.sf.evaluate(x[0]-1), 1)  # SF starts at 1
        assert_equal(res.sf.evaluate([-np.inf, np.inf]), [1, 0])

    # Class-level reference data: `tN` are observation times, `dN` are death
    # indicators, and `rN` is the reference survival function for each case.

    # ref. [1] page 91
    t1 = [37, 43, 47, 56, 60, 62, 71, 77, 80, 81]  # times
    d1 = [0, 0, 1, 1, 0, 0, 0, 1, 1, 1]  # 1 means deaths (not censored)
    r1 = [1, 1, 0.875, 0.75, 0.75, 0.75, 0.75, 0.5, 0.25, 0]  # reference SF

    # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
    t2 = [8, 12, 26, 14, 21, 27, 8, 32, 20, 40]
    d2 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
    r2 = [0.9, 0.788, 0.675, 0.675, 0.54, 0.405, 0.27, 0.27, 0.27]
    t3 = [33, 28, 41, 48, 48, 25, 37, 48, 25, 43]
    d3 = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
    r3 = [1, 0.875, 0.75, 0.75, 0.6, 0.6, 0.6]

    # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/bs704_survival4.html
    t4 = [24, 3, 11, 19, 24, 13, 14, 2, 18, 17,
          24, 21, 12, 1, 10, 23, 6, 5, 9, 17]
    d4 = [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1]
    r4 = [0.95, 0.95, 0.897, 0.844, 0.844, 0.844, 0.844, 0.844, 0.844,
          0.844, 0.76, 0.676, 0.676, 0.676, 0.676, 0.507, 0.507]

    # https://www.real-statistics.com/survival-analysis/kaplan-meier-procedure/confidence-interval-for-the-survival-function/
    t5 = [3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11]
    d5 = [1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1]
    r5 = [0.944, 0.889, 0.722, 0.542, 0.542, 0.542, 0.361, 0.181, 0.181, 0.181]

    @pytest.mark.parametrize("case", [(t1, d1, r1), (t2, d2, r2), (t3, d3, r3),
                                      (t4, d4, r4), (t5, d5, r5)])
    def test_right_censored_against_examples(self, case):
        # test `ecdf` against other implementations on example problems
        times, died, ref = case
        sample = stats.CensoredData.right_censored(times, np.logical_not(died))
        res = stats.ecdf(sample)
        assert_allclose(res.sf.probabilities, ref, atol=1e-3)
        assert_equal(res.sf.quantiles, np.sort(np.unique(times)))

        # test reference implementation against other implementations
        res = _kaplan_meier_reference(times, np.logical_not(died))
        assert_equal(res[0], np.sort(np.unique(times)))
        assert_allclose(res[1], ref, atol=1e-3)

    @pytest.mark.parametrize('seed', [182746786639392128, 737379171436494115,
                                      576033618403180168, 308115465002673650])
    def test_right_censored_against_reference_implementation(self, seed):
        # test `ecdf` against reference implementation on random problems
        rng = np.random.default_rng(seed)
        n_unique = rng.integers(10, 100)
        sample, times, censored = self.get_random_sample(rng, n_unique)
        res = stats.ecdf(sample)
        ref = _kaplan_meier_reference(times, censored)
        assert_allclose(res.sf.quantiles, ref[0])
        assert_allclose(res.sf.probabilities, ref[1])

        # If all observations are uncensored, the KM estimate should match
        # the usual estimate for uncensored data
        sample = stats.CensoredData(uncensored=times)
        res = _survival._ecdf_right_censored(sample)  # force Kaplan-Meier
        ref = stats.ecdf(times)
        assert_equal(res[0], ref.sf.quantiles)
        assert_allclose(res[1], ref.cdf.probabilities, rtol=1e-14)
        assert_allclose(res[2], ref.sf.probabilities, rtol=1e-14)

    def test_right_censored_ci(self):
        # test "greenwood" confidence interval against example 4 (URL above).
        times, died = self.t4, self.d4
        sample = stats.CensoredData.right_censored(times, np.logical_not(died))
        res = stats.ecdf(sample)
        ref_allowance = [0.096, 0.096, 0.135, 0.162, 0.162, 0.162, 0.162,
                         0.162, 0.162, 0.162, 0.214, 0.246, 0.246, 0.246,
                         0.246, 0.341, 0.341]

        sf_ci = res.sf.confidence_interval()
        cdf_ci = res.cdf.confidence_interval()
        allowance = res.sf.probabilities - sf_ci.low.probabilities

        assert_allclose(allowance, ref_allowance, atol=1e-3)
        assert_allclose(sf_ci.low.probabilities,
                        np.clip(res.sf.probabilities - allowance, 0, 1))
        assert_allclose(sf_ci.high.probabilities,
                        np.clip(res.sf.probabilities + allowance, 0, 1))
        assert_allclose(cdf_ci.low.probabilities,
                        np.clip(res.cdf.probabilities - allowance, 0, 1))
        assert_allclose(cdf_ci.high.probabilities,
                        np.clip(res.cdf.probabilities + allowance, 0, 1))

        # test "log-log" confidence interval against Mathematica
        # e = {24, 3, 11, 19, 24, 13, 14, 2, 18, 17, 24, 21, 12, 1, 10, 23, 6, 5,
        #      9, 17}
        # ci = {1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0}
        # R = EventData[e, ci]
        # S = SurvivalModelFit[R]
        # S["PointwiseIntervals", ConfidenceLevel->0.95,
        #   ConfidenceTransform->"LogLog"]

        ref_low = [0.694743, 0.694743, 0.647529, 0.591142, 0.591142, 0.591142,
                   0.591142, 0.591142, 0.591142, 0.591142, 0.464605, 0.370359,
                   0.370359, 0.370359, 0.370359, 0.160489, 0.160489]
        ref_high = [0.992802, 0.992802, 0.973299, 0.947073, 0.947073, 0.947073,
                    0.947073, 0.947073, 0.947073, 0.947073, 0.906422, 0.856521,
                    0.856521, 0.856521, 0.856521, 0.776724, 0.776724]
        sf_ci = res.sf.confidence_interval(method='log-log')
        assert_allclose(sf_ci.low.probabilities, ref_low, atol=1e-6)
        assert_allclose(sf_ci.high.probabilities, ref_high, atol=1e-6)

    def test_right_censored_ci_example_5(self):
        # test "exponential greenwood" confidence interval against example 5
        times, died = self.t5, self.d5
        sample = stats.CensoredData.right_censored(times, np.logical_not(died))
        res = stats.ecdf(sample)
        lower = np.array([0.66639, 0.624174, 0.456179, 0.287822, 0.287822,
                          0.287822, 0.128489, 0.030957, 0.030957, 0.030957])
        upper = np.array([0.991983, 0.970995, 0.87378, 0.739467, 0.739467,
                          0.739467, 0.603133, 0.430365, 0.430365, 0.430365])

        sf_ci = res.sf.confidence_interval(method='log-log')
        cdf_ci = res.cdf.confidence_interval(method='log-log')

        assert_allclose(sf_ci.low.probabilities, lower, atol=1e-5)
        assert_allclose(sf_ci.high.probabilities, upper, atol=1e-5)
        assert_allclose(cdf_ci.low.probabilities, 1-upper, atol=1e-5)
        assert_allclose(cdf_ci.high.probabilities, 1-lower, atol=1e-5)

        # Test against R's `survival` library `survfit` function, 90%CI
        # library(survival)
        # options(digits=16)
        # time = c(3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11)
        # status = c(1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1)
        # res = survfit(Surv(time, status)
        #               ~1, conf.type = "log-log", conf.int = 0.90)
        # res$time; res$lower; res$upper
        low = [0.74366748406861172, 0.68582332289196246, 0.50596835651480121,
               0.32913131413336727, 0.32913131413336727, 0.32913131413336727,
               0.15986912028781664, 0.04499539918147757, 0.04499539918147757,
               0.04499539918147757]
        high = [0.9890291867238429, 0.9638835422144144, 0.8560366823086629,
                0.7130167643978450, 0.7130167643978450, 0.7130167643978450,
                0.5678602982997164, 0.3887616766886558, 0.3887616766886558,
                0.3887616766886558]
        sf_ci = res.sf.confidence_interval(method='log-log',
                                           confidence_level=0.9)
        assert_allclose(sf_ci.low.probabilities, low)
        assert_allclose(sf_ci.high.probabilities, high)

        # And with conf.type = "plain"
        low = [0.8556383113628162, 0.7670478794850761, 0.5485720663578469,
               0.3441515412527123, 0.3441515412527123, 0.3441515412527123,
               0.1449184105424544, 0., 0., 0.]
        high = [1., 1., 0.8958723780865975, 0.7391817920806210,
                0.7391817920806210, 0.7391817920806210, 0.5773038116797676,
                0.3642270254596720, 0.3642270254596720, 0.3642270254596720]
        sf_ci = res.sf.confidence_interval(confidence_level=0.9)
        assert_allclose(sf_ci.low.probabilities, low)
        assert_allclose(sf_ci.high.probabilities, high)

    def test_right_censored_ci_nans(self):
        # test `ecdf` confidence interval on a problem that results in NaNs
        times, died = self.t1, self.d1
        sample = stats.CensoredData.right_censored(times, np.logical_not(died))
        res = stats.ecdf(sample)

        # Reference values generated with Matlab
        # format long
        # t = [37 43 47 56 60 62 71 77 80 81];
        # d = [0 0 1 1 0 0 0 1 1 1];
        # censored = ~d1;
        # [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Alpha', 0.05);
        x = [37, 47, 56, 77, 80, 81]
        flo = [np.nan, 0, 0, 0.052701464070711, 0.337611126231790, np.nan]
        fup = [np.nan, 0.35417230377, 0.5500569798, 0.9472985359, 1.0, np.nan]
        i = np.searchsorted(res.cdf.quantiles, x)

        message = "The confidence interval is undefined at some observations"
        with pytest.warns(RuntimeWarning, match=message):
            ci = res.cdf.confidence_interval()

        # Matlab gives NaN as the first element of the CIs. Mathematica agrees,
        # but R's survfit does not. It makes some sense, but it's not what the
        # formula gives, so skip that element.
        assert_allclose(ci.low.probabilities[i][1:], flo[1:])
        assert_allclose(ci.high.probabilities[i][1:], fup[1:])

        # [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Function',
        #                         'survivor', 'Alpha', 0.05);
        flo = [np.nan, 0.64582769623, 0.449943020228, 0.05270146407, 0, np.nan]
        fup = [np.nan, 1.0, 1.0, 0.947298535929289, 0.662388873768210, np.nan]
        i = np.searchsorted(res.cdf.quantiles, x)

        with pytest.warns(RuntimeWarning, match=message):
            ci = res.sf.confidence_interval()

        assert_allclose(ci.low.probabilities[i][1:], flo[1:])
        assert_allclose(ci.high.probabilities[i][1:], fup[1:])

        # With the same data, R's `survival` library `survfit` function
        # doesn't produce the leading NaN
        # library(survival)
        # options(digits=16)
        # time = c(37, 43, 47, 56, 60, 62, 71, 77, 80, 81)
        # status = c(0, 0, 1, 1, 0, 0, 0, 1, 1, 1)
        # res = survfit(Surv(time, status)
        #               ~1, conf.type = "plain", conf.int = 0.95)
        # res$time
        # res$lower
        # res$upper
        low = [1., 1., 0.64582769623233816, 0.44994302022779326,
               0.44994302022779326, 0.44994302022779326, 0.44994302022779326,
               0.05270146407071086, 0., np.nan]
        high = [1., 1., 1., 1., 1., 1., 1., 0.9472985359292891,
                0.6623888737682101, np.nan]
        assert_allclose(ci.low.probabilities, low)
        assert_allclose(ci.high.probabilities, high)

        # It does with conf.type="log-log", as do we
        with pytest.warns(RuntimeWarning, match=message):
            ci = res.sf.confidence_interval(method='log-log')
        low = [np.nan, np.nan, 0.38700001403202522, 0.31480711370551911,
               0.31480711370551911, 0.31480711370551911, 0.31480711370551911,
               0.08048821148507734, 0.01049958986680601, np.nan]
        high = [np.nan, np.nan, 0.9813929658789660, 0.9308983170906275,
                0.9308983170906275, 0.9308983170906275, 0.9308983170906275,
                0.8263946341076415, 0.6558775085110887, np.nan]
        assert_allclose(ci.low.probabilities, low)
        assert_allclose(ci.high.probabilities, high)

    def test_right_censored_against_uncensored(self):
        """A sample censored only at its maximum matches the uncensored fit
        everywhere except the final point."""
        rng = np.random.default_rng(7463952748044886637)
        sample = rng.integers(10, 100, size=1000)
        censored = np.zeros_like(sample)
        censored[np.argmax(sample)] = True
        res = stats.ecdf(sample)
        ref = stats.ecdf(stats.CensoredData.right_censored(sample, censored))
        assert_equal(res.sf.quantiles, ref.sf.quantiles)
        assert_equal(res.sf._n, ref.sf._n)
        assert_equal(res.sf._d[:-1], ref.sf._d[:-1])  # difference @ [-1]
        assert_allclose(res.sf._sf[:-1], ref.sf._sf[:-1], rtol=1e-14)

    def test_plot_iv(self):
        """`plot` works with matplotlib installed and raises a clear error
        without it."""
        rng = np.random.default_rng(1769658657308472721)
        n_unique = rng.integers(10, 100)
        sample, _, _ = self.get_random_sample(rng, n_unique)
        res = stats.ecdf(sample)

        try:
            import matplotlib.pyplot as plt  # noqa: F401
            res.sf.plot()  # no other errors occur
        except (ModuleNotFoundError, ImportError):
            # Avoid trying to call MPL with numpy 2.0-dev, because that fails
            # too often due to ABI mismatches and is hard to avoid. This test
            # will work fine again once MPL has done a 2.0-compatible release.
            if not np.__version__.startswith('2.0.0.dev0'):
                message = r"matplotlib must be installed to use method `plot`."
                with pytest.raises(ModuleNotFoundError, match=message):
                    res.sf.plot()
392
+
393
+
394
class TestLogRank:
    """Tests for `stats.logrank`, validated against R's `survival::survdiff`
    and `nph::logrank.test` (which supports one-sided alternatives)."""

    @pytest.mark.parametrize(
        "x, y, statistic, pvalue",
        # Results validate with R
        # library(survival)
        # options(digits=16)
        #
        # futime_1 <- c(8, 12, 26, 14, 21, 27, 8, 32, 20, 40)
        # fustat_1 <- c(1, 1, 1, 1, 1, 1, 0, 0, 0, 0)
        # rx_1 <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        #
        # futime_2 <- c(33, 28, 41, 48, 48, 25, 37, 48, 25, 43)
        # fustat_2 <- c(1, 1, 1, 0, 0, 0, 0, 0, 0, 0)
        # rx_2 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
        #
        # futime <- c(futime_1, futime_2)
        # fustat <- c(fustat_1, fustat_2)
        # rx <- c(rx_1, rx_2)
        #
        # survdiff(formula = Surv(futime, fustat) ~ rx)
        #
        # Also check against another library which handle alternatives
        # library(nph)
        # logrank.test(futime, fustat, rx, alternative = "two.sided")
        # res["test"]
        [(
            # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
            # uncensored, censored
            [[8, 12, 26, 14, 21, 27], [8, 32, 20, 40]],
            [[33, 28, 41], [48, 48, 25, 37, 48, 25, 43]],
            # chi2, ["two-sided", "less", "greater"]
            6.91598157449,
            [0.008542873404, 0.9957285632979385, 0.004271436702061537]
        ),
        (
            # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
            [[19, 6, 5, 4], [20, 19, 17, 14]],
            [[16, 21, 7], [21, 15, 18, 18, 5]],
            0.835004855038,
            [0.3608293039, 0.8195853480676912, 0.1804146519323088]
        ),
        (
            # Bland, Altman, "The logrank test", BMJ, 2004
            # https://www.bmj.com/content/328/7447/1073.short
            [[6, 13, 21, 30, 37, 38, 49, 50, 63, 79, 86, 98, 202, 219],
             [31, 47, 80, 82, 82, 149]],
            [[10, 10, 12, 13, 14, 15, 16, 17, 18, 20, 24, 24, 25, 28, 30,
              33, 35, 37, 40, 40, 46, 48, 76, 81, 82, 91, 112, 181],
             [34, 40, 70]],
            7.49659416854,
            [0.006181578637, 0.003090789318730882, 0.9969092106812691]
        )]
    )
    def test_log_rank(self, x, y, statistic, pvalue):
        """Compare statistic and p-value against R for all alternatives."""
        x = stats.CensoredData(uncensored=x[0], right=x[1])
        y = stats.CensoredData(uncensored=y[0], right=y[1])

        for i, alternative in enumerate(["two-sided", "less", "greater"]):
            res = stats.logrank(x=x, y=y, alternative=alternative)

            # we return z and use the normal distribution while other framework
            # return z**2. The p-value are directly comparable, but we have to
            # square the statistic
            assert_allclose(res.statistic**2, statistic, atol=1e-10)
            assert_allclose(res.pvalue, pvalue[i], atol=1e-10)

    def test_raises(self):
        """Non-1-D inputs for either sample raise ValueError."""
        sample = stats.CensoredData([1, 2])

        msg = r"`y` must be"
        with pytest.raises(ValueError, match=msg):
            stats.logrank(x=sample, y=[[1, 2]])

        msg = r"`x` must be"
        with pytest.raises(ValueError, match=msg):
            stats.logrank(x=[[1, 2]], y=sample)