diff --git a/.gitattributes b/.gitattributes index f5591c2265c43ff5ffbd6b8f3821f010665e0759..92cdc6e8fd567806b3f60c87a7e5c5303171b071 100644 --- a/.gitattributes +++ b/.gitattributes @@ -339,3 +339,4 @@ parrot/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups. parrot/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py new file mode 100644 index 0000000000000000000000000000000000000000..c624bb8c79be23ad515d003649f9efe9ea3e775d --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py @@ -0,0 +1,795 @@ +import builtins +from warnings import catch_warnings, simplefilter +import numpy as np +from operator import index +from collections import namedtuple + +__all__ = ['binned_statistic', + 'binned_statistic_2d', + 'binned_statistic_dd'] + + +BinnedStatisticResult = namedtuple('BinnedStatisticResult', + ('statistic', 'bin_edges', 'binnumber')) + + +def binned_statistic(x, values, statistic='mean', + bins=10, range=None): + """ + Compute a binned statistic for one or more sets of data. + + This is a generalization of a histogram function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values (or set of values) within each bin. 
+ + Parameters + ---------- + x : (N,) array_like + A sequence of values to be binned. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `x`, or a set of sequences - each the same shape as + `x`. If `values` is a set of sequences, the statistic will be computed + on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'std' : compute the standard deviation within each bin. This + is implicitly calculated with ddof=0. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : int or sequence of scalars, optional + If `bins` is an int, it defines the number of equal-width bins in the + given range (10 by default). If `bins` is a sequence, it defines the + bin edges, including the rightmost edge, allowing for non-uniform bin + widths. Values in `x` that are smaller than lowest bin edge are + assigned to bin number 0, values beyond the highest bin are assigned to + ``bins[-1]``. 
If the bin edges are specified, the number of bins will + be, (nx = len(bins)-1). + range : (float, float) or [(float, float)], optional + The lower and upper range of the bins. If not provided, range + is simply ``(x.min(), x.max())``. Values outside the range are + ignored. + + Returns + ------- + statistic : array + The values of the selected statistic in each bin. + bin_edges : array of dtype float + Return the bin edges ``(length(statistic)+1)``. + binnumber: 1-D ndarray of ints + Indices of the bins (corresponding to `bin_edges`) in which each value + of `x` belongs. Same length as `values`. A binnumber of `i` means the + corresponding value is between (bin_edges[i-1], bin_edges[i]). + + See Also + -------- + numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, + but excluding 2) and the second ``[2, 3)``. The last bin, however, is + ``[3, 4]``, which *includes* 4. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + First some basic examples: + + Create two evenly spaced bins in the range of the given sample, and sum the + corresponding values in each of those bins: + + >>> values = [1.0, 1.0, 2.0, 1.5, 3.0] + >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) + BinnedStatisticResult(statistic=array([4. , 4.5]), + bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2])) + + Multiple arrays of values can also be passed. The statistic is calculated + on each set independently: + + >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]] + >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) + BinnedStatisticResult(statistic=array([[4. , 4.5], + [8. , 9. 
]]), bin_edges=array([1., 4., 7.]), + binnumber=array([1, 1, 1, 2, 2])) + + >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean', + ... bins=3) + BinnedStatisticResult(statistic=array([1., 2., 4.]), + bin_edges=array([1., 2., 3., 4.]), + binnumber=array([1, 2, 1, 2, 3])) + + As a second example, we now generate some random data of sailing boat speed + as a function of wind speed, and then determine how fast our boat is for + certain wind speeds: + + >>> rng = np.random.default_rng() + >>> windspeed = 8 * rng.random(500) + >>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500) + >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed, + ... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7]) + >>> plt.figure() + >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data') + >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5, + ... label='binned statistic of data') + >>> plt.legend() + + Now we can use ``binnumber`` to select all datapoints with a windspeed + below 1: + + >>> low_boatspeed = boatspeed[binnumber == 0] + + As a final example, we will use ``bin_edges`` and ``binnumber`` to make a + plot of a distribution that shows the mean and distribution around that + mean per bin, on top of a regular histogram and the probability + distribution function: + + >>> x = np.linspace(0, 5, num=500) + >>> x_pdf = stats.maxwell.pdf(x) + >>> samples = stats.maxwell.rvs(size=10000) + + >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf, + ... statistic='mean', bins=25) + >>> bin_width = (bin_edges[1] - bin_edges[0]) + >>> bin_centers = bin_edges[1:] - bin_width/2 + + >>> plt.figure() + >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled', + ... alpha=0.2, label='histogram of data') + >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf') + >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2, + ... 
label='binned statistic of data') + >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5) + >>> plt.legend(fontsize=10) + >>> plt.show() + + """ + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1: + bins = [np.asarray(bins, float)] + + if range is not None: + if len(range) == 2: + range = [range] + + medians, edges, binnumbers = binned_statistic_dd( + [x], values, statistic, bins, range) + + return BinnedStatisticResult(medians, edges[0], binnumbers) + + +BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult', + ('statistic', 'x_edge', 'y_edge', + 'binnumber')) + + +def binned_statistic_2d(x, y, values, statistic='mean', + bins=10, range=None, expand_binnumbers=False): + """ + Compute a bidimensional binned statistic for one or more sets of data. + + This is a generalization of a histogram2d function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values (or set of values) within each bin. + + Parameters + ---------- + x : (N,) array_like + A sequence of values to be binned along the first dimension. + y : (N,) array_like + A sequence of values to be binned along the second dimension. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `x`, or a list of sequences - each with the same + shape as `x`. If `values` is such a list, the statistic will be + computed on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'std' : compute the standard deviation within each bin. This + is implicitly calculated with ddof=0. + * 'median' : compute the median of values for points within each + bin. 
Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : int or [int, int] or array_like or [array, array], optional + The bin specification: + + * the number of bins for the two dimensions (nx = ny = bins), + * the number of bins in each dimension (nx, ny = bins), + * the bin edges for the two dimensions (x_edge = y_edge = bins), + * the bin edges in each dimension (x_edge, y_edge = bins). + + If the bin edges are specified, the number of bins will be, + (nx = len(x_edge)-1, ny = len(y_edge)-1). + + range : (2,2) array_like, optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be + considered outliers and not tallied in the histogram. + expand_binnumbers : bool, optional + 'False' (default): the returned `binnumber` is a shape (N,) array of + linearized bin indices. + 'True': the returned `binnumber` is 'unraveled' into a shape (2,N) + ndarray, where each row gives the bin numbers in the corresponding + dimension. + See the `binnumber` returned value, and the `Examples` section. + + .. 
versionadded:: 0.17.0 + + Returns + ------- + statistic : (nx, ny) ndarray + The values of the selected statistic in each two-dimensional bin. + x_edge : (nx + 1) ndarray + The bin edges along the first dimension. + y_edge : (ny + 1) ndarray + The bin edges along the second dimension. + binnumber : (N,) array of ints or (2,N) ndarray of ints + This assigns to each element of `sample` an integer that represents the + bin in which this observation falls. The representation depends on the + `expand_binnumbers` argument. See `Notes` for details. + + + See Also + -------- + numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd + + Notes + ----- + Binedges: + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, + but excluding 2) and the second ``[2, 3)``. The last bin, however, is + ``[3, 4]``, which *includes* 4. + + `binnumber`: + This returned argument assigns to each element of `sample` an integer that + represents the bin in which it belongs. The representation depends on the + `expand_binnumbers` argument. If 'False' (default): The returned + `binnumber` is a shape (N,) array of linearized indices mapping each + element of `sample` to its corresponding bin (using row-major ordering). + Note that the returned linearized bin indices are used for an array with + extra bins on the outer binedges to capture values outside of the defined + bin bounds. + If 'True': The returned `binnumber` is a shape (2,N) ndarray where + each row indicates bin placements for each dimension respectively. In each + dimension, a binnumber of `i` means the corresponding value is between + (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'. + + .. 
versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import stats + + Calculate the counts with explicit bin-edges: + + >>> x = [0.1, 0.1, 0.1, 0.6] + >>> y = [2.1, 2.6, 2.1, 2.1] + >>> binx = [0.0, 0.5, 1.0] + >>> biny = [2.0, 2.5, 3.0] + >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny]) + >>> ret.statistic + array([[2., 1.], + [1., 0.]]) + + The bin in which each sample is placed is given by the `binnumber` + returned parameter. By default, these are the linearized bin indices: + + >>> ret.binnumber + array([5, 6, 5, 9]) + + The bin indices can also be expanded into separate entries for each + dimension using the `expand_binnumbers` parameter: + + >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny], + ... expand_binnumbers=True) + >>> ret.binnumber + array([[1, 1, 1, 2], + [1, 2, 1, 1]]) + + Which shows that the first three elements belong in the xbin 1, and the + fourth into xbin 2; and so on for y. + + """ + + # This code is based on np.histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + xedges = yedges = np.asarray(bins, float) + bins = [xedges, yedges] + + medians, edges, binnumbers = binned_statistic_dd( + [x, y], values, statistic, bins, range, + expand_binnumbers=expand_binnumbers) + + return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers) + + +BinnedStatisticddResult = namedtuple('BinnedStatisticddResult', + ('statistic', 'bin_edges', + 'binnumber')) + + +def _bincount(x, weights): + if np.iscomplexobj(weights): + a = np.bincount(x, np.real(weights)) + b = np.bincount(x, np.imag(weights)) + z = a + b*1j + + else: + z = np.bincount(x, weights) + return z + + +def binned_statistic_dd(sample, values, statistic='mean', + bins=10, range=None, expand_binnumbers=False, + binned_statistic_result=None): + """ + Compute a multidimensional binned statistic for a set of data. + + This is a generalization of a histogramdd function. 
A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values within each bin. + + Parameters + ---------- + sample : array_like + Data to histogram passed as a sequence of N arrays of length D, or + as an (N,D) array. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `sample`, or a list of sequences - each with the + same shape as `sample`. If `values` is such a list, the statistic + will be computed on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'std' : compute the standard deviation within each bin. This + is implicitly calculated with ddof=0. If the number of values + within a given bin is 0 or 1, the computed standard deviation value + will be 0 for the bin. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. 
+ + bins : sequence or positive int, optional + The bin specification must be in one of the following forms: + + * A sequence of arrays describing the bin edges along each dimension. + * The number of bins for each dimension (nx, ny, ... = bins). + * The number of bins for all dimensions (nx = ny = ... = bins). + range : sequence, optional + A sequence of lower and upper bin edges to be used if the edges are + not given explicitly in `bins`. Defaults to the minimum and maximum + values along each dimension. + expand_binnumbers : bool, optional + 'False' (default): the returned `binnumber` is a shape (N,) array of + linearized bin indices. + 'True': the returned `binnumber` is 'unraveled' into a shape (D,N) + ndarray, where each row gives the bin numbers in the corresponding + dimension. + See the `binnumber` returned value, and the `Examples` section of + `binned_statistic_2d`. + binned_statistic_result : binnedStatisticddResult + Result of a previous call to the function in order to reuse bin edges + and bin numbers with new values and/or a different statistic. + To reuse bin numbers, `expand_binnumbers` must have been set to False + (the default) + + .. versionadded:: 0.17.0 + + Returns + ------- + statistic : ndarray, shape(nx1, nx2, nx3,...) + The values of the selected statistic in each two-dimensional bin. + bin_edges : list of ndarrays + A list of D arrays describing the (nxi + 1) bin edges for each + dimension. + binnumber : (N,) array of ints or (D,N) ndarray of ints + This assigns to each element of `sample` an integer that represents the + bin in which this observation falls. The representation depends on the + `expand_binnumbers` argument. See `Notes` for details. + + + See Also + -------- + numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d + + Notes + ----- + Binedges: + All but the last (righthand-most) bin is half-open in each dimension. 
In + other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is + ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The + last bin, however, is ``[3, 4]``, which *includes* 4. + + `binnumber`: + This returned argument assigns to each element of `sample` an integer that + represents the bin in which it belongs. The representation depends on the + `expand_binnumbers` argument. If 'False' (default): The returned + `binnumber` is a shape (N,) array of linearized indices mapping each + element of `sample` to its corresponding bin (using row-major ordering). + If 'True': The returned `binnumber` is a shape (D,N) ndarray where + each row indicates bin placements for each dimension respectively. In each + dimension, a binnumber of `i` means the corresponding value is between + (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.mplot3d import Axes3D + + Take an array of 600 (x, y) coordinates as an example. + `binned_statistic_dd` can handle arrays of higher dimension `D`. But a plot + of dimension `D+1` is required. + + >>> mu = np.array([0., 1.]) + >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]]) + >>> multinormal = stats.multivariate_normal(mu, sigma) + >>> data = multinormal.rvs(size=600, random_state=235412) + >>> data.shape + (600, 2) + + Create bins and count how many arrays fall in each bin: + + >>> N = 60 + >>> x = np.linspace(-3, 3, N) + >>> y = np.linspace(-3, 4, N) + >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y], + ... 
statistic='count') + >>> bincounts = ret.statistic + + Set the volume and the location of bars: + + >>> dx = x[1] - x[0] + >>> dy = y[1] - y[0] + >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2) + >>> z = 0 + + >>> bincounts = bincounts.ravel() + >>> x = x.ravel() + >>> y = y.ravel() + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111, projection='3d') + >>> with np.errstate(divide='ignore'): # silence random axes3d warning + ... ax.bar3d(x, y, z, dx, dy, bincounts) + + Reuse bin numbers and bin edges with new values: + + >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600), + ... binned_statistic_result=ret, + ... statistic='mean') + """ + known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max'] + if not callable(statistic) and statistic not in known_stats: + raise ValueError(f'invalid statistic {statistic!r}') + + try: + bins = index(bins) + except TypeError: + # bins is not an integer + pass + # If bins was an integer-like object, now it is an actual Python int. + + # NOTE: for _bin_edges(), see e.g. gh-11365 + if isinstance(bins, int) and not np.isfinite(sample).all(): + raise ValueError(f'{sample!r} contains non-finite values.') + + # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`) + # `Dlen` is the length of elements along each dimension. + # This code is based on np.histogramdd + try: + # `sample` is an ND-array. + Dlen, Ndim = sample.shape + except (AttributeError, ValueError): + # `sample` is a sequence of 1D arrays. 
+ sample = np.atleast_2d(sample).T + Dlen, Ndim = sample.shape + + # Store initial shape of `values` to preserve it in the output + values = np.asarray(values) + input_shape = list(values.shape) + # Make sure that `values` is 2D to iterate over rows + values = np.atleast_2d(values) + Vdim, Vlen = values.shape + + # Make sure `values` match `sample` + if statistic != 'count' and Vlen != Dlen: + raise AttributeError('The number of `values` elements must match the ' + 'length of each `sample` dimension.') + + try: + M = len(bins) + if M != Ndim: + raise AttributeError('The dimension of bins must be equal ' + 'to the dimension of the sample x.') + except TypeError: + bins = Ndim * [bins] + + if binned_statistic_result is None: + nbin, edges, dedges = _bin_edges(sample, bins, range) + binnumbers = _bin_numbers(sample, nbin, edges, dedges) + else: + edges = binned_statistic_result.bin_edges + nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)]) + # +1 for outlier bins + dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)] + binnumbers = binned_statistic_result.binnumber + + # Avoid overflow with double precision. Complex `values` -> `complex128`. 
+ result_type = np.result_type(values, np.float64) + result = np.empty([Vdim, nbin.prod()], dtype=result_type) + + if statistic in {'mean', np.mean}: + result.fill(np.nan) + flatcount = _bincount(binnumbers, None) + a = flatcount.nonzero() + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + result[vv, a] = flatsum[a] / flatcount[a] + elif statistic in {'std', np.std}: + result.fill(np.nan) + flatcount = _bincount(binnumbers, None) + a = flatcount.nonzero() + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers] + std = np.sqrt( + _bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a] + ) + result[vv, a] = std + result = np.real(result) + elif statistic == 'count': + result = np.empty([Vdim, nbin.prod()], dtype=np.float64) + result.fill(0) + flatcount = _bincount(binnumbers, None) + a = np.arange(len(flatcount)) + result[:, a] = flatcount[np.newaxis, :] + elif statistic in {'sum', np.sum}: + result.fill(0) + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + a = np.arange(len(flatsum)) + result[vv, a] = flatsum + elif statistic in {'median', np.median}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.lexsort((values[vv], binnumbers)) + _, j, counts = np.unique(binnumbers[i], + return_index=True, return_counts=True) + mid = j + (counts - 1) / 2 + mid_a = values[vv, i][np.floor(mid).astype(int)] + mid_b = values[vv, i][np.ceil(mid).astype(int)] + medians = (mid_a + mid_b) / 2 + result[vv, binnumbers[i][j]] = medians + elif statistic in {'min', np.min}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.argsort(values[vv])[::-1] # Reversed so the min is last + result[vv, binnumbers[i]] = values[vv, i] + elif statistic in {'max', np.max}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.argsort(values[vv]) + result[vv, binnumbers[i]] = values[vv, i] + elif 
callable(statistic): + with np.errstate(invalid='ignore'), catch_warnings(): + simplefilter("ignore", RuntimeWarning) + try: + null = statistic([]) + except Exception: + null = np.nan + if np.iscomplexobj(null): + result = result.astype(np.complex128) + result.fill(null) + try: + _calc_binned_statistic( + Vdim, binnumbers, result, values, statistic + ) + except ValueError: + result = result.astype(np.complex128) + _calc_binned_statistic( + Vdim, binnumbers, result, values, statistic + ) + + # Shape into a proper matrix + result = result.reshape(np.append(Vdim, nbin)) + + # Remove outliers (indices 0 and -1 for each bin-dimension). + core = tuple([slice(None)] + Ndim * [slice(1, -1)]) + result = result[core] + + # Unravel binnumbers into an ndarray, each row the bins for each dimension + if expand_binnumbers and Ndim > 1: + binnumbers = np.asarray(np.unravel_index(binnumbers, nbin)) + + if np.any(result.shape[1:] != nbin - 2): + raise RuntimeError('Internal Shape Error') + + # Reshape to have output (`result`) match input (`values`) shape + result = result.reshape(input_shape[:-1] + list(nbin-2)) + + return BinnedStatisticddResult(result, edges, binnumbers) + + +def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func): + unique_bin_numbers = np.unique(bin_numbers) + for vv in builtins.range(Vdim): + bin_map = _create_binned_data(bin_numbers, unique_bin_numbers, + values, vv) + for i in unique_bin_numbers: + stat = stat_func(np.array(bin_map[i])) + if np.iscomplexobj(stat) and not np.iscomplexobj(result): + raise ValueError("The statistic function returns complex ") + result[vv, i] = stat + + +def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv): + """ Create hashmap of bin ids to values in bins + key: bin number + value: list of binned data + """ + bin_map = dict() + for i in unique_bin_numbers: + bin_map[i] = [] + for i in builtins.range(len(bin_numbers)): + bin_map[bin_numbers[i]].append(values[vv, i]) + return bin_map + + +def 
_bin_edges(sample, bins=None, range=None): + """ Create edge arrays + """ + Dlen, Ndim = sample.shape + + nbin = np.empty(Ndim, int) # Number of bins in each dimension + edges = Ndim * [None] # Bin edges for each dim (will be 2D array) + dedges = Ndim * [None] # Spacing between edges (will be 2D array) + + # Select range for each dimension + # Used only if number of bins is given. + if range is None: + smin = np.atleast_1d(np.array(sample.min(axis=0), float)) + smax = np.atleast_1d(np.array(sample.max(axis=0), float)) + else: + if len(range) != Ndim: + raise ValueError( + f"range given for {len(range)} dimensions; {Ndim} required") + smin = np.empty(Ndim) + smax = np.empty(Ndim) + for i in builtins.range(Ndim): + if range[i][1] < range[i][0]: + raise ValueError( + "In {}range, start must be <= stop".format( + f"dimension {i + 1} of " if Ndim > 1 else "")) + smin[i], smax[i] = range[i] + + # Make sure the bins have a finite width. + for i in builtins.range(len(smin)): + if smin[i] == smax[i]: + smin[i] = smin[i] - .5 + smax[i] = smax[i] + .5 + + # Preserve sample floating point precision in bin edges + edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating) + else float) + + # Create edge arrays + for i in builtins.range(Ndim): + if np.isscalar(bins[i]): + nbin[i] = bins[i] + 2 # +2 for outlier bins + edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1, + dtype=edges_dtype) + else: + edges[i] = np.asarray(bins[i], edges_dtype) + nbin[i] = len(edges[i]) + 1 # +1 for outlier bins + dedges[i] = np.diff(edges[i]) + + nbin = np.asarray(nbin) + + return nbin, edges, dedges + + +def _bin_numbers(sample, nbin, edges, dedges): + """Compute the bin number each sample falls into, in each dimension + """ + Dlen, Ndim = sample.shape + + sampBin = [ + np.digitize(sample[:, i], edges[i]) + for i in range(Ndim) + ] + + # Using `digitize`, values that fall on an edge are put in the right bin. 
+ # For the rightmost bin, we want values equal to the right + # edge to be counted in the last bin, and not as an outlier. + for i in range(Ndim): + # Find the rounding precision + dedges_min = dedges[i].min() + if dedges_min == 0: + raise ValueError('The smallest edge difference is numerically 0.') + decimal = int(-np.log10(dedges_min)) + 6 + # Find which points are on the rightmost edge. + on_edge = np.where((sample[:, i] >= edges[i][-1]) & + (np.around(sample[:, i], decimal) == + np.around(edges[i][-1], decimal)))[0] + # Shift these points one bin to the left. + sampBin[i][on_edge] -= 1 + + # Compute the sample indices in the flattened statistic matrix. + binnumbers = np.ravel_multi_index(sampBin, nbin) + + return binnumbers diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_censored_data.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_censored_data.py new file mode 100644 index 0000000000000000000000000000000000000000..f6fee500f1d97db0bae9ebff26824d4d894c7f39 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_censored_data.py @@ -0,0 +1,459 @@ +import numpy as np + + +def _validate_1d(a, name, allow_inf=False): + if np.ndim(a) != 1: + raise ValueError(f'`{name}` must be a one-dimensional sequence.') + if np.isnan(a).any(): + raise ValueError(f'`{name}` must not contain nan.') + if not allow_inf and np.isinf(a).any(): + raise ValueError(f'`{name}` must contain only finite values.') + + +def _validate_interval(interval): + interval = np.asarray(interval) + if interval.shape == (0,): + # The input was a sequence with length 0. 
+ interval = interval.reshape((0, 2)) + if interval.ndim != 2 or interval.shape[-1] != 2: + raise ValueError('`interval` must be a two-dimensional array with ' + 'shape (m, 2), where m is the number of ' + 'interval-censored values, but got shape ' + f'{interval.shape}') + + if np.isnan(interval).any(): + raise ValueError('`interval` must not contain nan.') + if np.isinf(interval).all(axis=1).any(): + raise ValueError('In each row in `interval`, both values must not' + ' be infinite.') + if (interval[:, 0] > interval[:, 1]).any(): + raise ValueError('In each row of `interval`, the left value must not' + ' exceed the right value.') + + uncensored_mask = interval[:, 0] == interval[:, 1] + left_mask = np.isinf(interval[:, 0]) + right_mask = np.isinf(interval[:, 1]) + interval_mask = np.isfinite(interval).all(axis=1) & ~uncensored_mask + + uncensored2 = interval[uncensored_mask, 0] + left2 = interval[left_mask, 1] + right2 = interval[right_mask, 0] + interval2 = interval[interval_mask] + + return uncensored2, left2, right2, interval2 + + +def _validate_x_censored(x, censored): + x = np.asarray(x) + if x.ndim != 1: + raise ValueError('`x` must be one-dimensional.') + censored = np.asarray(censored) + if censored.ndim != 1: + raise ValueError('`censored` must be one-dimensional.') + if (~np.isfinite(x)).any(): + raise ValueError('`x` must not contain nan or inf.') + if censored.size != x.size: + raise ValueError('`x` and `censored` must have the same length.') + return x, censored.astype(bool) + + +class CensoredData: + """ + Instances of this class represent censored data. + + Instances may be passed to the ``fit`` method of continuous + univariate SciPy distributions for maximum likelihood estimation. + The *only* method of the univariate continuous distributions that + understands `CensoredData` is the ``fit`` method. An instance of + `CensoredData` can not be passed to methods such as ``pdf`` and + ``cdf``. 
+ + An observation is said to be *censored* when the precise value is unknown, + but it has a known upper and/or lower bound. The conventional terminology + is: + + * left-censored: an observation is below a certain value but it is + unknown by how much. + * right-censored: an observation is above a certain value but it is + unknown by how much. + * interval-censored: an observation lies somewhere on an interval between + two values. + + Left-, right-, and interval-censored data can be represented by + `CensoredData`. + + For convenience, the class methods ``left_censored`` and + ``right_censored`` are provided to create a `CensoredData` + instance from a single one-dimensional array of measurements + and a corresponding boolean array to indicate which measurements + are censored. The class method ``interval_censored`` accepts two + one-dimensional arrays that hold the lower and upper bounds of the + intervals. + + Parameters + ---------- + uncensored : array_like, 1D + Uncensored observations. + left : array_like, 1D + Left-censored observations. + right : array_like, 1D + Right-censored observations. + interval : array_like, 2D, with shape (m, 2) + Interval-censored observations. Each row ``interval[k, :]`` + represents the interval for the kth interval-censored observation. + + Notes + ----- + In the input array `interval`, the lower bound of the interval may + be ``-inf``, and the upper bound may be ``inf``, but at least one must be + finite. When the lower bound is ``-inf``, the row represents a left- + censored observation, and when the upper bound is ``inf``, the row + represents a right-censored observation. If the length of an interval + is 0 (i.e. ``interval[k, 0] == interval[k, 1]``, the observation is + treated as uncensored. 
So one can represent all the types of censored + and uncensored data in ``interval``, but it is generally more convenient + to use `uncensored`, `left` and `right` for uncensored, left-censored and + right-censored observations, respectively. + + Examples + -------- + In the most general case, a censored data set may contain values that + are left-censored, right-censored, interval-censored, and uncensored. + For example, here we create a data set with five observations. Two + are uncensored (values 1 and 1.5), one is a left-censored observation + of 0, one is a right-censored observation of 10 and one is + interval-censored in the interval [2, 3]. + + >>> import numpy as np + >>> from scipy.stats import CensoredData + >>> data = CensoredData(uncensored=[1, 1.5], left=[0], right=[10], + ... interval=[[2, 3]]) + >>> print(data) + CensoredData(5 values: 2 not censored, 1 left-censored, + 1 right-censored, 1 interval-censored) + + Equivalently, + + >>> data = CensoredData(interval=[[1, 1], + ... [1.5, 1.5], + ... [-np.inf, 0], + ... [10, np.inf], + ... [2, 3]]) + >>> print(data) + CensoredData(5 values: 2 not censored, 1 left-censored, + 1 right-censored, 1 interval-censored) + + A common case is to have a mix of uncensored observations and censored + observations that are all right-censored (or all left-censored). For + example, consider an experiment in which six devices are started at + various times and left running until they fail. Assume that time is + measured in hours, and the experiment is stopped after 30 hours, even + if all the devices have not failed by that time. We might end up with + data such as this:: + + Device Start-time Fail-time Time-to-failure + 1 0 13 13 + 2 2 24 22 + 3 5 22 17 + 4 8 23 15 + 5 10 *** >20 + 6 12 *** >18 + + Two of the devices had not failed when the experiment was stopped; + the observations of the time-to-failure for these two devices are + right-censored. 
We can represent this data with + + >>> data = CensoredData(uncensored=[13, 22, 17, 15], right=[20, 18]) + >>> print(data) + CensoredData(6 values: 4 not censored, 2 right-censored) + + Alternatively, we can use the method `CensoredData.right_censored` to + create a representation of this data. The time-to-failure observations + are put the list ``ttf``. The ``censored`` list indicates which values + in ``ttf`` are censored. + + >>> ttf = [13, 22, 17, 15, 20, 18] + >>> censored = [False, False, False, False, True, True] + + Pass these lists to `CensoredData.right_censored` to create an + instance of `CensoredData`. + + >>> data = CensoredData.right_censored(ttf, censored) + >>> print(data) + CensoredData(6 values: 4 not censored, 2 right-censored) + + If the input data is interval censored and already stored in two + arrays, one holding the low end of the intervals and another + holding the high ends, the class method ``interval_censored`` can + be used to create the `CensoredData` instance. + + This example creates an instance with four interval-censored values. + The intervals are [10, 11], [0.5, 1], [2, 3], and [12.5, 13.5]. + + >>> a = [10, 0.5, 2, 12.5] # Low ends of the intervals + >>> b = [11, 1.0, 3, 13.5] # High ends of the intervals + >>> data = CensoredData.interval_censored(low=a, high=b) + >>> print(data) + CensoredData(4 values: 0 not censored, 4 interval-censored) + + Finally, we create and censor some data from the `weibull_min` + distribution, and then fit `weibull_min` to that data. We'll assume + that the location parameter is known to be 0. + + >>> from scipy.stats import weibull_min + >>> rng = np.random.default_rng() + + Create the random data set. + + >>> x = weibull_min.rvs(2.5, loc=0, scale=30, size=250, random_state=rng) + >>> x[x > 40] = 40 # Right-censor values greater or equal to 40. + + Create the `CensoredData` instance with the `right_censored` method. + The censored values are those where the value is 40. 
+ + >>> data = CensoredData.right_censored(x, x == 40) + >>> print(data) + CensoredData(250 values: 215 not censored, 35 right-censored) + + 35 values have been right-censored. + + Fit `weibull_min` to the censored data. We expect to shape and scale + to be approximately 2.5 and 30, respectively. + + >>> weibull_min.fit(data, floc=0) + (2.3575922823897315, 0, 30.40650074451254) + + """ + + def __init__(self, uncensored=None, *, left=None, right=None, + interval=None): + if uncensored is None: + uncensored = [] + if left is None: + left = [] + if right is None: + right = [] + if interval is None: + interval = np.empty((0, 2)) + + _validate_1d(uncensored, 'uncensored') + _validate_1d(left, 'left') + _validate_1d(right, 'right') + uncensored2, left2, right2, interval2 = _validate_interval(interval) + + self._uncensored = np.concatenate((uncensored, uncensored2)) + self._left = np.concatenate((left, left2)) + self._right = np.concatenate((right, right2)) + # Note that by construction, the private attribute _interval + # will be a 2D array that contains only finite values representing + # intervals with nonzero but finite length. 
+ self._interval = interval2 + + def __repr__(self): + uncensored_str = " ".join(np.array_repr(self._uncensored).split()) + left_str = " ".join(np.array_repr(self._left).split()) + right_str = " ".join(np.array_repr(self._right).split()) + interval_str = " ".join(np.array_repr(self._interval).split()) + return (f"CensoredData(uncensored={uncensored_str}, left={left_str}, " + f"right={right_str}, interval={interval_str})") + + def __str__(self): + num_nc = len(self._uncensored) + num_lc = len(self._left) + num_rc = len(self._right) + num_ic = len(self._interval) + n = num_nc + num_lc + num_rc + num_ic + parts = [f'{num_nc} not censored'] + if num_lc > 0: + parts.append(f'{num_lc} left-censored') + if num_rc > 0: + parts.append(f'{num_rc} right-censored') + if num_ic > 0: + parts.append(f'{num_ic} interval-censored') + return f'CensoredData({n} values: ' + ', '.join(parts) + ')' + + # This is not a complete implementation of the arithmetic operators. + # All we need is subtracting a scalar and dividing by a scalar. + + def __sub__(self, other): + return CensoredData(uncensored=self._uncensored - other, + left=self._left - other, + right=self._right - other, + interval=self._interval - other) + + def __truediv__(self, other): + return CensoredData(uncensored=self._uncensored / other, + left=self._left / other, + right=self._right / other, + interval=self._interval / other) + + def __len__(self): + """ + The number of values (censored and not censored). + """ + return (len(self._uncensored) + len(self._left) + len(self._right) + + len(self._interval)) + + def num_censored(self): + """ + Number of censored values. + """ + return len(self._left) + len(self._right) + len(self._interval) + + @classmethod + def right_censored(cls, x, censored): + """ + Create a `CensoredData` instance of right-censored data. + + Parameters + ---------- + x : array_like + `x` is the array of observed data or measurements. + `x` must be a one-dimensional sequence of finite numbers. 
+ censored : array_like of bool + `censored` must be a one-dimensional sequence of boolean + values. If ``censored[k]`` is True, the corresponding value + in `x` is right-censored. That is, the value ``x[k]`` + is the lower bound of the true (but unknown) value. + + Returns + ------- + data : `CensoredData` + An instance of `CensoredData` that represents the + collection of uncensored and right-censored values. + + Examples + -------- + >>> from scipy.stats import CensoredData + + Two uncensored values (4 and 10) and two right-censored values + (24 and 25). + + >>> data = CensoredData.right_censored([4, 10, 24, 25], + ... [False, False, True, True]) + >>> data + CensoredData(uncensored=array([ 4., 10.]), + left=array([], dtype=float64), right=array([24., 25.]), + interval=array([], shape=(0, 2), dtype=float64)) + >>> print(data) + CensoredData(4 values: 2 not censored, 2 right-censored) + """ + x, censored = _validate_x_censored(x, censored) + return cls(uncensored=x[~censored], right=x[censored]) + + @classmethod + def left_censored(cls, x, censored): + """ + Create a `CensoredData` instance of left-censored data. + + Parameters + ---------- + x : array_like + `x` is the array of observed data or measurements. + `x` must be a one-dimensional sequence of finite numbers. + censored : array_like of bool + `censored` must be a one-dimensional sequence of boolean + values. If ``censored[k]`` is True, the corresponding value + in `x` is left-censored. That is, the value ``x[k]`` + is the upper bound of the true (but unknown) value. + + Returns + ------- + data : `CensoredData` + An instance of `CensoredData` that represents the + collection of uncensored and left-censored values. + + Examples + -------- + >>> from scipy.stats import CensoredData + + Two uncensored values (0.12 and 0.033) and two left-censored values + (both 1e-3). + + >>> data = CensoredData.left_censored([0.12, 0.033, 1e-3, 1e-3], + ... 
[False, False, True, True]) + >>> data + CensoredData(uncensored=array([0.12 , 0.033]), + left=array([0.001, 0.001]), right=array([], dtype=float64), + interval=array([], shape=(0, 2), dtype=float64)) + >>> print(data) + CensoredData(4 values: 2 not censored, 2 left-censored) + """ + x, censored = _validate_x_censored(x, censored) + return cls(uncensored=x[~censored], left=x[censored]) + + @classmethod + def interval_censored(cls, low, high): + """ + Create a `CensoredData` instance of interval-censored data. + + This method is useful when all the data is interval-censored, and + the low and high ends of the intervals are already stored in + separate one-dimensional arrays. + + Parameters + ---------- + low : array_like + The one-dimensional array containing the low ends of the + intervals. + high : array_like + The one-dimensional array containing the high ends of the + intervals. + + Returns + ------- + data : `CensoredData` + An instance of `CensoredData` that represents the + collection of censored values. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import CensoredData + + ``a`` and ``b`` are the low and high ends of a collection of + interval-censored values. 
+ + >>> a = [0.5, 2.0, 3.0, 5.5] + >>> b = [1.0, 2.5, 3.5, 7.0] + >>> data = CensoredData.interval_censored(low=a, high=b) + >>> print(data) + CensoredData(4 values: 0 not censored, 4 interval-censored) + """ + _validate_1d(low, 'low', allow_inf=True) + _validate_1d(high, 'high', allow_inf=True) + if len(low) != len(high): + raise ValueError('`low` and `high` must have the same length.') + interval = np.column_stack((low, high)) + uncensored, left, right, interval = _validate_interval(interval) + return cls(uncensored=uncensored, left=left, right=right, + interval=interval) + + def _uncensor(self): + """ + This function is used when a non-censored version of the data + is needed to create a rough estimate of the parameters of a + distribution via the method of moments or some similar method. + The data is "uncensored" by taking the given endpoints as the + data for the left- or right-censored data, and the mean for the + interval-censored data. + """ + data = np.concatenate((self._uncensored, self._left, self._right, + self._interval.mean(axis=1))) + return data + + def _supported(self, a, b): + """ + Return a subset of self containing the values that are in + (or overlap with) the interval (a, b). + """ + uncensored = self._uncensored + uncensored = uncensored[(a < uncensored) & (uncensored < b)] + left = self._left + left = left[a < left] + right = self._right + right = right[right < b] + interval = self._interval + interval = interval[(a < interval[:, 1]) & (interval[:, 0] < b)] + return CensoredData(uncensored, left=left, right=right, + interval=interval) diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_constants.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..374fadda992e135c025a658e9f9dcec9263c0eb9 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_constants.py @@ -0,0 +1,39 @@ +""" +Statistics-related constants. 
class Covariance:
    """
    Representation of a covariance matrix

    Calculations involving covariance matrices (e.g. data whitening,
    multivariate normal function evaluation) are often performed more
    efficiently using a decomposition of the covariance matrix rather
    than the matrix itself.  This class wraps several such decompositions
    behind one common interface.

    .. note::

        The `Covariance` class cannot be instantiated directly. Instead, use
        one of the factory methods (e.g. `Covariance.from_diagonal`).

    Examples
    --------
    >>> from scipy import stats
    >>> cov = stats.Covariance.from_diagonal([1, 2, 3])
    >>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=cov)
    >>> dist.pdf([4, -2, 5])
    4.9595685102808205e-08
    """
    def __init__(self):
        message = ("The `Covariance` class cannot be instantiated directly. "
                   "Please use one of the factory methods "
                   "(e.g. `Covariance.from_diagonal`).")
        raise NotImplementedError(message)

    @staticmethod
    def from_diagonal(diagonal):
        r"""
        Return a representation of a covariance matrix from its diagonal.

        Parameters
        ----------
        diagonal : array_like
            The diagonal elements of a diagonal matrix.

        Notes
        -----
        Whitening multiplies element-wise by :math:`d^{-1/2}` and the log
        pseudo-determinant is :math:`\sum \log{d}`.  Singular matrices are
        supported: non-positive diagonal entries are ignored in the
        pseudo-determinant and their inverse square roots are treated as
        zero.
        """
        return CovViaDiagonal(diagonal)

    @staticmethod
    def from_precision(precision, covariance=None):
        r"""
        Return a representation of a covariance from its precision matrix.

        Parameters
        ----------
        precision : array_like
            The precision matrix, i.e. the inverse of a square, symmetric,
            positive definite covariance matrix.
        covariance : array_like, optional
            The covariance matrix itself; if omitted, it is computed from
            `precision` on demand when needed.

        Notes
        -----
        With :math:`L` the lower Cholesky factor of the precision matrix
        :math:`P = A^{-1}`, whitening computes :math:`x^T L` and
        :math:`\log\det{A}` is :math:`-2tr(\log{L})`.  Singular covariance
        matrices are not supported (they have no precision matrix).
        """
        return CovViaPrecision(precision, covariance)

    @staticmethod
    def from_cholesky(cholesky):
        r"""
        Representation of a covariance provided via the (lower) Cholesky factor

        Parameters
        ----------
        cholesky : array_like
            The lower triangular Cholesky factor of the covariance matrix.

        Notes
        -----
        Whitening solves :math:`L z = x` and :math:`\log\det{A}` is
        :math:`2tr(\log{L})`.  Singular covariance matrices are not
        supported (they have no Cholesky factor).
        """
        return CovViaCholesky(cholesky)

    @staticmethod
    def from_eigendecomposition(eigendecomposition):
        r"""
        Representation of a covariance provided via eigendecomposition

        Parameters
        ----------
        eigendecomposition : sequence
            A sequence (nominally a tuple) containing the eigenvalue and
            eigenvector arrays as computed by `scipy.linalg.eigh` or
            `numpy.linalg.eigh`.

        Notes
        -----
        With :math:`V W V^T = A`, whitening computes
        :math:`x^T (V W^{-1/2})` and the log pseudo-determinant is
        :math:`tr(\log{W})`.  Singular matrices are supported:
        non-positive eigenvalues are ignored in the pseudo-determinant
        and their inverse square roots are treated as zero.
        """
        return CovViaEigendecomposition(eigendecomposition)

    def whiten(self, x):
        """
        Transform points so that their covariance is (near) the identity.

        Parameters
        ----------
        x : array_like
            An array of points; the last axis must match the
            dimensionality of the covariance matrix.

        Returns
        -------
        x_ : array_like
            The transformed array of points.
        """
        return self._whiten(np.asarray(x))

    def colorize(self, x):
        """
        Transform uncorrelated points so that they have this covariance.

        Parameters
        ----------
        x : array_like
            An array of points; the last axis must match the
            dimensionality of the covariance matrix.

        Returns
        -------
        x_ : array_like
            The transformed array of points.
        """
        return self._colorize(np.asarray(x))

    @property
    def log_pdet(self):
        """
        Log of the pseudo-determinant of the covariance matrix
        """
        return np.array(self._log_pdet, dtype=float)[()]

    @property
    def rank(self):
        """
        Rank of the covariance matrix
        """
        return np.array(self._rank, dtype=int)[()]

    @property
    def covariance(self):
        """
        Explicit representation of the covariance matrix
        """
        return self._covariance

    @property
    def shape(self):
        """
        Shape of the covariance array
        """
        return self._shape

    def _validate_matrix(self, A, name):
        # Accept only a real-valued, square, strictly 2-d array.
        A = np.atleast_2d(A)
        m, n = A.shape[-2:]
        is_real = (np.issubdtype(A.dtype, np.integer)
                   or np.issubdtype(A.dtype, np.floating))
        if m != n or A.ndim != 2 or not is_real:
            message = (f"The input `{name}` must be a square, "
                       "two-dimensional array of real numbers.")
            raise ValueError(message)
        return A

    def _validate_vector(self, A, name):
        # Accept only a real-valued, strictly 1-d array.
        A = np.atleast_1d(A)
        is_real = (np.issubdtype(A.dtype, np.integer)
                   or np.issubdtype(A.dtype, np.floating))
        if A.ndim != 1 or not is_real:
            message = (f"The input `{name}` must be a one-dimensional array "
                       "of real numbers.")
            raise ValueError(message)
        return A


class CovViaPrecision(Covariance):
    """Covariance represented by its precision (inverse covariance) matrix."""

    def __init__(self, precision, covariance=None):
        precision = self._validate_matrix(precision, 'precision')
        if covariance is not None:
            covariance = self._validate_matrix(covariance, 'covariance')
            if precision.shape != covariance.shape:
                raise ValueError(
                    "`precision.shape` must equal `covariance.shape`.")

        self._chol_P = np.linalg.cholesky(precision)
        self._log_pdet = -2*np.log(np.diag(self._chol_P)).sum(axis=-1)
        self._rank = precision.shape[-1]  # invertible implies full rank
        self._precision = precision
        self._cov_matrix = covariance
        self._shape = precision.shape
        self._allow_singular = False

    def _whiten(self, x):
        return x @ self._chol_P

    @cached_property
    def _covariance(self):
        # Invert the precision matrix lazily, unless the caller supplied
        # the covariance matrix explicitly at construction.
        if self._cov_matrix is not None:
            return self._cov_matrix
        n = self._shape[-1]
        return linalg.cho_solve((self._chol_P, True), np.eye(n))

    def _colorize(self, x):
        return linalg.solve_triangular(self._chol_P.T, x.T, lower=False).T
def _dot_diag(x, d):
    # Multiply `x` by a diagonal matrix stored as its diagonal `d`.  For
    # batched `d` (one diagonal per row) the diagonal axis must line up
    # with the last axis of `x`, hence the expand_dims.
    return x * d if x.ndim < 2 else x * np.expand_dims(d, -2)


class CovViaDiagonal(Covariance):
    """Covariance represented by the diagonal of a diagonal matrix."""

    def __init__(self, diagonal):
        diagonal = self._validate_vector(diagonal, 'diagonal')

        i_zero = diagonal <= 0
        positive_diagonal = np.array(diagonal, dtype=np.float64)

        # Replace non-positive entries by one so they contribute nothing
        # to the log pseudo-determinant.
        positive_diagonal[i_zero] = 1
        self._log_pdet = np.sum(np.log(positive_diagonal), axis=-1)

        pseudo_reciprocals = 1 / np.sqrt(positive_diagonal)
        pseudo_reciprocals[i_zero] = 0

        self._sqrt_diagonal = np.sqrt(diagonal)
        self._LP = pseudo_reciprocals
        self._rank = positive_diagonal.shape[-1] - i_zero.sum(axis=-1)
        self._covariance = np.apply_along_axis(np.diag, -1, diagonal)
        self._i_zero = i_zero
        self._shape = self._covariance.shape
        self._allow_singular = True

    def _whiten(self, x):
        return _dot_diag(x, self._LP)

    def _colorize(self, x):
        return _dot_diag(x, self._sqrt_diagonal)

    def _support_mask(self, x):
        """
        Check whether x lies in the support of the distribution.
        """
        # A point lies outside the support iff it has a nonzero component
        # along an axis whose variance is zero.
        return ~np.any(_dot_diag(x, self._i_zero), axis=-1)


class CovViaCholesky(Covariance):
    """Covariance represented by its lower Cholesky factor."""

    def __init__(self, cholesky):
        L = self._validate_matrix(cholesky, 'cholesky')

        self._factor = L
        self._log_pdet = 2*np.log(np.diag(self._factor)).sum(axis=-1)
        self._rank = L.shape[-1]  # a Cholesky factor implies full rank
        self._shape = L.shape
        self._allow_singular = False

    @cached_property
    def _covariance(self):
        return self._factor @ self._factor.T

    def _whiten(self, x):
        return linalg.solve_triangular(self._factor, x.T, lower=True).T

    def _colorize(self, x):
        return x @ self._factor.T


class CovViaEigendecomposition(Covariance):
    """Covariance represented by its eigenvalues and eigenvectors."""

    def __init__(self, eigendecomposition):
        eigenvalues, eigenvectors = eigendecomposition
        eigenvalues = self._validate_vector(eigenvalues, 'eigenvalues')
        eigenvectors = self._validate_matrix(eigenvectors, 'eigenvectors')
        message = ("The shapes of `eigenvalues` and `eigenvectors` "
                   "must be compatible.")
        try:
            # Broadcast the eigenvalue vector against the rows of the
            # eigenvector array, then collapse back to one row of values.
            eigenvalues = np.expand_dims(eigenvalues, -2)
            eigenvectors, eigenvalues = np.broadcast_arrays(eigenvectors,
                                                            eigenvalues)
            eigenvalues = eigenvalues[..., 0, :]
        except ValueError:
            raise ValueError(message)

        i_zero = eigenvalues <= 0
        positive_eigenvalues = np.array(eigenvalues, dtype=np.float64)

        # Replace non-positive eigenvalues by one so they contribute
        # nothing to the log pseudo-determinant.
        positive_eigenvalues[i_zero] = 1
        self._log_pdet = np.sum(np.log(positive_eigenvalues), axis=-1)

        pseudo_reciprocals = 1 / np.sqrt(positive_eigenvalues)
        pseudo_reciprocals[i_zero] = 0

        self._LP = eigenvectors * pseudo_reciprocals
        self._LA = eigenvectors * np.sqrt(eigenvalues)
        self._rank = positive_eigenvalues.shape[-1] - i_zero.sum(axis=-1)
        self._w = eigenvalues
        self._v = eigenvectors
        self._shape = eigenvectors.shape
        self._null_basis = eigenvectors * i_zero
        # `_eps` is used only by `_support_mask`, never to decide whether
        # the covariance is singular.
        self._eps = _multivariate._eigvalsh_to_eps(eigenvalues) * 10**3
        self._allow_singular = True

    def _whiten(self, x):
        return x @ self._LP

    def _colorize(self, x):
        return x @ self._LA.T

    @cached_property
    def _covariance(self):
        return (self._v * self._w) @ self._v.T

    def _support_mask(self, x):
        """
        Check whether x lies in the support of the distribution.
        """
        # The point is in the span of the positive-eigenvalue eigenvectors
        # iff its projection onto the null basis is (numerically) zero.
        residual = np.linalg.norm(x @ self._null_basis, axis=-1)
        return residual < self._eps


class CovViaPSD(Covariance):
    """
    Representation of a covariance provided via an instance of _PSD
    """

    def __init__(self, psd):
        self._LP = psd.U
        self._log_pdet = psd.log_pdet
        self._rank = psd.rank
        self._covariance = psd._M
        self._shape = psd._M.shape
        self._psd = psd
        self._allow_singular = False  # by default

    def _whiten(self, x):
        return x @ self._LP

    def _support_mask(self, x):
        return self._psd._support_mask(x)
If + `levels` is None, the shape of `count` is ``(n0, n1, ...)``, where ``nk`` + is the number of unique elements in ``args[k]``. + + Parameters + ---------- + *args : sequences + A sequence of sequences whose unique aligned elements are to be + counted. The sequences in args must all be the same length. + levels : sequence, optional + If `levels` is given, it must be a sequence that is the same length as + `args`. Each element in `levels` is either a sequence or None. If it + is a sequence, it gives the values in the corresponding sequence in + `args` that are to be counted. If any value in the sequences in `args` + does not occur in the corresponding sequence in `levels`, that value + is ignored and not counted in the returned array `count`. The default + value of `levels` for ``args[i]`` is ``np.unique(args[i])`` + sparse : bool, optional + If True, return a sparse matrix. The matrix will be an instance of + the `scipy.sparse.coo_matrix` class. Because SciPy's sparse matrices + must be 2-d, only two input sequences are allowed when `sparse` is + True. Default is False. + + Returns + ------- + res : CrosstabResult + An object containing the following attributes: + + elements : tuple of numpy.ndarrays. + Tuple of length ``len(args)`` containing the arrays of elements + that are counted in `count`. These can be interpreted as the + labels of the corresponding dimensions of `count`. If `levels` was + given, then if ``levels[i]`` is not None, ``elements[i]`` will + hold the values given in ``levels[i]``. + count : numpy.ndarray or scipy.sparse.coo_matrix + Counts of the unique elements in ``zip(*args)``, stored in an + array. Also known as a *contingency table* when ``len(args) > 1``. + + See Also + -------- + numpy.unique + + Notes + ----- + .. versionadded:: 1.7.0 + + References + ---------- + .. 
[1] "Contingency table", http://en.wikipedia.org/wiki/Contingency_table
+
+    Examples
+    --------
+    >>> from scipy.stats.contingency import crosstab
+
+    Given the lists `a` and `x`, create a contingency table that counts the
+    frequencies of the corresponding pairs.
+
+    >>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
+    >>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
+    >>> res = crosstab(a, x)
+    >>> avals, xvals = res.elements
+    >>> avals
+    array(['A', 'B'], dtype='<U1')
+    >>> xvals
+    array(['X', 'Y', 'Z'], dtype='<U1')
+    >>> res.count
+    array([[2, 3, 0],
+           [1, 0, 4]])
+
+    So `('A', 'X')` occurs twice, `('A', 'Y')` occurs three times, etc.
+
+    Higher dimensional contingency tables can be created.
+
+    >>> p = [0, 0, 0, 0, 1, 1, 1, 0, 0, 1]
+    >>> res = crosstab(a, x, p)
+    >>> res.count
+    array([[[2, 0],
+            [2, 1],
+            [0, 0]],
+           [[1, 0],
+            [0, 0],
+            [1, 3]]])
+    >>> res.count.shape
+    (2, 3, 2)
+
+    The values to be counted can be set by using the `levels` argument.
+    It allows the elements of interest in each input sequence to be
+    given explicitly instead of finding the unique elements of the sequence.
+
+    For example, suppose one of the arguments is an array containing the
+    answers to a survey question, with integer values 1 to 4.  Even if the
+    value 1 does not occur in the data, we want an entry for it in the table.
+
+    >>> q1 = [2, 3, 3, 2, 4, 4, 2, 3, 4, 4, 4, 3, 3, 3, 4]  # 1 does not occur.
+    >>> q2 = [4, 4, 2, 2, 2, 4, 1, 1, 2, 2, 4, 2, 2, 2, 4]  # 3 does not occur.
+    >>> options = [1, 2, 3, 4]
+    >>> res = crosstab(q1, q2, levels=(options, options))
+    >>> res.count
+    array([[0, 0, 0, 0],
+           [1, 1, 0, 1],
+           [1, 4, 0, 1],
+           [0, 3, 0, 3]])
+
+    If `levels` is given, but an element of `levels` is None, the unique values
+    of the corresponding argument are used.
For example,
+
+    >>> res = crosstab(q1, q2, levels=(None, options))
+    >>> res.elements
+    [array([2, 3, 4]), [1, 2, 3, 4]]
+    >>> res.count
+    array([[1, 1, 0, 1],
+           [1, 4, 0, 1],
+           [0, 3, 0, 3]])
+
+    If we want to ignore the pairs where 4 occurs in ``q2``, we can
+    give just the values [1, 2] to `levels`, and the 4 will be ignored:
+
+    >>> res = crosstab(q1, q2, levels=(None, [1, 2]))
+    >>> res.elements
+    [array([2, 3, 4]), [1, 2]]
+    >>> res.count
+    array([[1, 1],
+           [1, 4],
+           [0, 3]])
+
+    Finally, let's repeat the first example, but return a sparse matrix:
+
+    >>> res = crosstab(a, x, sparse=True)
+    >>> res.count
+    <2x3 sparse matrix of type '<class 'numpy.int64'>'
+            with 4 stored elements in COOrdinate format>
+    >>> res.count.toarray()
+    array([[2, 3, 0],
+           [1, 0, 4]])
+
+    """
+    nargs = len(args)
+    if nargs == 0:
+        raise TypeError("At least one input sequence is required.")
+
+    len0 = len(args[0])
+    if not all(len(a) == len0 for a in args[1:]):
+        raise ValueError("All input sequences must have the same length.")
+
+    if sparse and nargs != 2:
+        raise ValueError("When `sparse` is True, only two input sequences "
+                         "are allowed.")
+
+    if levels is None:
+        # Call np.unique with return_inverse=True on each argument.
+        actual_levels, indices = zip(*[np.unique(a, return_inverse=True)
+                                       for a in args])
+    else:
+        # `levels` is not None...
+ if len(levels) != nargs: + raise ValueError('len(levels) must equal the number of input ' + 'sequences') + + args = [np.asarray(arg) for arg in args] + mask = np.zeros((nargs, len0), dtype=np.bool_) + inv = np.zeros((nargs, len0), dtype=np.intp) + actual_levels = [] + for k, (levels_list, arg) in enumerate(zip(levels, args)): + if levels_list is None: + levels_list, inv[k, :] = np.unique(arg, return_inverse=True) + mask[k, :] = True + else: + q = arg == np.asarray(levels_list).reshape(-1, 1) + mask[k, :] = np.any(q, axis=0) + qnz = q.T.nonzero() + inv[k, qnz[0]] = qnz[1] + actual_levels.append(levels_list) + + mask_all = mask.all(axis=0) + indices = tuple(inv[:, mask_all]) + + if sparse: + count = coo_matrix((np.ones(len(indices[0]), dtype=int), + (indices[0], indices[1]))) + count.sum_duplicates() + else: + shape = [len(u) for u in actual_levels] + count = np.zeros(shape, dtype=int) + np.add.at(count, indices, 1) + + return CrosstabResult(actual_levels, count) diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py new file mode 100644 index 0000000000000000000000000000000000000000..68e8d03dd0aa3fa980d9fa9cb2d1a229d68d07a0 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py @@ -0,0 +1,4137 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +from scipy._lib._util import getfullargspec_no_self as _getfullargspec + +import sys +import keyword +import re +import types +import warnings +from itertools import zip_longest + +from scipy._lib import doccer +from ._distr_params import distcont, distdiscrete +from scipy._lib._util import check_random_state + +from scipy.special import comb, entr + + +# for root finding for continuous distribution ppf, and maximum likelihood +# estimation +from scipy import optimize + +# for functions of continuous distributions (e.g. 
moments, entropy, cdf) +from scipy import integrate + +# to approximate the pdf of a continuous distribution given its cdf +from scipy._lib._finite_differences import _derivative + +# for scipy.stats.entropy. Attempts to import just that function or file +# have cause import problems +from scipy import stats + +from numpy import (arange, putmask, ones, shape, ndarray, zeros, floor, + logical_and, log, sqrt, place, argmax, vectorize, asarray, + nan, inf, isinf, empty) + +import numpy as np +from ._constants import _XMAX, _LOGXMAX +from ._censored_data import CensoredData +from scipy.stats._warnings_errors import FitError + +# These are the docstring parts used for substitution in specific +# distribution docstrings + +docheaders = {'methods': """\nMethods\n-------\n""", + 'notes': """\nNotes\n-----\n""", + 'examples': """\nExamples\n--------\n"""} + +_doc_rvs = """\ +rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None) + Random variates. +""" +_doc_pdf = """\ +pdf(x, %(shapes)s, loc=0, scale=1) + Probability density function. +""" +_doc_logpdf = """\ +logpdf(x, %(shapes)s, loc=0, scale=1) + Log of the probability density function. +""" +_doc_pmf = """\ +pmf(k, %(shapes)s, loc=0, scale=1) + Probability mass function. +""" +_doc_logpmf = """\ +logpmf(k, %(shapes)s, loc=0, scale=1) + Log of the probability mass function. +""" +_doc_cdf = """\ +cdf(x, %(shapes)s, loc=0, scale=1) + Cumulative distribution function. +""" +_doc_logcdf = """\ +logcdf(x, %(shapes)s, loc=0, scale=1) + Log of the cumulative distribution function. +""" +_doc_sf = """\ +sf(x, %(shapes)s, loc=0, scale=1) + Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate). +""" # noqa: E501 +_doc_logsf = """\ +logsf(x, %(shapes)s, loc=0, scale=1) + Log of the survival function. +""" +_doc_ppf = """\ +ppf(q, %(shapes)s, loc=0, scale=1) + Percent point function (inverse of ``cdf`` --- percentiles). 
+"""
+_doc_isf = """\
+isf(q, %(shapes)s, loc=0, scale=1)
+    Inverse survival function (inverse of ``sf``).
+"""
+_doc_moment = """\
+moment(order, %(shapes)s, loc=0, scale=1)
+    Non-central moment of the specified order.
+"""
+_doc_stats = """\
+stats(%(shapes)s, loc=0, scale=1, moments='mv')
+    Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
+"""
+_doc_entropy = """\
+entropy(%(shapes)s, loc=0, scale=1)
+    (Differential) entropy of the RV.
+"""
+_doc_fit = """\
+fit(data)
+    Parameter estimates for generic data.
+    See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
+    keyword arguments.
+"""  # noqa: E501
+_doc_expect = """\
+expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
+    Expected value of a function (of one argument) with respect to the distribution.
+"""  # noqa: E501
+_doc_expect_discrete = """\
+expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
+    Expected value of a function (of one argument) with respect to the distribution.
+"""
+_doc_median = """\
+median(%(shapes)s, loc=0, scale=1)
+    Median of the distribution.
+"""
+_doc_mean = """\
+mean(%(shapes)s, loc=0, scale=1)
+    Mean of the distribution.
+"""
+_doc_var = """\
+var(%(shapes)s, loc=0, scale=1)
+    Variance of the distribution.
+"""
+_doc_std = """\
+std(%(shapes)s, loc=0, scale=1)
+    Standard deviation of the distribution.
+"""
+_doc_interval = """\
+interval(confidence, %(shapes)s, loc=0, scale=1)
+    Confidence interval with equal areas around the median.
+""" +_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, + _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, + _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, + _doc_stats, _doc_entropy, _doc_fit, + _doc_expect, _doc_median, + _doc_mean, _doc_var, _doc_std, _doc_interval]) + +_doc_default_longsummary = """\ +As an instance of the `rv_continuous` class, `%(name)s` object inherits from it +a collection of generic methods (see below for the full list), +and completes them with details specific for this particular distribution. +""" + +_doc_default_frozen_note = """ +Alternatively, the object may be called (as a function) to fix the shape, +location, and scale parameters returning a "frozen" continuous RV object: + +rv = %(name)s(%(shapes)s, loc=0, scale=1) + - Frozen RV object with the same methods but holding the given shape, + location, and scale fixed. +""" +_doc_default_example = """\ +Examples +-------- +>>> import numpy as np +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate the first four moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability density function (``pdf``): + +>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s), 100) +>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s), +... 'r-', lw=5, alpha=0.6, label='%(name)s pdf') + +Alternatively, the distribution object can be called (as a function) +to fix the shape, location and scale parameters. This returns a "frozen" +RV object holding the given parameters fixed. 
+ +Freeze the distribution and display the frozen ``pdf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') + +Check accuracy of ``cdf`` and ``ppf``: + +>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s) +>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) + +And compare the histogram: + +>>> ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2) +>>> ax.set_xlim([x[0], x[-1]]) +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +""" + +_doc_default_locscale = """\ +The probability density above is defined in the "standardized" form. To shift +and/or scale the distribution use the ``loc`` and ``scale`` parameters. +Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically +equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with +``y = (x - loc) / scale``. Note that shifting the location of a distribution +does not make it a "noncentral" distribution; noncentral generalizations of +some distributions are available in separate classes. 
+""" + +_doc_default = ''.join([_doc_default_longsummary, + _doc_allmethods, + '\n', + _doc_default_example]) + +_doc_default_before_notes = ''.join([_doc_default_longsummary, + _doc_allmethods]) + +docdict = { + 'rvs': _doc_rvs, + 'pdf': _doc_pdf, + 'logpdf': _doc_logpdf, + 'cdf': _doc_cdf, + 'logcdf': _doc_logcdf, + 'sf': _doc_sf, + 'logsf': _doc_logsf, + 'ppf': _doc_ppf, + 'isf': _doc_isf, + 'stats': _doc_stats, + 'entropy': _doc_entropy, + 'fit': _doc_fit, + 'moment': _doc_moment, + 'expect': _doc_expect, + 'interval': _doc_interval, + 'mean': _doc_mean, + 'std': _doc_std, + 'var': _doc_var, + 'median': _doc_median, + 'allmethods': _doc_allmethods, + 'longsummary': _doc_default_longsummary, + 'frozennote': _doc_default_frozen_note, + 'example': _doc_default_example, + 'default': _doc_default, + 'before_notes': _doc_default_before_notes, + 'after_notes': _doc_default_locscale +} + +# Reuse common content between continuous and discrete docs, change some +# minor bits. +docdict_discrete = docdict.copy() + +docdict_discrete['pmf'] = _doc_pmf +docdict_discrete['logpmf'] = _doc_logpmf +docdict_discrete['expect'] = _doc_expect_discrete +_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', + 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median', + 'mean', 'var', 'std', 'interval'] +for obj in _doc_disc_methods: + docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') + +_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf'] +for obj in _doc_disc_methods_err_varname: + docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ') + +docdict_discrete.pop('pdf') +docdict_discrete.pop('logpdf') + +_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods]) +docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods + +docdict_discrete['longsummary'] = _doc_default_longsummary.replace( + 'rv_continuous', 'rv_discrete') + +_doc_default_frozen_note = """ +Alternatively, the object may 
be called (as a function) to fix the shape and +location parameters returning a "frozen" discrete RV object: + +rv = %(name)s(%(shapes)s, loc=0) + - Frozen RV object with the same methods but holding the given shape and + location fixed. +""" +docdict_discrete['frozennote'] = _doc_default_frozen_note + +_doc_default_discrete_example = """\ +Examples +-------- +>>> import numpy as np +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate the first four moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability mass function (``pmf``): + +>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s)) +>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf') +>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5) + +Alternatively, the distribution object can be called (as a function) +to fix the shape and location. This returns a "frozen" RV object holding +the given parameters fixed. + +Freeze the distribution and display the frozen ``pmf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1, +... label='frozen pmf') +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +Check accuracy of ``cdf`` and ``ppf``: + +>>> prob = %(name)s.cdf(x, %(shapes)s) +>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) +""" + + +_doc_default_discrete_locscale = """\ +The probability mass function above is defined in the "standardized" form. +To shift distribution use the ``loc`` parameter. +Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically +equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``. 
+""" + +docdict_discrete['example'] = _doc_default_discrete_example +docdict_discrete['after_notes'] = _doc_default_discrete_locscale + +_doc_default_before_notes = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods']]) +docdict_discrete['before_notes'] = _doc_default_before_notes + +_doc_default_disc = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods'], + docdict_discrete['frozennote'], + docdict_discrete['example']]) +docdict_discrete['default'] = _doc_default_disc + +# clean up all the separate docstring elements, we do not need them anymore +for obj in [s for s in dir() if s.startswith('_doc_')]: + exec('del ' + obj) +del obj + + +def _moment(data, n, mu=None): + if mu is None: + mu = data.mean() + return ((data - mu)**n).mean() + + +def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): + if (n == 0): + return 1.0 + elif (n == 1): + if mu is None: + val = moment_func(1, *args) + else: + val = mu + elif (n == 2): + if mu2 is None or mu is None: + val = moment_func(2, *args) + else: + val = mu2 + mu*mu + elif (n == 3): + if g1 is None or mu2 is None or mu is None: + val = moment_func(3, *args) + else: + mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment + val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment + elif (n == 4): + if g1 is None or g2 is None or mu2 is None or mu is None: + val = moment_func(4, *args) + else: + mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment + mu3 = g1*np.power(mu2, 1.5) # 3rd central moment + val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu + else: + val = moment_func(n, *args) + + return val + + +def _skew(data): + """ + skew is third central moment / variance**(1.5) + """ + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m3 = ((data - mu)**3).mean() + return m3 / np.power(m2, 1.5) + + +def _kurtosis(data): + """Fisher's excess kurtosis is fourth central moment / variance**2 - 3.""" + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m4 
= ((data - mu)**4).mean() + return m4 / m2**2 - 3 + +def _vectorize_rvs_over_shapes(_rvs1): + """Decorator that vectorizes _rvs method to work on ndarray shapes""" + # _rvs1 must be a _function_ that accepts _scalar_ args as positional + # arguments, `size` and `random_state` as keyword arguments. + # _rvs1 must return a random variate array with shape `size`. If `size` is + # None, _rvs1 must return a scalar. + # When applied to _rvs1, this decorator broadcasts ndarray args + # and loops over them, calling _rvs1 for each set of scalar args. + # For usage example, see _nchypergeom_gen + def _rvs(*args, size, random_state): + _rvs1_size, _rvs1_indices = _check_shape(args[0].shape, size) + + size = np.array(size) + _rvs1_size = np.array(_rvs1_size) + _rvs1_indices = np.array(_rvs1_indices) + + if np.all(_rvs1_indices): # all args are scalars + return _rvs1(*args, size, random_state) + + out = np.empty(size) + + # out.shape can mix dimensions associated with arg_shape and _rvs1_size + # Sort them to arg_shape + _rvs1_size for easy indexing of dimensions + # corresponding with the different sets of scalar args + j0 = np.arange(out.ndim) + j1 = np.hstack((j0[~_rvs1_indices], j0[_rvs1_indices])) + out = np.moveaxis(out, j1, j0) + + for i in np.ndindex(*size[~_rvs1_indices]): + # arg can be squeezed because singleton dimensions will be + # associated with _rvs1_size, not arg_shape per _check_shape + out[i] = _rvs1(*[np.squeeze(arg)[i] for arg in args], + _rvs1_size, random_state) + + return np.moveaxis(out, j0, j1) # move axes back before returning + return _rvs + + +def _fit_determine_optimizer(optimizer): + if not callable(optimizer) and isinstance(optimizer, str): + if not optimizer.startswith('fmin_'): + optimizer = "fmin_"+optimizer + if optimizer == 'fmin_': + optimizer = 'fmin' + try: + optimizer = getattr(optimize, optimizer) + except AttributeError as e: + raise ValueError("%s is not a valid optimizer" % optimizer) from e + return optimizer + +def _isintegral(x): 
+ return x == np.round(x) + +def _sum_finite(x): + """ + For a 1D array x, return a tuple containing the sum of the + finite values of x and the number of nonfinite values. + + This is a utility function used when evaluating the negative + loglikelihood for a distribution and an array of samples. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import _sum_finite + >>> tot, nbad = _sum_finite(np.array([-2, -np.inf, 5, 1])) + >>> tot + 4.0 + >>> nbad + 1 + """ + finite_x = np.isfinite(x) + bad_count = finite_x.size - np.count_nonzero(finite_x) + return np.sum(x[finite_x]), bad_count + + +# Frozen RV class +class rv_frozen: + + def __init__(self, dist, *args, **kwds): + self.args = args + self.kwds = kwds + + # create a new instance + self.dist = dist.__class__(**dist._updated_ctor_param()) + + shapes, _, _ = self.dist._parse_args(*args, **kwds) + self.a, self.b = self.dist._get_support(*shapes) + + @property + def random_state(self): + return self.dist._random_state + + @random_state.setter + def random_state(self, seed): + self.dist._random_state = check_random_state(seed) + + def cdf(self, x): + return self.dist.cdf(x, *self.args, **self.kwds) + + def logcdf(self, x): + return self.dist.logcdf(x, *self.args, **self.kwds) + + def ppf(self, q): + return self.dist.ppf(q, *self.args, **self.kwds) + + def isf(self, q): + return self.dist.isf(q, *self.args, **self.kwds) + + def rvs(self, size=None, random_state=None): + kwds = self.kwds.copy() + kwds.update({'size': size, 'random_state': random_state}) + return self.dist.rvs(*self.args, **kwds) + + def sf(self, x): + return self.dist.sf(x, *self.args, **self.kwds) + + def logsf(self, x): + return self.dist.logsf(x, *self.args, **self.kwds) + + def stats(self, moments='mv'): + kwds = self.kwds.copy() + kwds.update({'moments': moments}) + return self.dist.stats(*self.args, **kwds) + + def median(self): + return self.dist.median(*self.args, **self.kwds) + + def mean(self): + 
return self.dist.mean(*self.args, **self.kwds) + + def var(self): + return self.dist.var(*self.args, **self.kwds) + + def std(self): + return self.dist.std(*self.args, **self.kwds) + + def moment(self, order=None): + return self.dist.moment(order, *self.args, **self.kwds) + + def entropy(self): + return self.dist.entropy(*self.args, **self.kwds) + + def interval(self, confidence=None): + return self.dist.interval(confidence, *self.args, **self.kwds) + + def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds): + # expect method only accepts shape parameters as positional args + # hence convert self.args, self.kwds, also loc/scale + # See the .expect method docstrings for the meaning of + # other parameters. + a, loc, scale = self.dist._parse_args(*self.args, **self.kwds) + if isinstance(self.dist, rv_discrete): + return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds) + else: + return self.dist.expect(func, a, loc, scale, lb, ub, + conditional, **kwds) + + def support(self): + return self.dist.support(*self.args, **self.kwds) + + +class rv_discrete_frozen(rv_frozen): + + def pmf(self, k): + return self.dist.pmf(k, *self.args, **self.kwds) + + def logpmf(self, k): # No error + return self.dist.logpmf(k, *self.args, **self.kwds) + + +class rv_continuous_frozen(rv_frozen): + + def pdf(self, x): + return self.dist.pdf(x, *self.args, **self.kwds) + + def logpdf(self, x): + return self.dist.logpdf(x, *self.args, **self.kwds) + + +def argsreduce(cond, *args): + """Clean arguments to: + + 1. Ensure all arguments are iterable (arrays of dimension at least one + 2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is + True, in 1D. + + Return list of processed arguments. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import argsreduce + >>> rng = np.random.default_rng() + >>> A = rng.random((4, 5)) + >>> B = 2 + >>> C = rng.random((1, 5)) + >>> cond = np.ones(A.shape) + >>> [A1, B1, C1] = argsreduce(cond, A, B, C) + >>> A1.shape + (4, 5) + >>> B1.shape + (1,) + >>> C1.shape + (1, 5) + >>> cond[2,:] = 0 + >>> [A1, B1, C1] = argsreduce(cond, A, B, C) + >>> A1.shape + (15,) + >>> B1.shape + (1,) + >>> C1.shape + (15,) + + """ + # some distributions assume arguments are iterable. + newargs = np.atleast_1d(*args) + + # np.atleast_1d returns an array if only one argument, or a list of arrays + # if more than one argument. + if not isinstance(newargs, (list, tuple)): + newargs = (newargs,) + + if np.all(cond): + # broadcast arrays with cond + *newargs, cond = np.broadcast_arrays(*newargs, cond) + return [arg.ravel() for arg in newargs] + + s = cond.shape + # np.extract returns flattened arrays, which are not broadcastable together + # unless they are either the same size or size == 1. + return [(arg if np.size(arg) == 1 + else np.extract(cond, np.broadcast_to(arg, s))) + for arg in newargs] + + +parse_arg_template = """ +def _parse_args(self, %(shape_arg_str)s %(locscale_in)s): + return (%(shape_arg_str)s), %(locscale_out)s + +def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None): + return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size) + +def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'): + return (%(shape_arg_str)s), %(locscale_out)s, moments +""" + + +class rv_generic: + """Class which encapsulates common functionality between rv_discrete + and rv_continuous. 
+ + """ + + def __init__(self, seed=None): + super().__init__() + + # figure out if _stats signature has 'moments' keyword + sig = _getfullargspec(self._stats) + self._stats_has_moments = ((sig.varkw is not None) or + ('moments' in sig.args) or + ('moments' in sig.kwonlyargs)) + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """Get or set the generator object for generating random variates. + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def __setstate__(self, state): + try: + self.__dict__.update(state) + # attaches the dynamically created methods on each instance. + # if a subclass overrides rv_generic.__setstate__, or implements + # it's own _attach_methods, then it must make sure that + # _attach_argparser_methods is called. + self._attach_methods() + except ValueError: + # reconstitute an old pickle scipy<1.6, that contains + # (_ctor_param, random_state) as state + self._ctor_param = state[0] + self._random_state = state[1] + self.__init__() + + def _attach_methods(self): + """Attaches dynamically created methods to the rv_* instance. + + This method must be overridden by subclasses, and must itself call + _attach_argparser_methods. This method is called in __init__ in + subclasses, and in __setstate__ + """ + raise NotImplementedError + + def _attach_argparser_methods(self): + """ + Generates the argument-parsing functions dynamically and attaches + them to the instance. 
+ + Should be called from `_attach_methods`, typically in __init__ and + during unpickling (__setstate__) + """ + ns = {} + exec(self._parse_arg_template, ns) + # NB: attach to the instance, not class + for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']: + setattr(self, name, types.MethodType(ns[name], self)) + + def _construct_argparser( + self, meths_to_inspect, locscale_in, locscale_out): + """Construct the parser string for the shape arguments. + + This method should be called in __init__ of a class for each + distribution. It creates the `_parse_arg_template` attribute that is + then used by `_attach_argparser_methods` to dynamically create and + attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs` + methods to the instance. + + If self.shapes is a non-empty string, interprets it as a + comma-separated list of shape parameters. + + Otherwise inspects the call signatures of `meths_to_inspect` + and constructs the argument-parsing functions from these. + In this case also sets `shapes` and `numargs`. + """ + + if self.shapes: + # sanitize the user-supplied shapes + if not isinstance(self.shapes, str): + raise TypeError('shapes must be a string.') + + shapes = self.shapes.replace(',', ' ').split() + + for field in shapes: + if keyword.iskeyword(field): + raise SyntaxError('keywords cannot be used as shapes.') + if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field): + raise SyntaxError( + 'shapes must be valid python identifiers') + else: + # find out the call signatures (_pdf, _cdf etc), deduce shape + # arguments. Generic methods only have 'self, x', any further args + # are shapes. 
+ shapes_list = [] + for meth in meths_to_inspect: + shapes_args = _getfullargspec(meth) # NB does not contain self + args = shapes_args.args[1:] # peel off 'x', too + + if args: + shapes_list.append(args) + + # *args or **kwargs are not allowed w/automatic shapes + if shapes_args.varargs is not None: + raise TypeError( + '*args are not allowed w/out explicit shapes') + if shapes_args.varkw is not None: + raise TypeError( + '**kwds are not allowed w/out explicit shapes') + if shapes_args.kwonlyargs: + raise TypeError( + 'kwonly args are not allowed w/out explicit shapes') + if shapes_args.defaults is not None: + raise TypeError('defaults are not allowed for shapes') + + if shapes_list: + shapes = shapes_list[0] + + # make sure the signatures are consistent + for item in shapes_list: + if item != shapes: + raise TypeError('Shape arguments are inconsistent.') + else: + shapes = [] + + # have the arguments, construct the method from template + shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None + dct = dict(shape_arg_str=shapes_str, + locscale_in=locscale_in, + locscale_out=locscale_out, + ) + + # this string is used by _attach_argparser_methods + self._parse_arg_template = parse_arg_template % dct + + self.shapes = ', '.join(shapes) if shapes else None + if not hasattr(self, 'numargs'): + # allows more general subclassing with *args + self.numargs = len(shapes) + + def _construct_doc(self, docdict, shapes_vals=None): + """Construct the instance docstring with string substitutions.""" + tempdict = docdict.copy() + tempdict['name'] = self.name or 'distname' + tempdict['shapes'] = self.shapes or '' + + if shapes_vals is None: + shapes_vals = () + vals = ', '.join('%.3g' % val for val in shapes_vals) + tempdict['vals'] = vals + + tempdict['shapes_'] = self.shapes or '' + if self.shapes and self.numargs == 1: + tempdict['shapes_'] += ',' + + if self.shapes: + tempdict['set_vals_stmt'] = f'>>> {self.shapes} = {vals}' + else: + tempdict['set_vals_stmt'] = 
'' + + if self.shapes is None: + # remove shapes from call parameters if there are none + for item in ['default', 'before_notes']: + tempdict[item] = tempdict[item].replace( + "\n%(shapes)s : array_like\n shape parameters", "") + for i in range(2): + if self.shapes is None: + # necessary because we use %(shapes)s in two forms (w w/o ", ") + self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") + try: + self.__doc__ = doccer.docformat(self.__doc__, tempdict) + except TypeError as e: + raise Exception("Unable to construct docstring for " + f"distribution \"{self.name}\": {repr(e)}") from e + + # correct for empty shapes + self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')') + + def _construct_default_doc(self, longname=None, + docdict=None, discrete='continuous'): + """Construct instance docstring from the default template.""" + if longname is None: + longname = 'A' + self.__doc__ = ''.join([f'{longname} {discrete} random variable.', + '\n\n%(before_notes)s\n', docheaders['notes'], + '\n%(example)s']) + self._construct_doc(docdict) + + def freeze(self, *args, **kwds): + """Freeze the distribution for the given arguments. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution. Should include all + the non-optional arguments, may include ``loc`` and ``scale``. + + Returns + ------- + rv_frozen : rv_frozen instance + The frozen distribution. + + """ + if isinstance(self, rv_continuous): + return rv_continuous_frozen(self, *args, **kwds) + else: + return rv_discrete_frozen(self, *args, **kwds) + + def __call__(self, *args, **kwds): + return self.freeze(*args, **kwds) + __call__.__doc__ = freeze.__doc__ + + # The actual calculation functions (no basic checking need be done) + # If these are defined, the others won't be looked at. + # Otherwise, the other set can be defined. 
+ def _stats(self, *args, **kwds): + return None, None, None, None + + # Noncentral moments (also known as the moment about the origin). + # Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime". + # The primed mu is a widely used notation for the noncentral moment. + def _munp(self, n, *args): + # Silence floating point warnings from integration. + with np.errstate(all='ignore'): + vals = self.generic_moment(n, *args) + return vals + + def _argcheck_rvs(self, *args, **kwargs): + # Handle broadcasting and size validation of the rvs method. + # Subclasses should not have to override this method. + # The rule is that if `size` is not None, then `size` gives the + # shape of the result (integer values of `size` are treated as + # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.) + # + # `args` is expected to contain the shape parameters (if any), the + # location and the scale in a flat tuple (e.g. if there are two + # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`). + # The only keyword argument expected is 'size'. + size = kwargs.get('size', None) + all_bcast = np.broadcast_arrays(*args) + + def squeeze_left(a): + while a.ndim > 0 and a.shape[0] == 1: + a = a[0] + return a + + # Eliminate trivial leading dimensions. In the convention + # used by numpy's random variate generators, trivial leading + # dimensions are effectively ignored. In other words, when `size` + # is given, trivial leading dimensions of the broadcast parameters + # in excess of the number of dimensions in size are ignored, e.g. 
+ # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3) + # array([ 1.00104267, 3.00422496, 4.99799278]) + # If `size` is not given, the exact broadcast shape is preserved: + # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]]) + # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]]) + # + all_bcast = [squeeze_left(a) for a in all_bcast] + bcast_shape = all_bcast[0].shape + bcast_ndim = all_bcast[0].ndim + + if size is None: + size_ = bcast_shape + else: + size_ = tuple(np.atleast_1d(size)) + + # Check compatibility of size_ with the broadcast shape of all + # the parameters. This check is intended to be consistent with + # how the numpy random variate generators (e.g. np.random.normal, + # np.random.beta) handle their arguments. The rule is that, if size + # is given, it determines the shape of the output. Broadcasting + # can't change the output size. + + # This is the standard broadcasting convention of extending the + # shape with fewer dimensions with enough dimensions of length 1 + # so that the two shapes have the same number of dimensions. + ndiff = bcast_ndim - len(size_) + if ndiff < 0: + bcast_shape = (1,)*(-ndiff) + bcast_shape + elif ndiff > 0: + size_ = (1,)*ndiff + size_ + + # This compatibility test is not standard. In "regular" broadcasting, + # two shapes are compatible if for each dimension, the lengths are the + # same or one of the lengths is 1. Here, the length of a dimension in + # size_ must not be less than the corresponding length in bcast_shape. + ok = all([bcdim == 1 or bcdim == szdim + for (bcdim, szdim) in zip(bcast_shape, size_)]) + if not ok: + raise ValueError("size does not match the broadcast shape of " + f"the parameters. 
{size}, {size_}, {bcast_shape}") + + param_bcast = all_bcast[:-2] + loc_bcast = all_bcast[-2] + scale_bcast = all_bcast[-1] + + return param_bcast, loc_bcast, scale_bcast, size_ + + # These are the methods you must define (standard form functions) + # NB: generic _pdf, _logpdf, _cdf are different for + # rv_continuous and rv_discrete hence are defined in there + def _argcheck(self, *args): + """Default check for correct values on args and keywords. + + Returns condition array of 1's where arguments are correct and + 0's where they are not. + + """ + cond = 1 + for arg in args: + cond = logical_and(cond, (asarray(arg) > 0)) + return cond + + def _get_support(self, *args, **kwargs): + """Return the support of the (unscaled, unshifted) distribution. + + *Must* be overridden by distributions which have support dependent + upon the shape parameters of the distribution. Any such override + *must not* set or change any of the class members, as these members + are shared amongst all instances of the distribution. + + Parameters + ---------- + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + a, b : numeric (float, or int or +/-np.inf) + end-points of the distribution's support for the specified + shape parameters. + """ + return self.a, self.b + + def _support_mask(self, x, *args): + a, b = self._get_support(*args) + with np.errstate(invalid='ignore'): + return (a <= x) & (x <= b) + + def _open_support_mask(self, x, *args): + a, b = self._get_support(*args) + with np.errstate(invalid='ignore'): + return (a < x) & (x < b) + + def _rvs(self, *args, size=None, random_state=None): + # This method must handle size being a tuple, and it must + # properly broadcast *args and size. size might be + # an empty tuple, which means a scalar random variate is to be + # generated. + + # Use basic inverse cdf algorithm for RV generation as default. 
+ U = random_state.uniform(size=size) + Y = self._ppf(U, *args) + return Y + + def _logcdf(self, x, *args): + with np.errstate(divide='ignore'): + return log(self._cdf(x, *args)) + + def _sf(self, x, *args): + return 1.0-self._cdf(x, *args) + + def _logsf(self, x, *args): + with np.errstate(divide='ignore'): + return log(self._sf(x, *args)) + + def _ppf(self, q, *args): + return self._ppfvec(q, *args) + + def _isf(self, q, *args): + return self._ppf(1.0-q, *args) # use correct _ppf for subclasses + + # These are actually called, and should not be overwritten if you + # want to keep error checking. + def rvs(self, *args, **kwds): + """Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional + Scale parameter (default=1). + size : int or tuple of ints, optional + Defining number of random variates (default is 1). + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of given `size`. + + """ + discrete = kwds.pop('discrete', None) + rndm = kwds.pop('random_state', None) + args, loc, scale, size = self._parse_args_rvs(*args, **kwds) + cond = logical_and(self._argcheck(*args), (scale >= 0)) + if not np.all(cond): + message = ("Domain error in arguments. The `scale` parameter must " + "be positive for all distributions, and many " + "distributions have restrictions on shape parameters. 
" + f"Please see the `scipy.stats.{self.name}` " + "documentation for details.") + raise ValueError(message) + + if np.all(scale == 0): + return loc*ones(size, 'd') + + # extra gymnastics needed for a custom random_state + if rndm is not None: + random_state_saved = self._random_state + random_state = check_random_state(rndm) + else: + random_state = self._random_state + + vals = self._rvs(*args, size=size, random_state=random_state) + + vals = vals * scale + loc + + # do not forget to restore the _random_state + if rndm is not None: + self._random_state = random_state_saved + + # Cast to int if discrete + if discrete and not isinstance(self, rv_sample): + if size == (): + vals = int(vals) + else: + vals = vals.astype(np.int64) + + return vals + + def stats(self, *args, **kwds): + """Some statistics of the given RV. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional (continuous RVs only) + scale parameter (default=1) + moments : str, optional + composed of letters ['mvsk'] defining which moments to compute: + 'm' = mean, + 'v' = variance, + 's' = (Fisher's) skew, + 'k' = (Fisher's) kurtosis. + (default is 'mv') + + Returns + ------- + stats : sequence + of requested moments. 
+ + """ + args, loc, scale, moments = self._parse_args_stats(*args, **kwds) + # scale = 1 by construction for discrete RVs + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = [] + default = np.full(shape(cond), fill_value=self.badvalue) + + # Use only entries that are valid in calculation + if np.any(cond): + goodargs = argsreduce(cond, *(args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + + if self._stats_has_moments: + mu, mu2, g1, g2 = self._stats(*goodargs, + **{'moments': moments}) + else: + mu, mu2, g1, g2 = self._stats(*goodargs) + + if 'm' in moments: + if mu is None: + mu = self._munp(1, *goodargs) + out0 = default.copy() + place(out0, cond, mu * scale + loc) + output.append(out0) + + if 'v' in moments: + if mu2 is None: + mu2p = self._munp(2, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + # if mean is inf then var is also inf + with np.errstate(invalid='ignore'): + mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf) + out0 = default.copy() + place(out0, cond, mu2 * scale * scale) + output.append(out0) + + if 's' in moments: + if g1 is None: + mu3p = self._munp(3, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + with np.errstate(invalid='ignore'): + mu2 = mu2p - mu * mu + with np.errstate(invalid='ignore'): + mu3 = (-mu*mu - 3*mu2)*mu + mu3p + g1 = mu3 / np.power(mu2, 1.5) + out0 = default.copy() + place(out0, cond, g1) + output.append(out0) + + if 'k' in moments: + if g2 is None: + mu4p = self._munp(4, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + with np.errstate(invalid='ignore'): + mu2 = mu2p - mu * mu + if g1 is None: + mu3 = None + else: + # (mu2**1.5) breaks down for nan and inf + mu3 = g1 * np.power(mu2, 1.5) + if mu3 is None: + mu3p = self._munp(3, *goodargs) + with 
np.errstate(invalid='ignore'): + mu3 = (-mu * mu - 3 * mu2) * mu + mu3p + with np.errstate(invalid='ignore'): + mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p + g2 = mu4 / mu2**2.0 - 3.0 + out0 = default.copy() + place(out0, cond, g2) + output.append(out0) + else: # no valid args + output = [default.copy() for _ in moments] + + output = [out[()] for out in output] + if len(output) == 1: + return output[0] + else: + return tuple(output) + + def entropy(self, *args, **kwds): + """Differential entropy of the RV. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional (continuous distributions only). + Scale parameter (default=1). + + Notes + ----- + Entropy is defined base `e`: + + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import rv_discrete + >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5))) + >>> np.allclose(drv.entropy(), np.log(2.0)) + True + + """ + args, loc, scale = self._parse_args(*args, **kwds) + # NB: for discrete distributions scale=1 by construction in _parse_args + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = zeros(shape(cond0), 'd') + place(output, (1-cond0), self.badvalue) + goodargs = argsreduce(cond0, scale, *args) + goodscale = goodargs[0] + goodargs = goodargs[1:] + place(output, cond0, self.vecentropy(*goodargs) + log(goodscale)) + return output[()] + + def moment(self, order, *args, **kwds): + """non-central moment of distribution of specified order. + + Parameters + ---------- + order : int, order >= 1 + Order of moment. + arg1, arg2, arg3,... : float + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). 
+ loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + """ + n = order + shapes, loc, scale = self._parse_args(*args, **kwds) + args = np.broadcast_arrays(*(*shapes, loc, scale)) + *shapes, loc, scale = args + + i0 = np.logical_and(self._argcheck(*shapes), scale > 0) + i1 = np.logical_and(i0, loc == 0) + i2 = np.logical_and(i0, loc != 0) + + args = argsreduce(i0, *shapes, loc, scale) + *shapes, loc, scale = args + + if (floor(n) != n): + raise ValueError("Moment must be an integer.") + if (n < 0): + raise ValueError("Moment must be positive.") + mu, mu2, g1, g2 = None, None, None, None + if (n > 0) and (n < 5): + if self._stats_has_moments: + mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'mvsk'}[n]} + else: + mdict = {} + mu, mu2, g1, g2 = self._stats(*shapes, **mdict) + val = np.empty(loc.shape) # val needs to be indexed by loc + val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes) + + # Convert to transformed X = L + S*Y + # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n) + result = zeros(i0.shape) + place(result, ~i0, self.badvalue) + + if i1.any(): + res1 = scale[loc == 0]**n * val[loc == 0] + place(result, i1, res1) + + if i2.any(): + mom = [mu, mu2, g1, g2] + arrs = [i for i in mom if i is not None] + idx = [i for i in range(4) if mom[i] is not None] + if any(idx): + arrs = argsreduce(loc != 0, *arrs) + j = 0 + for i in idx: + mom[i] = arrs[j] + j += 1 + mu, mu2, g1, g2 = mom + args = argsreduce(loc != 0, *shapes, loc, scale, val) + *shapes, loc, scale, val = args + + res2 = zeros(loc.shape, dtype='d') + fac = scale / loc + for k in range(n): + valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, + shapes) + res2 += comb(n, k, exact=True)*fac**k * valk + res2 += fac**n * val + res2 *= loc**n + place(result, i2, res2) + + return result[()] + + def median(self, *args, **kwds): + """Median of the distribution. 
+ + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + Location parameter, Default is 0. + scale : array_like, optional + Scale parameter, Default is 1. + + Returns + ------- + median : float + The median of the distribution. + + See Also + -------- + rv_discrete.ppf + Inverse of the CDF + + """ + return self.ppf(0.5, *args, **kwds) + + def mean(self, *args, **kwds): + """Mean of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + mean : float + the mean of the distribution + + """ + kwds['moments'] = 'm' + res = self.stats(*args, **kwds) + if isinstance(res, ndarray) and res.ndim == 0: + return res[()] + return res + + def var(self, *args, **kwds): + """Variance of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + var : float + the variance of the distribution + + """ + kwds['moments'] = 'v' + res = self.stats(*args, **kwds) + if isinstance(res, ndarray) and res.ndim == 0: + return res[()] + return res + + def std(self, *args, **kwds): + """Standard deviation of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + std : float + standard deviation of the distribution + + """ + kwds['moments'] = 'v' + res = sqrt(self.stats(*args, **kwds)) + return res + + def interval(self, confidence, *args, **kwds): + """Confidence interval with equal areas around the median. + + Parameters + ---------- + confidence : array_like of float + Probability that an rv will be drawn from the returned range. + Each value should be in the range [0, 1]. + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter, Default is 0. + scale : array_like, optional + scale parameter, Default is 1. + + Returns + ------- + a, b : ndarray of float + end-points of range that contain ``100 * alpha %`` of the rv's + possible values. + + Notes + ----- + This is implemented as ``ppf([p_tail, 1-p_tail])``, where + ``ppf`` is the inverse cumulative distribution function and + ``p_tail = (1-confidence)/2``. Suppose ``[c, d]`` is the support of a + discrete distribution; then ``ppf([0, 1]) == (c-1, d)``. Therefore, + when ``confidence=1`` and the distribution is discrete, the left end + of the interval will be beyond the support of the distribution. + For discrete distributions, the interval will limit the probability + in each tail to be less than or equal to ``p_tail`` (usually + strictly less). 
+ + """ + alpha = confidence + + alpha = asarray(alpha) + if np.any((alpha > 1) | (alpha < 0)): + raise ValueError("alpha must be between 0 and 1 inclusive") + q1 = (1.0-alpha)/2 + q2 = (1.0+alpha)/2 + a = self.ppf(q1, *args, **kwds) + b = self.ppf(q2, *args, **kwds) + return a, b + + def support(self, *args, **kwargs): + """Support of the distribution. + + Parameters + ---------- + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter, Default is 0. + scale : array_like, optional + scale parameter, Default is 1. + + Returns + ------- + a, b : array_like + end-points of the distribution's support. + + """ + args, loc, scale = self._parse_args(*args, **kwargs) + arrs = np.broadcast_arrays(*args, loc, scale) + args, loc, scale = arrs[:-2], arrs[-2], arrs[-1] + cond = self._argcheck(*args) & (scale > 0) + _a, _b = self._get_support(*args) + if cond.all(): + return _a * scale + loc, _b * scale + loc + elif cond.ndim == 0: + return self.badvalue, self.badvalue + # promote bounds to at least float to fill in the badvalue + _a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d') + out_a, out_b = _a * scale + loc, _b * scale + loc + place(out_a, 1-cond, self.badvalue) + place(out_b, 1-cond, self.badvalue) + return out_a, out_b + + def nnlf(self, theta, x): + """Negative loglikelihood function. + Notes + ----- + This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the + parameters (including loc and scale). 
+ """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = (asarray(x)-loc) / scale + n_log_scale = len(x) * log(scale) + if np.any(~self._support_mask(x, *args)): + return inf + return self._nnlf(x, *args) + n_log_scale + + def _nnlf(self, x, *args): + return -np.sum(self._logpxf(x, *args), axis=0) + + def _nlff_and_penalty(self, x, args, log_fitfun): + # negative log fit function + cond0 = ~self._support_mask(x, *args) + n_bad = np.count_nonzero(cond0, axis=0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + logff = log_fitfun(x, *args) + finite_logff = np.isfinite(logff) + n_bad += np.sum(~finite_logff, axis=0) + if n_bad > 0: + penalty = n_bad * log(_XMAX) * 100 + return -np.sum(logff[finite_logff], axis=0) + penalty + return -np.sum(logff, axis=0) + + def _penalized_nnlf(self, theta, x): + """Penalized negative loglikelihood function. + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = asarray((x-loc) / scale) + n_log_scale = len(x) * log(scale) + return self._nlff_and_penalty(x, args, self._logpxf) + n_log_scale + + def _penalized_nlpsf(self, theta, x): + """Penalized negative log product spacing function. 
+ i.e., - sum (log (diff (cdf (x, theta))), axis=0) + penalty + where theta are the parameters (including loc and scale) + Follows reference [1] of scipy.stats.fit + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = (np.sort(x) - loc)/scale + + def log_psf(x, *args): + x, lj = np.unique(x, return_counts=True) # fast for sorted x + cdf_data = self._cdf(x, *args) if x.size else [] + if not (x.size and 1 - cdf_data[-1] <= 0): + cdf = np.concatenate(([0], cdf_data, [1])) + lj = np.concatenate((lj, [1])) + else: + cdf = np.concatenate(([0], cdf_data)) + # here we could use logcdf w/ logsumexp trick to take differences, + # but in the context of the method, it seems unlikely to matter + return lj * np.log(np.diff(cdf) / lj) + + return self._nlff_and_penalty(x, args, log_psf) + + +class _ShapeInfo: + def __init__(self, name, integrality=False, domain=(-np.inf, np.inf), + inclusive=(True, True)): + self.name = name + self.integrality = integrality + + domain = list(domain) + if np.isfinite(domain[0]) and not inclusive[0]: + domain[0] = np.nextafter(domain[0], np.inf) + if np.isfinite(domain[1]) and not inclusive[1]: + domain[1] = np.nextafter(domain[1], -np.inf) + self.domain = domain + + +def _get_fixed_fit_value(kwds, names): + """ + Given names such as `['f0', 'fa', 'fix_a']`, check that there is + at most one non-None value in `kwds` associaed with those names. + Return that value, or None if none of the names occur in `kwds`. + As a side effect, all occurrences of those names in `kwds` are + removed. 
+ """ + vals = [(name, kwds.pop(name)) for name in names if name in kwds] + if len(vals) > 1: + repeated = [name for name, val in vals] + raise ValueError("fit method got multiple keyword arguments to " + "specify the same fixed parameter: " + + ', '.join(repeated)) + return vals[0][1] if vals else None + + +# continuous random variables: implement maybe later +# +# hf --- Hazard Function (PDF / SF) +# chf --- Cumulative hazard function (-log(SF)) +# psf --- Probability sparsity function (reciprocal of the pdf) in +# units of percent-point-function (as a function of q). +# Also, the derivative of the percent-point function. + + +class rv_continuous(rv_generic): + """A generic continuous random variable class meant for subclassing. + + `rv_continuous` is a base class to construct specific distribution classes + and instances for continuous random variables. It cannot be used + directly as a distribution. + + Parameters + ---------- + momtype : int, optional + The type of generic moment calculation to use: 0 for pdf, 1 (default) + for ppf. + a : float, optional + Lower bound of the support of the distribution, default is minus + infinity. + b : float, optional + Upper bound of the support of the distribution, default is plus + infinity. + xtol : float, optional + The tolerance for fixed point calculation for generic ppf. + badvalue : float, optional + The value in a result arrays that indicates a value that for which + some argument restriction is violated, default is np.nan. + name : str, optional + The name of the instance. This string is used to construct the default + example for distributions. + longname : str, optional + This string is used as part of the first line of the docstring returned + when a subclass has no docstring of its own. Note: `longname` exists + for backwards compatibility, do not use for new subclasses. + shapes : str, optional + The shape of the distribution. 
For example ``"m, n"`` for a + distribution that takes two integers as the two shape arguments for all + its methods. If not provided, shape parameters will be inferred from + the signature of the private methods, ``_pdf`` and ``_cdf`` of the + instance. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + pdf + logpdf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + fit + fit_loc_scale + nnlf + support + + Notes + ----- + Public methods of an instance of a distribution class (e.g., ``pdf``, + ``cdf``) check their arguments and pass valid arguments to private, + computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid + if it is within the support of the distribution. + Whether a shape parameter is valid is decided by an ``_argcheck`` method + (which defaults to checking that its arguments are strictly positive.) + + **Subclassing** + + New random variables can be defined by subclassing the `rv_continuous` class + and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized + to location 0 and scale 1). + + If positive argument checking is not correct for your RV + then you will also need to re-define the ``_argcheck`` method. + + For most of the scipy.stats distributions, the support interval doesn't + depend on the shape parameters. ``x`` being in the support interval is + equivalent to ``self.a <= x <= self.b``. 
If either of the endpoints of + the support do depend on the shape parameters, then + i) the distribution must implement the ``_get_support`` method; and + ii) those dependent endpoints must be omitted from the distribution's + call to the ``rv_continuous`` initializer. + + Correct, but potentially slow defaults exist for the remaining + methods but for speed and/or accuracy you can over-ride:: + + _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf + + The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``, + applied to a uniform random variate. In order to generate random variates + efficiently, either the default ``_ppf`` needs to be overwritten (e.g. + if the inverse cdf can expressed in an explicit form) or a sampling + method needs to be implemented in a custom ``_rvs`` method. + + If possible, you should override ``_isf``, ``_sf`` or ``_logsf``. + The main reason would be to improve numerical accuracy: for example, + the survival function ``_sf`` is computed as ``1 - _cdf`` which can + result in loss of precision if ``_cdf(x)`` is close to one. + + **Methods that can be overwritten by subclasses** + :: + + _rvs + _pdf + _cdf + _sf + _ppf + _isf + _stats + _munp + _entropy + _argcheck + _get_support + + There are additional (internal and private) generic methods that can + be useful for cross-checking and for debugging, but might work in all + cases when directly called. + + A note on ``shapes``: subclasses need not specify them explicitly. In this + case, `shapes` will be automatically deduced from the signatures of the + overridden methods (`pdf`, `cdf` etc). + If, for some reason, you prefer to avoid relying on introspection, you can + specify ``shapes`` explicitly as an argument to the instance constructor. + + + **Frozen Distributions** + + Normally, you must provide shape parameters (and, optionally, location and + scale parameters to each call of a method of a distribution. 

    Alternatively, the object may be called (as a function) to fix the shape,
    location, and scale parameters returning a "frozen" continuous RV object:

    rv = generic(<shape(s)>, loc=0, scale=1)
        `rv_frozen` object with the same methods but holding the given shape,
        location, and scale fixed

    **Statistics**

    Statistics are computed using numerical integration by default.
    For speed you can redefine this using ``_stats``:

     - take shape parameters and return mu, mu2, g1, g2
     - If you can't compute one of these, return it as None
     - Can also be defined with a keyword argument ``moments``, which is a
       string composed of "m", "v", "s", and/or "k".
       Only the components appearing in the string should be computed and
       returned in the order "m", "v", "s", or "k" with missing values
       returned as None.

    Alternatively, you can override ``_munp``, which takes ``n`` and shape
    parameters and returns the n-th non-central moment of the distribution.

    **Deepcopying / Pickling**

    If a distribution or frozen distribution is deepcopied (pickled/unpickled,
    etc.), any underlying random number generator is deepcopied with it. An
    implication is that if a distribution relies on the singleton RandomState
    before copying, it will rely on a copy of that random state after copying,
    and ``np.random.seed`` will no longer control the state.

    Examples
    --------
    To create a new Gaussian distribution, we would do the following:

    >>> from scipy.stats import rv_continuous
    >>> class gaussian_gen(rv_continuous):
    ...     "Gaussian distribution"
    ...     def _pdf(self, x):
    ...         return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
    >>> gaussian = gaussian_gen(name='gaussian')

    ``scipy.stats`` distributions are *instances*, so here we subclass
    `rv_continuous` and create an instance. With this, we now have
    a fully functional distribution with all relevant methods automagically
    generated by the framework.
+ + Note that above we defined a standard normal distribution, with zero mean + and unit variance. Shifting and scaling of the distribution can be done + by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)`` + essentially computes ``y = (x - loc) / scale`` and + ``gaussian._pdf(y) / scale``. + + """ + + def __init__(self, momtype=1, a=None, b=None, xtol=1e-14, + badvalue=None, name=None, longname=None, + shapes=None, seed=None): + + super().__init__(seed) + + # save the ctor parameters, cf generic freeze + self._ctor_param = dict( + momtype=momtype, a=a, b=b, xtol=xtol, + badvalue=badvalue, name=name, longname=longname, + shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + if name is None: + name = 'Distribution' + self.badvalue = badvalue + self.name = name + self.a = a + self.b = b + if a is None: + self.a = -inf + if b is None: + self.b = inf + self.xtol = xtol + self.moment_type = momtype + self.shapes = shapes + + self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf], + locscale_in='loc=0, scale=1', + locscale_out='loc, scale') + self._attach_methods() + + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + docdict=docdict, + discrete='continuous') + else: + dct = dict(distcont) + self._construct_doc(docdict, dct.get(self.name)) + + def __getstate__(self): + dct = self.__dict__.copy() + + # these methods will be remade in __setstate__ + # _random_state attribute is taken care of by rv_generic + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", + "_cdfvec", "_ppfvec", "vecentropy", "generic_moment"] + [dct.pop(attr, None) for attr in attrs] + return dct + + def _attach_methods(self): + """ + Attaches dynamically created methods to the rv_continuous instance. 
        """
        # _attach_methods is responsible for calling _attach_argparser_methods
        self._attach_argparser_methods()

        # nin correction
        # vectorize() counts the wrapped function's positional parameters,
        # but these wrappers take *args, so set nin to shapes + 1 explicitly.
        self._ppfvec = vectorize(self._ppf_single, otypes='d')
        self._ppfvec.nin = self.numargs + 1
        self.vecentropy = vectorize(self._entropy, otypes='d')
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self._cdfvec.nin = self.numargs + 1

        # moment_type selects between pdf-based (0) and ppf-based (1)
        # numerical moment computation.
        if self.moment_type == 0:
            self.generic_moment = vectorize(self._mom0_sc, otypes='d')
        else:
            self.generic_moment = vectorize(self._mom1_sc, otypes='d')
        # Because of the *args argument of _mom0_sc, vectorize cannot count the
        # number of arguments correctly.
        self.generic_moment.nin = self.numargs + 1

    def _updated_ctor_param(self):
        """Return the current version of _ctor_param, possibly updated by user.

        Used by freezing.
        Keep this in sync with the signature of __init__.
        """
        dct = self._ctor_param.copy()
        dct['a'] = self.a
        dct['b'] = self.b
        dct['xtol'] = self.xtol
        dct['badvalue'] = self.badvalue
        dct['name'] = self.name
        dct['shapes'] = self.shapes
        return dct

    def _ppf_to_solve(self, x, q, *args):
        # cdf(x) - q: the root in x of this expression is the ppf at q.
        return self.cdf(*(x, )+args)-q

    def _ppf_single(self, q, *args):
        # Numerically invert the cdf for a scalar q: bracket the root of
        # cdf(x) - q, then refine with Brent's method.
        factor = 10.
        left, right = self._get_support(*args)

        if np.isinf(left):
            # Support is unbounded below: march `left` outward by `factor`
            # until the bracket contains the root.
            left = min(-factor, right)
            while self._ppf_to_solve(left, q, *args) > 0.:
                left, right = left * factor, left
            # left is now such that cdf(left) <= q
            # if right has changed, then cdf(right) > q

        if np.isinf(right):
            # Support is unbounded above: march `right` outward until
            # cdf(right) >= q.
            right = max(factor, left)
            while self._ppf_to_solve(right, q, *args) < 0.:
                left, right = right, right * factor
            # right is now such that cdf(right) >= q

        # Root of cdf(x) - q inside the bracket [left, right].
        return optimize.brentq(self._ppf_to_solve,
                               left, right, args=(q,)+args, xtol=self.xtol)

    # moment from definition
    def _mom_integ0(self, x, m, *args):
        # Integrand x**m * pdf(x) for the m-th non-central moment.
        return x**m * self.pdf(x, *args)

    def _mom0_sc(self, m, *args):
        # m-th non-central moment by integrating the pdf over the support
        # (used when moment_type == 0).
        _a, _b = self._get_support(*args)
        return integrate.quad(self._mom_integ0, _a, _b,
                              args=(m,)+args)[0]

    # moment calculated using ppf
    def _mom_integ1(self, q, m, *args):
        # Integrand ppf(q)**m for the moment via the inverse-transform view.
        return (self.ppf(q, *args))**m

    def _mom1_sc(self, m, *args):
        # m-th non-central moment by integrating ppf(q)**m over q in [0, 1]
        # (used when moment_type == 1, the default).
        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]

    def _pdf(self, x, *args):
        # Default pdf: numerical derivative of the cdf.
        return _derivative(self._cdf, x, dx=1e-5, args=args, order=5)

    # Could also define any of these
    def _logpdf(self, x, *args):
        p = self._pdf(x, *args)
        with np.errstate(divide='ignore'):
            # log(0) -> -inf (without a warning) is the desired result
            # outside the support.
            return log(p)

    def _logpxf(self, x, *args):
        # continuous distributions have PDF, discrete have PMF, but sometimes
        # the distinction doesn't matter. This lets us use `_logpxf` for both
        # discrete and continuous distributions.
        return self._logpdf(x, *args)

    def _cdf_single(self, x, *args):
        # Default scalar cdf: integrate the pdf from the lower support bound.
        _a, _b = self._get_support(*args)
        return integrate.quad(self._pdf, _a, x, args=args)[0]

    def _cdf(self, x, *args):
        # Vectorized wrapper around _cdf_single.
        return self._cdfvec(x, *args)

    # generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
    # in rv_generic

    def pdf(self, x, *args, **kwds):
        """Probability density function at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,...
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + pdf : ndarray + Probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x, *args) & (scale > 0) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._pdf(*goodargs) / scale) + if output.ndim == 0: + return output[()] + return output + + def logpdf(self, x, *args, **kwds): + """Log of the probability density function at x of the given RV. + + This uses a more numerically accurate calculation if available. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logpdf : array_like + Log of the probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x, *args) & (scale > 0) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._logpdf(*goodargs) - log(scale)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, x, *args, **kwds): + """ + Cumulative distribution function of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `x` + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = (x >= np.asarray(_b)) & cond0 + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._cdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, x, *args, **kwds): + """Log of the cumulative distribution function at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = (x >= _b) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, x, *args, **kwds): + """Survival function (1 - `cdf`) at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + sf : array_like + Survival function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = cond0 & (x <= _a) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._sf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, x, *args, **kwds): + """Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as (1 - `cdf`), + evaluated at `x`. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `x`. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = cond0 & (x <= _a) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + lower tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : array_like + quantile corresponding to the lower tail probability q. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 0) + cond3 = cond0 & (q == 1) + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue) + + lower_bound = _a * scale + loc + upper_bound = _b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._ppf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + upper tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : ndarray or scalar + Quantile corresponding to the upper tail probability q. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 1) + cond3 = cond0 & (q == 0) + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue) + + lower_bound = _a * scale + loc + upper_bound = _b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._isf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-2] + scale = theta[-1] + args = tuple(theta[:-2]) + except IndexError as e: + raise ValueError("Not enough input arguments.") from e + return loc, scale, args + + def _nnlf_and_penalty(self, x, args): + """ + Compute the penalized negative log-likelihood for the + "standardized" data (i.e. already shifted by loc and + scaled by scale) for the shape parameters in `args`. + + `x` can be a 1D numpy array or a CensoredData instance. + """ + if isinstance(x, CensoredData): + # Filter out the data that is not in the support. + xs = x._supported(*self._get_support(*args)) + n_bad = len(x) - len(xs) + i1, i2 = xs._interval.T + terms = [ + # logpdf of the noncensored data. + self._logpdf(xs._uncensored, *args), + # logcdf of the left-censored data. + self._logcdf(xs._left, *args), + # logsf of the right-censored data. + self._logsf(xs._right, *args), + # log of probability of the interval-censored data. 
+ np.log(self._delta_cdf(i1, i2, *args)), + ] + else: + cond0 = ~self._support_mask(x, *args) + n_bad = np.count_nonzero(cond0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + terms = [self._logpdf(x, *args)] + + totals, bad_counts = zip(*[_sum_finite(term) for term in terms]) + total = sum(totals) + n_bad += sum(bad_counts) + + return -total + n_bad * _LOGXMAX * 100 + + def _penalized_nnlf(self, theta, x): + """Penalized negative loglikelihood function. + + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + if isinstance(x, CensoredData): + x = (x - loc) / scale + n_log_scale = (len(x) - x.num_censored()) * log(scale) + else: + x = (x - loc) / scale + n_log_scale = len(x) * log(scale) + + return self._nnlf_and_penalty(x, args) + n_log_scale + + def _fitstart(self, data, args=None): + """Starting point for fit (shape arguments + loc + scale).""" + if args is None: + args = (1.0,)*self.numargs + loc, scale = self._fit_loc_scale_support(data, *args) + return args + (loc, scale) + + def _reduce_func(self, args, kwds, data=None): + """ + Return the (possibly reduced) function to optimize in order to find MLE + estimates for the .fit method. + """ + # Convert fixed shape parameters to the standard numeric form: e.g. for + # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value + # for `f0`, `fa` or 'fix_a'. The following converts the latter two + # into the first (numeric) form. 
+ shapes = [] + if self.shapes: + shapes = self.shapes.replace(',', ' ').split() + for j, s in enumerate(shapes): + key = 'f' + str(j) + names = [key, 'f' + s, 'fix_' + s] + val = _get_fixed_fit_value(kwds, names) + if val is not None: + kwds[key] = val + + args = list(args) + Nargs = len(args) + fixedn = [] + names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] + x0 = [] + for n, key in enumerate(names): + if key in kwds: + fixedn.append(n) + args[n] = kwds.pop(key) + else: + x0.append(args[n]) + + methods = {"mle", "mm"} + method = kwds.pop('method', "mle").lower() + if method == "mm": + n_params = len(shapes) + 2 - len(fixedn) + exponents = (np.arange(1, n_params+1))[:, np.newaxis] + data_moments = np.sum(data[None, :]**exponents/len(data), axis=1) + + def objective(theta, x): + return self._moment_error(theta, x, data_moments) + + elif method == "mle": + objective = self._penalized_nnlf + else: + raise ValueError(f"Method '{method}' not available; " + f"must be one of {methods}") + + if len(fixedn) == 0: + func = objective + restore = None + else: + if len(fixedn) == Nargs: + raise ValueError( + "All parameters fixed. There is nothing to optimize.") + + def restore(args, theta): + # Replace with theta for all numbers not in fixedn + # This allows the non-fixed values to vary, but + # we still call self.nnlf with all parameters. 
+ i = 0 + for n in range(Nargs): + if n not in fixedn: + args[n] = theta[i] + i += 1 + return args + + def func(theta, x): + newtheta = restore(args[:], theta) + return objective(newtheta, x) + + return x0, func, restore, args + + def _moment_error(self, theta, x, data_moments): + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + + dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale) + for i in range(len(data_moments))]) + if np.any(np.isnan(dist_moments)): + raise ValueError("Method of moments encountered a non-finite " + "distribution moment and cannot continue. " + "Consider trying method='MLE'.") + + return (((data_moments - dist_moments) / + np.maximum(np.abs(data_moments), 1e-8))**2).sum() + + def fit(self, data, *args, **kwds): + r""" + Return estimates of shape (if applicable), location, and scale + parameters from data. The default estimation method is Maximum + Likelihood Estimation (MLE), but Method of Moments (MM) + is also available. + + Starting estimates for the fit are given by input arguments; + for any arguments not provided with starting estimates, + ``self._fitstart(data)`` is called to generate such. + + One can hold some parameters fixed to specific values by passing in + keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters) + and ``floc`` and ``fscale`` (for location and scale parameters, + respectively). + + Parameters + ---------- + data : array_like or `CensoredData` instance + Data to use in estimating the distribution parameters. + arg1, arg2, arg3,... : floats, optional + Starting value(s) for any shape-characterizing arguments (those not + provided will be determined by a call to ``_fitstart(data)``). + No default value. + **kwds : floats, optional + - `loc`: initial guess of the distribution's location parameter. + - `scale`: initial guess of the distribution's scale parameter. 
+ + Special keyword arguments are recognized as holding certain + parameters fixed: + + - f0...fn : hold respective shape parameters fixed. + Alternatively, shape parameters to fix can be specified by name. + For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a`` + are equivalent to ``f0``, and ``fb`` and ``fix_b`` are + equivalent to ``f1``. + + - floc : hold location parameter fixed to specified value. + + - fscale : hold scale parameter fixed to specified value. + + - optimizer : The optimizer to use. The optimizer must take + ``func`` and starting position as the first two arguments, + plus ``args`` (for extra arguments to pass to the + function to be optimized) and ``disp``. + The ``fit`` method calls the optimizer with ``disp=0`` to suppress output. + The optimizer must return the estimated parameters. + + - method : The method to use. The default is "MLE" (Maximum + Likelihood Estimate); "MM" (Method of Moments) + is also available. + + Raises + ------ + TypeError, ValueError + If an input is invalid + `~scipy.stats.FitError` + If fitting fails or the fit produced would be invalid + + Returns + ------- + parameter_tuple : tuple of floats + Estimates for any shape parameters (if applicable), followed by + those for location and scale. For most random variables, shape + statistics will be returned, but there are exceptions (e.g. + ``norm``). + + Notes + ----- + With ``method="MLE"`` (default), the fit is computed by minimizing + the negative log-likelihood function. A large, finite penalty + (rather than infinite negative log-likelihood) is applied for + observations beyond the support of the distribution. + + With ``method="MM"``, the fit is computed by minimizing the L2 norm + of the relative errors between the first *k* raw (about zero) data + moments and the corresponding distribution moments, where *k* is the + number of non-fixed parameters. 
+ More precisely, the objective function is:: + + (((data_moments - dist_moments) + / np.maximum(np.abs(data_moments), 1e-8))**2).sum() + + where the constant ``1e-8`` avoids division by zero in case of + vanishing data moments. Typically, this error norm can be reduced to + zero. + Note that the standard method of moments can produce parameters for + which some data are outside the support of the fitted distribution; + this implementation does nothing to prevent this. + + For either method, + the returned answer is not guaranteed to be globally optimal; it + may only be locally optimal, or the optimization may fail altogether. + If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``, + the `fit` method will raise a ``RuntimeError``. + + When passing a ``CensoredData`` instance to ``data``, the log-likelihood + function is defined as: + + .. math:: + + l(\pmb{\theta}; k) & = \sum + \log(f(k_u; \pmb{\theta})) + + \sum + \log(F(k_l; \pmb{\theta})) \\ + & + \sum + \log(1 - F(k_r; \pmb{\theta})) \\ + & + \sum + \log(F(k_{\text{high}, i}; \pmb{\theta}) + - F(k_{\text{low}, i}; \pmb{\theta})) + + where :math:`f` and :math:`F` are the pdf and cdf, respectively, of the + function being fitted, :math:`\pmb{\theta}` is the parameter vector, + :math:`u` are the indices of uncensored observations, + :math:`l` are the indices of left-censored observations, + :math:`r` are the indices of right-censored observations, + subscripts "low"/"high" denote endpoints of interval-censored observations, and + :math:`i` are the indices of interval-censored observations. + + Examples + -------- + + Generate some data to fit: draw random variates from the `beta` + distribution + + >>> import numpy as np + >>> from scipy.stats import beta + >>> a, b = 1., 2. 
+ >>> rng = np.random.default_rng(172786373191770012695001057628748821561) + >>> x = beta.rvs(a, b, size=1000, random_state=rng) + + Now we can fit all four parameters (``a``, ``b``, ``loc`` and + ``scale``): + + >>> a1, b1, loc1, scale1 = beta.fit(x) + >>> a1, b1, loc1, scale1 + (1.0198945204435628, 1.9484708982737828, 4.372241314917588e-05, 0.9979078845964814) + + The fit can be done also using a custom optimizer: + + >>> from scipy.optimize import minimize + >>> def custom_optimizer(func, x0, args=(), disp=0): + ... res = minimize(func, x0, args, method="slsqp", options={"disp": disp}) + ... if res.success: + ... return res.x + ... raise RuntimeError('optimization routine failed') + >>> a1, b1, loc1, scale1 = beta.fit(x, method="MLE", optimizer=custom_optimizer) + >>> a1, b1, loc1, scale1 + (1.0198821087258905, 1.948484145914738, 4.3705304486881485e-05, 0.9979104663953395) + + We can also use some prior knowledge about the dataset: let's keep + ``loc`` and ``scale`` fixed: + + >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1) + >>> loc1, scale1 + (0, 1) + + We can also keep shape parameters fixed by using ``f``-keywords. To + keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or, + equivalently, ``fa=1``: + + >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1) + >>> a1 + 1 + + Not all distributions return estimates for the shape parameters. 
+ ``norm`` for example just returns estimates for location and scale: + + >>> from scipy.stats import norm + >>> x = norm.rvs(a, b, size=1000, random_state=123) + >>> loc1, scale1 = norm.fit(x) + >>> loc1, scale1 + (0.92087172783841631, 2.0015750750324668) + """ # noqa: E501 + method = kwds.get('method', "mle").lower() + + censored = isinstance(data, CensoredData) + if censored: + if method != 'mle': + raise ValueError('For censored data, the method must' + ' be "MLE".') + if data.num_censored() == 0: + # There are no censored values in data, so replace the + # CensoredData instance with a regular array. + data = data._uncensored + censored = False + + Narg = len(args) + if Narg > self.numargs: + raise TypeError("Too many input arguments.") + + # Check the finiteness of data only if data is not an instance of + # CensoredData. The arrays in a CensoredData instance have already + # been validated. + if not censored: + # Note: `ravel()` is called for backwards compatibility. + data = np.asarray(data).ravel() + if not np.isfinite(data).all(): + raise ValueError("The data contains non-finite values.") + + start = [None]*2 + if (Narg < self.numargs) or not ('loc' in kwds and + 'scale' in kwds): + # get distribution specific starting locations + start = self._fitstart(data) + args += start[Narg:-2] + loc = kwds.pop('loc', start[-2]) + scale = kwds.pop('scale', start[-1]) + args += (loc, scale) + x0, func, restore, args = self._reduce_func(args, kwds, data=data) + optimizer = kwds.pop('optimizer', optimize.fmin) + # convert string to function in scipy.optimize + optimizer = _fit_determine_optimizer(optimizer) + # by now kwds must be empty, since everybody took what they needed + if kwds: + raise TypeError("Unknown arguments: %s." % kwds) + + # In some cases, method of moments can be done with fsolve/root + # instead of an optimizer, but sometimes no solution exists, + # especially when the user fixes parameters. 
        # Minimizing the sum
        # of squares of the error generalizes to these cases.
        vals = optimizer(func, x0, args=(data,), disp=0)
        obj = func(vals, data)

        if restore is not None:
            # Re-insert any user-fixed parameters that were stripped out
            # before optimization.
            vals = restore(args, vals)
        vals = tuple(vals)

        # Reject converged parameter sets that lie outside the
        # distribution's valid parameter space.
        loc, scale, shapes = self._unpack_loc_scale(vals)
        if not (np.all(self._argcheck(*shapes)) and scale > 0):
            raise FitError("Optimization converged to parameters that are "
                           "outside the range allowed by the distribution.")

        if method == 'mm':
            # A non-finite objective means a data moment or a fitted
            # distribution moment could not be computed.
            if not np.isfinite(obj):
                raise FitError("Optimization failed: either a data moment "
                               "or fitted distribution moment is "
                               "non-finite.")

        return vals

    def _fit_loc_scale_support(self, data, *args):
        """Estimate loc and scale parameters from data accounting for support.

        Parameters
        ----------
        data : array_like
            Data to fit.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        Lhat : float
            Estimated location parameter for the data.
        Shat : float
            Estimated scale parameter for the data.

        """
        if isinstance(data, CensoredData):
            # For this estimate, "uncensor" the data by taking the
            # given endpoints as the data for the left- or right-censored
            # data, and the mean for the interval-censored data.
            data = data._uncensor()
        else:
            data = np.asarray(data)

        # Estimate location and scale according to the method of moments.
        loc_hat, scale_hat = self.fit_loc_scale(data, *args)

        # Compute the support according to the shape parameters.
        self._argcheck(*args)
        _a, _b = self._get_support(*args)
        a, b = _a, _b
        support_width = b - a

        # If the support is empty then return the moment-based estimates.
        if support_width <= 0:
            return loc_hat, scale_hat

        # Compute the proposed support according to the loc and scale
        # estimates.
        a_hat = loc_hat + a * scale_hat
        b_hat = loc_hat + b * scale_hat

        # Use the moment-based estimates if they are compatible with the data.
        data_a = np.min(data)
        data_b = np.max(data)
        if a_hat < data_a and data_b < b_hat:
            return loc_hat, scale_hat

        # Otherwise find other estimates that are compatible with the data.
        data_width = data_b - data_a
        # Heuristic: pad the data range by 10% so the fitted support
        # strictly contains the observed data.
        rel_margin = 0.1
        margin = data_width * rel_margin

        # For a finite interval, both the location and scale
        # should have interesting values.
        if support_width < np.inf:
            loc_hat = (data_a - a) - margin
            scale_hat = (data_width + 2 * margin) / support_width
            return loc_hat, scale_hat

        # For a one-sided interval, use only an interesting location parameter.
        if a > -np.inf:
            return (data_a - a) - margin, 1
        elif b < np.inf:
            return (data_b - b) + margin, 1
        else:
            # Unreachable: support_width finite was handled above, and a
            # doubly-infinite support means a_hat/b_hat are +/-inf, which
            # always satisfies the compatibility check.
            raise RuntimeError

    def fit_loc_scale(self, data, *args):
        """
        Estimate loc and scale parameters from data using 1st and 2nd moments.

        Parameters
        ----------
        data : array_like
            Data to fit.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        Lhat : float
            Estimated location parameter for the data.
        Shat : float
            Estimated scale parameter for the data.

        """
        # Standardized mean and variance of the distribution.
        mu, mu2 = self.stats(*args, **{'moments': 'mv'})
        tmp = asarray(data)
        muhat = tmp.mean()
        mu2hat = tmp.var()
        # Scale estimate: ratio of sample std-dev to distribution std-dev.
        Shat = sqrt(mu2hat / mu2)
        with np.errstate(invalid='ignore'):
            Lhat = muhat - Shat*mu
        # Fall back to neutral defaults when moments are undefined/non-finite.
        if not np.isfinite(Lhat):
            Lhat = 0
        if not (np.isfinite(Shat) and (0 < Shat)):
            Shat = 1
        return Lhat, Shat

    def _entropy(self, *args):
        # Differential entropy of the standardized distribution, computed by
        # numerically integrating entr(pdf(x)) over the support.
        def integ(x):
            val = self._pdf(x, *args)
            return entr(val)

        # upper limit is often inf, so suppress warnings when integrating
        _a, _b = self._get_support(*args)
        with np.errstate(over='ignore'):
            h = integrate.quad(integ, _a, _b)[0]

        if not np.isnan(h):
            return h
        else:
            # try with different limits if integration problems
            # (clip infinite support endpoints to the 1e-10 / 1-1e-10
            # quantiles and retry)
            low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
            if np.isinf(_b):
                upper = upp
            else:
                upper = _b
            if np.isinf(_a):
                lower = low
            else:
                lower = _a
            return integrate.quad(integ, lower, upper)[0]

    def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
               conditional=False, **kwds):
        """Calculate expected value of a function with respect to the
        distribution by numerical integration.

        The expected value of a function ``f(x)`` with respect to a
        distribution ``dist`` is defined as::

                    ub
            E[f(x)] = Integral(f(x) * dist.pdf(x)),
                    lb

        where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
        distribution. If the bounds ``lb`` and ``ub`` correspond to the
        support of the distribution, e.g. ``[-inf, inf]`` in the default
        case, then the integral is the unrestricted expectation of ``f(x)``.
        Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
        outside a finite interval in which case the expectation is
        calculated within the finite range ``[lb, ub]``.

        Parameters
        ----------
        func : callable, optional
            Function for which integral is calculated. Takes only one argument.
            The default is the identity mapping f(x) = x.
        args : tuple, optional
            Shape parameters of the distribution.
+ loc : float, optional + Location parameter (default=0). + scale : float, optional + Scale parameter (default=1). + lb, ub : scalar, optional + Lower and upper bound for integration. Default is set to the + support of the distribution. + conditional : bool, optional + If True, the integral is corrected by the conditional probability + of the integration interval. The return value is the expectation + of the function, conditional on being in the given interval. + Default is False. + + Additional keyword arguments are passed to the integration routine. + + Returns + ------- + expect : float + The calculated expected value. + + Notes + ----- + The integration behavior of this function is inherited from + `scipy.integrate.quad`. Neither this function nor + `scipy.integrate.quad` can verify whether the integral exists or is + finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and + ``cauchy(0).expect()`` returns ``0.0``. + + Likewise, the accuracy of results is not verified by the function. + `scipy.integrate.quad` is typically reliable for integrals that are + numerically favorable, but it is not guaranteed to converge + to a correct value for all possible intervals and integrands. This + function is provided for convenience; for critical applications, + check results against other integration methods. + + The function is not vectorized. + + Examples + -------- + + To understand the effect of the bounds of integration consider + + >>> from scipy.stats import expon + >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0) + 0.6321205588285578 + + This is close to + + >>> expon(1).cdf(2.0) - expon(1).cdf(0.0) + 0.6321205588285577 + + If ``conditional=True`` + + >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True) + 1.0000000000000002 + + The slight deviation from 1 is due to numerical integration. + + The integrand can be treated as a complex-valued function + by passing ``complex_func=True`` to `scipy.integrate.quad` . 
+ + >>> import numpy as np + >>> from scipy.stats import vonmises + >>> res = vonmises(loc=2, kappa=1).expect(lambda x: np.exp(1j*x), + ... complex_func=True) + >>> res + (-0.18576377217422957+0.40590124735052263j) + + >>> np.angle(res) # location of the (circular) distribution + 2.0 + + """ + lockwds = {'loc': loc, + 'scale': scale} + self._argcheck(*args) + _a, _b = self._get_support(*args) + if func is None: + def fun(x, *args): + return x * self.pdf(x, *args, **lockwds) + else: + def fun(x, *args): + return func(x) * self.pdf(x, *args, **lockwds) + if lb is None: + lb = loc + _a * scale + if ub is None: + ub = loc + _b * scale + + cdf_bounds = self.cdf([lb, ub], *args, **lockwds) + invfac = cdf_bounds[1] - cdf_bounds[0] + + kwds['args'] = args + + # split interval to help integrator w/ infinite support; see gh-8928 + alpha = 0.05 # split body from tails at probability mass `alpha` + inner_bounds = np.array([alpha, 1-alpha]) + cdf_inner_bounds = cdf_bounds[0] + invfac * inner_bounds + c, d = loc + self._ppf(cdf_inner_bounds, *args) * scale + + # Do not silence warnings from integration. + lbc = integrate.quad(fun, lb, c, **kwds)[0] + cd = integrate.quad(fun, c, d, **kwds)[0] + dub = integrate.quad(fun, d, ub, **kwds)[0] + vals = (lbc + cd + dub) + + if conditional: + vals /= invfac + return np.array(vals)[()] # make it a numpy scalar like other methods + + def _param_info(self): + shape_info = self._shape_info() + loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False)) + scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False)) + param_info = shape_info + [loc_info, scale_info] + return param_info + + # For now, _delta_cdf is a private method. + def _delta_cdf(self, x1, x2, *args, loc=0, scale=1): + """ + Compute CDF(x2) - CDF(x1). + + Where x1 is greater than the median, compute SF(x1) - SF(x2), + otherwise compute CDF(x2) - CDF(x1). 

        This function is only useful if `dist.sf(x, ...)` has an implementation
        that is numerically more accurate than `1 - dist.cdf(x, ...)`.
        """
        cdf1 = self.cdf(x1, *args, loc=loc, scale=scale)
        # Possible optimizations (needs investigation-these might not be
        # better):
        # * Use _lazywhere instead of np.where
        # * Instead of cdf1 > 0.5, compare x1 to the median.
        result = np.where(cdf1 > 0.5,
                          (self.sf(x1, *args, loc=loc, scale=scale)
                           - self.sf(x2, *args, loc=loc, scale=scale)),
                          self.cdf(x2, *args, loc=loc, scale=scale) - cdf1)
        # Unwrap 0-d arrays to scalars, matching other public methods.
        if result.ndim == 0:
            result = result[()]
        return result


# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
    """Non-central moment of discrete distribution."""
    # E[X**n] summed over the support, starting from the median (see _expect).
    def fun(x):
        return np.power(x, n) * self._pmf(x, *args)

    _a, _b = self._get_support(*args)
    return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)


def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
    # Generic scalar ppf for discrete distributions: bracket q between
    # integer points a <= b, then bisect on the cdf.
    _a, _b = self._get_support(*args)
    b = _b
    a = _a
    if isinf(b):            # Be sure ending point is > q
        b = int(max(100*q, 10))
        while 1:
            if b >= _b:
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                b += 10
            else:
                break
    else:
        qb = 1.0
    if isinf(a):    # be sure starting point < q
        a = int(min(-100*q, -10))
        while 1:
            if a <= _a:
                # NOTE(review): assigns qb (not qa) when the lower bracket
                # reaches _a; this matches upstream but looks asymmetric with
                # the upper-bracket branch — confirm before changing.
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                a -= 10
            else:
                break
    else:
        qa = self._cdf(a, *args)

    # Bisection on integers: maintain cdf(a) = qa < q <= qb = cdf(b).
    while 1:
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            # Bracket has collapsed to adjacent integers; pick the endpoint
            # whose cdf is on the correct side of q.
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c


# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization


class rv_discrete(rv_generic):
    """A
generic discrete random variable class meant for subclassing. + + `rv_discrete` is a base class to construct specific distribution classes + and instances for discrete random variables. It can also be used + to construct an arbitrary distribution defined by a list of support + points and corresponding probabilities. + + Parameters + ---------- + a : float, optional + Lower bound of the support of the distribution, default: 0 + b : float, optional + Upper bound of the support of the distribution, default: plus infinity + moment_tol : float, optional + The tolerance for the generic calculation of moments. + values : tuple of two array_like, optional + ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero + probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk`` + and ``pk`` must have the same shape, and ``xk`` must be unique. + inc : integer, optional + Increment for the support of the distribution. + Default is 1. (other values have not been tested) + badvalue : float, optional + The value in a result arrays that indicates a value that for which + some argument restriction is violated, default is np.nan. + name : str, optional + The name of the instance. This string is used to construct the default + example for distributions. + longname : str, optional + This string is used as part of the first line of the docstring returned + when a subclass has no docstring of its own. Note: `longname` exists + for backwards compatibility, do not use for new subclasses. + shapes : str, optional + The shape of the distribution. For example "m, n" for a distribution + that takes two integers as the two shape arguments for all its methods + If not provided, shape parameters will be inferred from + the signatures of the private methods, ``_pmf`` and ``_cdf`` of + the instance. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. 
+ If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + pmf + logpmf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + support + + Notes + ----- + This class is similar to `rv_continuous`. Whether a shape parameter is + valid is decided by an ``_argcheck`` method (which defaults to checking + that its arguments are strictly positive.) + The main differences are as follows. + + - The support of the distribution is a set of integers. + - Instead of the probability density function, ``pdf`` (and the + corresponding private ``_pdf``), this class defines the + *probability mass function*, `pmf` (and the corresponding + private ``_pmf``.) + - There is no ``scale`` parameter. + - The default implementations of methods (e.g. ``_cdf``) are not designed + for distributions with support that is unbounded below (i.e. + ``a=-np.inf``), so they must be overridden. + + To create a new discrete distribution, we would do the following: + + >>> from scipy.stats import rv_discrete + >>> class poisson_gen(rv_discrete): + ... "Poisson distribution" + ... def _pmf(self, k, mu): + ... return exp(-mu) * mu**k / factorial(k) + + and create an instance:: + + >>> poisson = poisson_gen(name="poisson") + + Note that above we defined the Poisson distribution in the standard form. + Shifting the distribution can be done by providing the ``loc`` parameter + to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)`` + delegates the work to ``poisson._pmf(x-loc, mu)``. + + **Discrete distributions from a list of probabilities** + + Alternatively, you can construct an arbitrary discrete rv defined + on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the + ``values`` keyword argument to the `rv_discrete` constructor. 
+ + **Deepcopying / Pickling** + + If a distribution or frozen distribution is deepcopied (pickled/unpickled, + etc.), any underlying random number generator is deepcopied with it. An + implication is that if a distribution relies on the singleton RandomState + before copying, it will rely on a copy of that random state after copying, + and ``np.random.seed`` will no longer control the state. + + Examples + -------- + Custom made discrete distribution: + + >>> import numpy as np + >>> from scipy import stats + >>> xk = np.arange(7) + >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2) + >>> custm = stats.rv_discrete(name='custm', values=(xk, pk)) + >>> + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r') + >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4) + >>> plt.show() + + Random number generation: + + >>> R = custm.rvs(size=100) + + """ + def __new__(cls, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + if values is not None: + # dispatch to a subclass + return super().__new__(rv_sample) + else: + # business as usual + return super().__new__(cls) + + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + super().__init__(seed) + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.a = a + self.b = b + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + + if values is not None: + raise ValueError("rv_discrete.__init__(..., values != None, ...)") + + self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + 
self._attach_methods() + self._construct_docstrings(name, longname) + + def __getstate__(self): + dct = self.__dict__.copy() + # these methods will be remade in __setstate__ + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", + "_cdfvec", "_ppfvec", "generic_moment"] + [dct.pop(attr, None) for attr in attrs] + return dct + + def _attach_methods(self): + """Attaches dynamically created methods to the rv_discrete instance.""" + self._cdfvec = vectorize(self._cdf_single, otypes='d') + self.vecentropy = vectorize(self._entropy) + + # _attach_methods is responsible for calling _attach_argparser_methods + self._attach_argparser_methods() + + # nin correction needs to be after we know numargs + # correct nin for generic moment vectorization + _vec_generic_moment = vectorize(_drv2_moment, otypes='d') + _vec_generic_moment.nin = self.numargs + 2 + self.generic_moment = types.MethodType(_vec_generic_moment, self) + + # correct nin for ppf vectorization + _vppf = vectorize(_drv2_ppfsingle, otypes='d') + _vppf.nin = self.numargs + 2 + self._ppfvec = types.MethodType(_vppf, self) + + # now that self.numargs is defined, we can adjust nin + self._cdfvec.nin = self.numargs + 1 + + def _construct_docstrings(self, name, longname): + if name is None: + name = 'Distribution' + self.name = name + + # generate docstring for subclass instances + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + docdict=docdict_discrete, + discrete='discrete') + else: + dct = dict(distdiscrete) + self._construct_doc(docdict_discrete, dct.get(self.name)) + + # discrete RV do not have the scale parameter, remove it + self.__doc__ = self.__doc__.replace( + '\n scale : array_like, ' + 'optional\n scale parameter (default=1)', '') + + def _updated_ctor_param(self): + 
"""Return the current version of _ctor_param, possibly updated by user. + + Used by freezing. + Keep this in sync with the signature of __init__. + """ + dct = self._ctor_param.copy() + dct['a'] = self.a + dct['b'] = self.b + dct['badvalue'] = self.badvalue + dct['moment_tol'] = self.moment_tol + dct['inc'] = self.inc + dct['name'] = self.name + dct['shapes'] = self.shapes + return dct + + def _nonzero(self, k, *args): + return floor(k) == k + + def _pmf(self, k, *args): + return self._cdf(k, *args) - self._cdf(k-1, *args) + + def _logpmf(self, k, *args): + return log(self._pmf(k, *args)) + + def _logpxf(self, k, *args): + # continuous distributions have PDF, discrete have PMF, but sometimes + # the distinction doesn't matter. This lets us use `_logpxf` for both + # discrete and continuous distributions. + return self._logpmf(k, *args) + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-1] + scale = 1 + args = tuple(theta[:-1]) + except IndexError as e: + raise ValueError("Not enough input arguments.") from e + return loc, scale, args + + def _cdf_single(self, k, *args): + _a, _b = self._get_support(*args) + m = arange(int(_a), k+1) + return np.sum(self._pmf(m, *args), axis=0) + + def _cdf(self, x, *args): + k = floor(x).astype(np.float64) + return self._cdfvec(k, *args) + + # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic + + def rvs(self, *args, **kwargs): + """Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + size : int or tuple of ints, optional + Defining number of random variates (Default is 1). Note that `size` + has to be given as keyword, not as positional argument. 
+ random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of given `size`. + + """ + kwargs['discrete'] = True + return super().rvs(*args, **kwargs) + + def pmf(self, k, *args, **kwds): + """Probability mass function at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + pmf : array_like + Probability mass function evaluated at k + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k <= _b) + if not isinstance(self, rv_sample): + cond1 = cond1 & self._nonzero(k, *args) + cond = cond0 & cond1 + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._pmf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logpmf(self, k, *args, **kwds): + """Log of the probability mass function at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter. Default is 0. 
+ + Returns + ------- + logpmf : array_like + Log of the probability mass function evaluated at k. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k <= _b) + if not isinstance(self, rv_sample): + cond1 = cond1 & self._nonzero(k, *args) + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logpmf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, k, *args, **kwds): + """Cumulative distribution function of the given RV. + + Parameters + ---------- + k : array_like, int + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `k`. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k >= _b) + cond3 = np.isneginf(k) + cond = cond0 & cond1 & np.isfinite(k) + + output = zeros(shape(cond), 'd') + place(output, cond2*(cond0 == cond0), 1.0) + place(output, cond3*(cond0 == cond0), 0.0) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._cdf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, k, *args, **kwds): + """Log of the cumulative distribution function at k of the given RV. 
+ + Parameters + ---------- + k : array_like, int + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at k. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k >= _b) + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2*(cond0 == cond0), 0.0) + + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, k, *args, **kwds): + """Survival function (1 - `cdf`) at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + sf : array_like + Survival function evaluated at k. 
+ + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = ((k < _a) | np.isneginf(k)) & cond0 + cond = cond0 & cond1 & np.isfinite(k) + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._sf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, k, *args, **kwds): + """Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as 1 - `cdf`, + evaluated at `k`. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `k`. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k < _a) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + Lower tail probability. + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + k : array_like + Quantile corresponding to the lower tail probability, q. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + q, loc = map(asarray, (q, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (loc == loc) + cond1 = (q > 0) & (q < 1) + cond2 = (q == 1) & cond0 + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue, dtype='d') + # output type 'd' to handle nin and inf + place(output, (q == 0)*(cond == cond), _a-1 + loc) + place(output, cond2, _b + loc) + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(loc,))) + loc, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._ppf(*goodargs) + loc) + + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + Upper tail probability. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + k : ndarray or scalar + Quantile corresponding to the upper tail probability, q. 
+ + """ + args, loc, _ = self._parse_args(*args, **kwds) + q, loc = map(asarray, (q, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (loc == loc) + cond1 = (q > 0) & (q < 1) + cond2 = (q == 1) & cond0 + cond3 = (q == 0) & cond0 + cond = cond0 & cond1 + + # same problem as with ppf; copied from ppf and changed + output = np.full(shape(cond), fill_value=self.badvalue, dtype='d') + # output type 'd' to handle nin and inf + lower_bound = _a - 1 + loc + upper_bound = _b + loc + place(output, cond2*(cond == cond), lower_bound) + place(output, cond3*(cond == cond), upper_bound) + + # call place only if at least 1 valid argument + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(loc,))) + loc, goodargs = goodargs[-1], goodargs[:-1] + # PB same as ticket 766 + place(output, cond, self._isf(*goodargs) + loc) + + if output.ndim == 0: + return output[()] + return output + + def _entropy(self, *args): + if hasattr(self, 'pk'): + return stats.entropy(self.pk) + else: + _a, _b = self._get_support(*args) + return _expect(lambda x: entr(self.pmf(x, *args)), + _a, _b, self.ppf(0.5, *args), self.inc) + + def expect(self, func=None, args=(), loc=0, lb=None, ub=None, + conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32): + """ + Calculate expected value of a function with respect to the distribution + for discrete distribution by numerical summation. + + Parameters + ---------- + func : callable, optional + Function for which the expectation value is calculated. + Takes only one argument. + The default is the identity mapping f(k) = k. + args : tuple, optional + Shape parameters of the distribution. + loc : float, optional + Location parameter. + Default is 0. + lb, ub : int, optional + Lower and upper bound for the summation, default is set to the + support of the distribution, inclusive (``lb <= k <= ub``). 
+ conditional : bool, optional + If true then the expectation is corrected by the conditional + probability of the summation interval. The return value is the + expectation of the function, `func`, conditional on being in + the given interval (k such that ``lb <= k <= ub``). + Default is False. + maxcount : int, optional + Maximal number of terms to evaluate (to avoid an endless loop for + an infinite sum). Default is 1000. + tolerance : float, optional + Absolute tolerance for the summation. Default is 1e-10. + chunksize : int, optional + Iterate over the support of a distributions in chunks of this size. + Default is 32. + + Returns + ------- + expect : float + Expected value. + + Notes + ----- + For heavy-tailed distributions, the expected value may or + may not exist, + depending on the function, `func`. If it does exist, but the + sum converges + slowly, the accuracy of the result may be rather low. For instance, for + ``zipf(4)``, accuracy for mean, variance in example is only 1e-5. + increasing `maxcount` and/or `chunksize` may improve the result, + but may also make zipf very slow. + + The function is not vectorized. + + """ + if func is None: + def fun(x): + # loc and args from outer scope + return (x+loc)*self._pmf(x, *args) + else: + def fun(x): + # loc and args from outer scope + return func(x+loc)*self._pmf(x, *args) + # used pmf because _pmf does not check support in randint and there + # might be problems(?) 
with correct self.a, self.b at this stage maybe + # not anymore, seems to work now with _pmf + + _a, _b = self._get_support(*args) + if lb is None: + lb = _a + else: + lb = lb - loc # convert bound for standardized distribution + if ub is None: + ub = _b + else: + ub = ub - loc # convert bound for standardized distribution + if conditional: + invfac = self.sf(lb-1, *args) - self.sf(ub, *args) + else: + invfac = 1.0 + + if isinstance(self, rv_sample): + res = self._expect(fun, lb, ub) + return res / invfac + + # iterate over the support, starting from the median + x0 = self.ppf(0.5, *args) + res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize) + return res / invfac + + def _param_info(self): + shape_info = self._shape_info() + loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False)) + param_info = shape_info + [loc_info] + return param_info + + +def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10, + chunksize=32): + """Helper for computing the expectation value of `fun`.""" + # short-circuit if the support size is small enough + if (ub - lb) <= chunksize: + supp = np.arange(lb, ub+1, inc) + vals = fun(supp) + return np.sum(vals) + + # otherwise, iterate starting from x0 + if x0 < lb: + x0 = lb + if x0 > ub: + x0 = ub + + count, tot = 0, 0. 
+    # iterate over [x0, ub] inclusive
+    for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
+        count += x.size
+        delta = np.sum(fun(x))
+        tot += delta
+        # Stop once a whole chunk contributes less than the tolerance;
+        # terms are assumed to decay moving away from the starting point x0.
+        if abs(delta) < tolerance * x.size:
+            break
+        if count > maxcount:
+            warnings.warn('expect(): sum did not converge',
+                          RuntimeWarning, stacklevel=3)
+            # Give up entirely: the lower tail below is not evaluated.
+            return tot
+
+    # iterate over [lb, x0)
+    for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
+        count += x.size
+        delta = np.sum(fun(x))
+        tot += delta
+        if abs(delta) < tolerance * x.size:
+            break
+        if count > maxcount:
+            warnings.warn('expect(): sum did not converge',
+                          RuntimeWarning, stacklevel=3)
+            break
+
+    return tot
+
+
+def _iter_chunked(x0, x1, chunksize=4, inc=1):
+    """Iterate from x0 to x1 in chunks of chunksize and steps inc.
+
+    x0 must be finite, x1 need not be. In the latter case, the iterator is
+    infinite.
+    Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
+    (make sure to set inc < 0.)
+
+    Yields NumPy arrays of at most `chunksize` support points; `x1` itself
+    is never included (half-open interval, like `range`).
+
+    >>> from scipy.stats._distn_infrastructure import _iter_chunked
+    >>> [x for x in _iter_chunked(2, 5, inc=2)]
+    [array([2, 4])]
+    >>> [x for x in _iter_chunked(2, 11, inc=2)]
+    [array([2, 4, 6, 8]), array([10])]
+    >>> [x for x in _iter_chunked(2, -5, inc=-2)]
+    [array([ 2,  0, -2, -4])]
+    >>> [x for x in _iter_chunked(2, -9, inc=-2)]
+    [array([ 2,  0, -2, -4]), array([-6, -8])]
+
+    """
+    if inc == 0:
+        raise ValueError('Cannot increment by zero.')
+    if chunksize <= 0:
+        raise ValueError('Chunk size must be positive; got %s.' % chunksize)
+
+    # s is the sign of the step; stepsize is the span covered per chunk.
+    s = 1 if inc > 0 else -1
+    stepsize = abs(chunksize * inc)
+
+    x = x0
+    # (x - x1) * inc < 0 holds while x has not yet reached/passed x1,
+    # for either direction of iteration.
+    while (x - x1) * inc < 0:
+        delta = min(stepsize, abs(x - x1))
+        step = delta * s
+        # np.arange with negative `inc` counts downwards; endpoint excluded.
+        supp = np.arange(x, x + step, inc)
+        x += step
+        yield supp
+
+
+class rv_sample(rv_discrete):
+    """A 'sample' discrete distribution defined by the support and values.
+
+    The ctor ignores most of the arguments, only needs the `values` argument.
+ """ + + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + super(rv_discrete, self).__init__(seed) + + if values is None: + raise ValueError("rv_sample.__init__(..., values=None,...)") + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + self.vecentropy = self._entropy + + xk, pk = values + + if np.shape(xk) != np.shape(pk): + raise ValueError("xk and pk must have the same shape.") + if np.less(pk, 0.0).any(): + raise ValueError("All elements of pk must be non-negative.") + if not np.allclose(np.sum(pk), 1): + raise ValueError("The sum of provided pk is not 1.") + if not len(set(np.ravel(xk))) == np.size(xk): + raise ValueError("xk may not contain duplicate values.") + + indx = np.argsort(np.ravel(xk)) + self.xk = np.take(np.ravel(xk), indx, 0) + self.pk = np.take(np.ravel(pk), indx, 0) + self.a = self.xk[0] + self.b = self.xk[-1] + + self.qvals = np.cumsum(self.pk, axis=0) + + self.shapes = ' ' # bypass inspection + + self._construct_argparser(meths_to_inspect=[self._pmf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + + self._attach_methods() + + self._construct_docstrings(name, longname) + + def __getstate__(self): + dct = self.__dict__.copy() + + # these methods will be remade in rv_generic.__setstate__, + # which calls rv_generic._attach_methods + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"] + [dct.pop(attr, None) for attr in attrs] + + return dct + + def _attach_methods(self): + """Attaches dynamically created argparser methods.""" + self._attach_argparser_methods() + + def _get_support(self, *args): + """Return the support of the (unscaled, 
 unshifted) distribution.
+
+        Parameters
+        ----------
+        arg1, arg2, ... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+
+        Returns
+        -------
+        a, b : numeric (float, or int or +/-np.inf)
+            end-points of the distribution's support.
+        """
+        return self.a, self.b
+
+    def _pmf(self, x):
+        # Probability mass: pick pk[i] wherever x equals xk[i], else 0.
+        # broadcast_arrays expands each scalar pk entry to the shape of x.
+        return np.select([x == k for k in self.xk],
+                         [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
+
+    def _cdf(self, x):
+        # Index of the largest support point <= x; qvals holds the
+        # cumulative probabilities (np.cumsum of sorted pk).
+        xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
+        indx = np.argmax(xxk > xx, axis=-1) - 1
+        return self.qvals[indx]
+
+    def _ppf(self, q):
+        # First cumulative probability >= q gives the quantile's support
+        # point.  NOTE(review): bare `argmax`/`asarray` below presumably
+        # come from a `from numpy import ...` at the top of this module
+        # (not visible here) -- confirm against the file header.
+        qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
+        indx = argmax(sqq >= qq, axis=-1)
+        return self.xk[indx]
+
+    def _rvs(self, size=None, random_state=None):
+        # Need to define it explicitly, otherwise .rvs() with size=None
+        # fails due to explicit broadcasting in _ppf
+        U = random_state.uniform(size=size)
+        if size is None:
+            U = np.array(U, ndmin=1)
+            Y = self._ppf(U)[0]
+        else:
+            Y = self._ppf(U)
+        return Y
+
+    def _entropy(self):
+        # Shannon entropy of the discrete probability vector pk.
+        return stats.entropy(self.pk)
+
+    def generic_moment(self, n):
+        # n-th raw moment: sum(xk**n * pk); vectorized over an array of n.
+        n = asarray(n)
+        return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
+
+    def _expect(self, fun, lb, ub, *args, **kwds):
+        # ignore all args, just do a brute force summation
+        supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
+        vals = fun(supp)
+        return np.sum(vals)
+
+
+def _check_shape(argshape, size):
+    """
+    This is a utility function used by `_rvs()` in the class geninvgauss_gen.
+    It compares the tuple argshape to the tuple size.
+
+    Parameters
+    ----------
+    argshape : tuple of integers
+        Shape of the arguments.
+    size : tuple of integers or integer
+        Size argument of rvs().
+
+    Returns
+    -------
+    The function returns two tuples, scalar_shape and bc.
+
+    scalar_shape : tuple
+        Shape to which the 1-d array of random variates returned by
+        _rvs_scalar() is converted when it is copied into the
+        output array of _rvs().
+
+    bc : tuple of booleans
+        bc is a tuple the same length as size. bc[j] is True if the data
+        associated with that index is generated in one call of _rvs_scalar().
+
+    """
+    scalar_shape = []
+    bc = []
+    # Walk the dimensions right-to-left (NumPy broadcasting order);
+    # zip_longest pads the shorter shape with 1s, mirroring how
+    # broadcasting treats missing leading dimensions.
+    for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
+                                       fillvalue=1):
+        if sizedim > argdim or (argdim == sizedim == 1):
+            scalar_shape.append(sizedim)
+            bc.append(True)
+        else:
+            bc.append(False)
+    # Reverse back to the natural left-to-right dimension order.
+    return tuple(scalar_shape[::-1]), tuple(bc[::-1])
+
+
+def get_distribution_names(namespace_pairs, rv_base_class):
+    """Collect names of statistical distributions and their generators.
+
+    Parameters
+    ----------
+    namespace_pairs : sequence
+        A snapshot of (name, value) pairs in the namespace of a module.
+    rv_base_class : class
+        The base class of random variable generator classes in a module.
+
+    Returns
+    -------
+    distn_names : list of strings
+        Names of the statistical distributions.
+    distn_gen_names : list of strings
+        Names of the generators of the statistical distributions.
+        Note that these are not simply the names of the statistical
+        distributions, with a _gen suffix added.
+
+    """
+    distn_names = []
+    distn_gen_names = []
+    for name, value in namespace_pairs:
+        # Private names are never part of the public distribution API.
+        if name.startswith('_'):
+            continue
+        # Generator classes follow the `<name>_gen` convention; instances
+        # of rv_base_class are the public distribution objects themselves.
+        if name.endswith('_gen') and issubclass(value, rv_base_class):
+            distn_gen_names.append(name)
+        if isinstance(value, rv_base_class):
+            distn_names.append(name)
+    return distn_names, distn_gen_names
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_mgc.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_mgc.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8eb835cbafb1e974287d6fd601e9bca4471e38b
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_mgc.py
@@ -0,0 +1,550 @@
+import warnings
+import numpy as np
+
+from scipy._lib._util import check_random_state, MapWrapper, rng_integers, _contains_nan
+from scipy._lib._bunch import _make_tuple_bunch
+from scipy.spatial.distance import cdist
+from scipy.ndimage import _measurements
+
+from ._stats import _local_correlations  # type: ignore[import-not-found]
+from . import distributions
+
+__all__ = ['multiscale_graphcorr']
+
+# FROM MGCPY: https://github.com/neurodata/mgcpy
+
+
+class _ParallelP:
+    """Helper class to calculate the permuted MGC statistic in parallel.
+
+    Instances are picklable callables handed to a worker map; each call
+    computes one permutation-test replicate.
+    """
+
+    def __init__(self, x, y, random_states):
+        self.x = x
+        self.y = y
+        # One pre-seeded RandomState per replicate for reproducibility
+        # across parallel workers.
+        self.random_states = random_states
+
+    def __call__(self, index):
+        # Permute rows and columns together -- y is presumably an n x n
+        # distance matrix at this point (distances are computed before
+        # _perm_test is called); TODO confirm against the caller.
+        order = self.random_states[index].permutation(self.y.shape[0])
+        permy = self.y[order][:, order]
+
+        # calculate permuted stats, store in null distribution
+        perm_stat = _mgc_stat(self.x, permy)[0]
+
+        return perm_stat
+
+
+def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
+    r"""Helper function that calculates the p-value. See below for uses.
+
+    Parameters
+    ----------
+    x, y : ndarray
+        `x` and `y` have shapes `(n, p)` and `(n, q)`.
+    stat : float
+        The sample test statistic.
+    reps : int, optional
+        The number of replications used to estimate the null when using the
+        permutation test. The default is 1000 replications.
+ workers : int or map-like callable, optional + If `workers` is an int the population is subdivided into `workers` + sections and evaluated in parallel (uses + `multiprocessing.Pool `). Supply `-1` to use all cores + available to the Process. Alternatively supply a map-like callable, + such as `multiprocessing.Pool.map` for evaluating the population in + parallel. This evaluation is carried out as `workers(func, iterable)`. + Requires that `func` be pickleable. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Returns + ------- + pvalue : float + The sample test p-value. + null_dist : list + The approximated null distribution. + + """ + # generate seeds for each rep (change to new parallel random number + # capabilities in numpy >= 1.17+) + random_state = check_random_state(random_state) + random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32, + size=4, dtype=np.uint32)) for _ in range(reps)] + + # parallelizes with specified workers over number of reps and set seeds + parallelp = _ParallelP(x=x, y=y, random_states=random_states) + with MapWrapper(workers) as mapwrapper: + null_dist = np.array(list(mapwrapper(parallelp, range(reps)))) + + # calculate p-value and significant permutation map through list + pvalue = (1 + (null_dist >= stat).sum()) / (1 + reps) + + return pvalue, null_dist + + +def _euclidean_dist(x): + return cdist(x, x) + + +MGCResult = _make_tuple_bunch('MGCResult', + ['statistic', 'pvalue', 'mgc_dict'], []) + + +def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000, + workers=1, is_twosamp=False, random_state=None): + r"""Computes the Multiscale Graph Correlation (MGC) test statistic. 
+ + Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for + one property (e.g. cloud density), and the :math:`l`-nearest neighbors for + the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is + called the "scale". A priori, however, it is not know which scales will be + most informative. So, MGC computes all distance pairs, and then efficiently + computes the distance correlations for all scales. The local correlations + illustrate which scales are relatively informative about the relationship. + The key, therefore, to successfully discover and decipher relationships + between disparate data modalities is to adaptively determine which scales + are the most informative, and the geometric implication for the most + informative scales. Doing so not only provides an estimate of whether the + modalities are related, but also provides insight into how the + determination was made. This is especially important in high-dimensional + data, where simple visualizations do not reveal relationships to the + unaided human eye. Characterizations of this implementation in particular + have been derived from and benchmarked within in [2]_. + + Parameters + ---------- + x, y : ndarray + If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is + the number of samples and `p` and `q` are the number of dimensions, + then the MGC independence test will be run. Alternatively, ``x`` and + ``y`` can have shapes ``(n, n)`` if they are distance or similarity + matrices, and ``compute_distance`` must be sent to ``None``. If ``x`` + and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired + two-sample MGC test will be run. + compute_distance : callable, optional + A function that computes the distance or similarity among the samples + within each data matrix. Set to ``None`` if ``x`` and ``y`` are + already distance matrices. The default uses the euclidean norm metric. 
+ If you are calling a custom function, either create the distance + matrix before-hand or create a function of the form + ``compute_distance(x)`` where `x` is the data matrix for which + pairwise distances are calculated. + reps : int, optional + The number of replications used to estimate the null when using the + permutation test. The default is ``1000``. + workers : int or map-like callable, optional + If ``workers`` is an int the population is subdivided into ``workers`` + sections and evaluated in parallel (uses ``multiprocessing.Pool + ``). Supply ``-1`` to use all cores available to the + Process. Alternatively supply a map-like callable, such as + ``multiprocessing.Pool.map`` for evaluating the p-value in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. The default is ``1``. + is_twosamp : bool, optional + If `True`, a two sample test will be run. If ``x`` and ``y`` have + shapes ``(n, p)`` and ``(m, p)``, this optional will be overridden and + set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes + ``(n, p)`` and a two sample test is desired. The default is ``False``. + Note that this will not run if inputs are distance matrices. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Returns + ------- + res : MGCResult + An object containing attributes: + + statistic : float + The sample MGC test statistic within `[-1, 1]`. + pvalue : float + The p-value obtained via permutation. + mgc_dict : dict + Contains additional useful results: + + - mgc_map : ndarray + A 2D representation of the latent geometry of the + relationship. 
+ - opt_scale : (int, int) + The estimated optimal scale as a `(x, y)` pair. + - null_dist : list + The null distribution derived from the permuted matrices. + + See Also + -------- + pearsonr : Pearson correlation coefficient and p-value for testing + non-correlation. + kendalltau : Calculates Kendall's tau. + spearmanr : Calculates a Spearman rank-order correlation coefficient. + + Notes + ----- + A description of the process of MGC and applications on neuroscience data + can be found in [1]_. It is performed using the following steps: + + #. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and + modified to be mean zero columnwise. This results in two + :math:`n \times n` distance matrices :math:`A` and :math:`B` (the + centering and unbiased modification) [3]_. + + #. For all values :math:`k` and :math:`l` from :math:`1, ..., n`, + + * The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs + are calculated for each property. Here, :math:`G_k (i, j)` indicates + the :math:`k`-smallest values of the :math:`i`-th row of :math:`A` + and :math:`H_l (i, j)` indicates the :math:`l` smallested values of + the :math:`i`-th row of :math:`B` + + * Let :math:`\circ` denotes the entry-wise matrix product, then local + correlations are summed and normalized using the following statistic: + + .. math:: + + c^{kl} = \frac{\sum_{ij} A G_k B H_l} + {\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}} + + #. The MGC test statistic is the smoothed optimal local correlation of + :math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)` + (which essentially set all isolated large correlations) as 0 and + connected large correlations the same as before, see [3]_.) MGC is, + + .. math:: + + MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right) + \right) + + The test statistic returns a value between :math:`(-1, 1)` since it is + normalized. + + The p-value returned is calculated using a permutation test. 
This process + is completed by first randomly permuting :math:`y` to estimate the null + distribution and then calculating the probability of observing a test + statistic, under the null, at least as extreme as the observed test + statistic. + + MGC requires at least 5 samples to run with reliable results. It can also + handle high-dimensional data sets. + In addition, by manipulating the input data matrices, the two-sample + testing problem can be reduced to the independence testing problem [4]_. + Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n` + :math:`p \times m`, data matrix :math:`X` and :math:`Y` can be created as + follows: + + .. math:: + + X = [U | V] \in \mathcal{R}^{p \times (n + m)} + Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)} + + Then, the MGC statistic can be calculated as normal. This methodology can + be extended to similar tests such as distance correlation [4]_. + + .. versionadded:: 1.4.0 + + References + ---------- + .. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E., + Maggioni, M., & Shen, C. (2019). Discovering and deciphering + relationships across disparate data modalities. ELife. + .. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A., + Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019). + mgcpy: A Comprehensive High Dimensional Independence Testing Python + Package. :arXiv:`1907.02088` + .. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance + correlation to multiscale graph correlation. Journal of the American + Statistical Association. + .. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of + Distance and Kernel Methods for Hypothesis Testing. 
+ :arXiv:`1806.05514` + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import multiscale_graphcorr + >>> x = np.arange(100) + >>> y = x + >>> res = multiscale_graphcorr(x, y) + >>> res.statistic, res.pvalue + (1.0, 0.001) + + To run an unpaired two-sample test, + + >>> x = np.arange(100) + >>> y = np.arange(79) + >>> res = multiscale_graphcorr(x, y) + >>> res.statistic, res.pvalue # doctest: +SKIP + (0.033258146255703246, 0.023) + + or, if shape of the inputs are the same, + + >>> x = np.arange(100) + >>> y = x + >>> res = multiscale_graphcorr(x, y, is_twosamp=True) + >>> res.statistic, res.pvalue # doctest: +SKIP + (-0.008021809890200488, 1.0) + + """ + if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray): + raise ValueError("x and y must be ndarrays") + + # convert arrays of type (n,) to (n, 1) + if x.ndim == 1: + x = x[:, np.newaxis] + elif x.ndim != 2: + raise ValueError(f"Expected a 2-D array `x`, found shape {x.shape}") + if y.ndim == 1: + y = y[:, np.newaxis] + elif y.ndim != 2: + raise ValueError(f"Expected a 2-D array `y`, found shape {y.shape}") + + nx, px = x.shape + ny, py = y.shape + + # check for NaNs + _contains_nan(x, nan_policy='raise') + _contains_nan(y, nan_policy='raise') + + # check for positive or negative infinity and raise error + if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0: + raise ValueError("Inputs contain infinities") + + if nx != ny: + if px == py: + # reshape x and y for two sample testing + is_twosamp = True + else: + raise ValueError("Shape mismatch, x and y must have shape [n, p] " + "and [n, q] or have shape [n, p] and [m, p].") + + if nx < 5 or ny < 5: + raise ValueError("MGC requires at least 5 samples to give reasonable " + "results.") + + # convert x and y to float + x = x.astype(np.float64) + y = y.astype(np.float64) + + # check if compute_distance_matrix if a callable() + if not callable(compute_distance) and compute_distance is not None: + raise ValueError("Compute_distance must 
be a function.") + + # check if number of reps exists, integer, or > 0 (if under 1000 raises + # warning) + if not isinstance(reps, int) or reps < 0: + raise ValueError("Number of reps must be an integer greater than 0.") + elif reps < 1000: + msg = ("The number of replications is low (under 1000), and p-value " + "calculations may be unreliable. Use the p-value result, with " + "caution!") + warnings.warn(msg, RuntimeWarning, stacklevel=2) + + if is_twosamp: + if compute_distance is None: + raise ValueError("Cannot run if inputs are distance matrices") + x, y = _two_sample_transform(x, y) + + if compute_distance is not None: + # compute distance matrices for x and y + x = compute_distance(x) + y = compute_distance(y) + + # calculate MGC stat + stat, stat_dict = _mgc_stat(x, y) + stat_mgc_map = stat_dict["stat_mgc_map"] + opt_scale = stat_dict["opt_scale"] + + # calculate permutation MGC p-value + pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers, + random_state=random_state) + + # save all stats (other than stat/p-value) in dictionary + mgc_dict = {"mgc_map": stat_mgc_map, + "opt_scale": opt_scale, + "null_dist": null_dist} + + # create result object with alias for backward compatibility + res = MGCResult(stat, pvalue, mgc_dict) + res.stat = stat + return res + + +def _mgc_stat(distx, disty): + r"""Helper function that calculates the MGC stat. See above for use. + + Parameters + ---------- + distx, disty : ndarray + `distx` and `disty` have shapes `(n, p)` and `(n, q)` or + `(n, n)` and `(n, n)` + if distance matrices. + + Returns + ------- + stat : float + The sample MGC test statistic within `[-1, 1]`. + stat_dict : dict + Contains additional useful additional returns containing the following + keys: + + - stat_mgc_map : ndarray + MGC-map of the statistics. + - opt_scale : (float, float) + The estimated optimal scale as a `(x, y)` pair. 
+
+    """
+    # calculate MGC map and optimal scale
+    stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
+
+    # NOTE(review): the shape is unpacked here as (n, m) but indexed below
+    # as [m - 1][n - 1]; the sibling functions (_threshold_mgc_map,
+    # _smooth_mgc_map) unpack (m, n) and index [m - 1][n - 1].  This only
+    # agrees when the map is square (the usual case) or 1x1 -- confirm
+    # against upstream before relying on non-square maps.
+    n, m = stat_mgc_map.shape
+    if m == 1 or n == 1:
+        # the global scale is the statistic calculated at maximal nearest
+        # neighbors. There is not enough local scale to search over, so
+        # default to global scale
+        stat = stat_mgc_map[m - 1][n - 1]
+        opt_scale = m * n
+    else:
+        samp_size = len(distx) - 1
+
+        # threshold to find connected region of significant local correlations
+        sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
+
+        # maximum within the significant region
+        stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
+
+    stat_dict = {"stat_mgc_map": stat_mgc_map,
+                 "opt_scale": opt_scale}
+
+    return stat, stat_dict
+
+
+def _threshold_mgc_map(stat_mgc_map, samp_size):
+    r"""
+    Finds a connected region of significance in the MGC-map by thresholding.
+
+    Parameters
+    ----------
+    stat_mgc_map : ndarray
+        All local correlations within `[-1,1]`.
+    samp_size : int
+        The sample size of original data.
+
+    Returns
+    -------
+    sig_connect : ndarray
+        A binary matrix with 1's indicating the significant region.
+
+    """
+    m, n = stat_mgc_map.shape
+
+    # 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
+    # with varying levels of performance. Threshold is based on a beta
+    # approximation.
+    per_sig = 1 - (0.02 / samp_size)  # Percentile to consider as significant
+    threshold = samp_size * (samp_size - 3)/4 - 1/2  # Beta approximation
+    threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
+
+    # the global scale is the statistic calculated at maximal nearest
+    # neighbors.
Threshold is the maximum on the global and local scales + threshold = max(threshold, stat_mgc_map[m - 1][n - 1]) + + # find the largest connected component of significant correlations + sig_connect = stat_mgc_map > threshold + if np.sum(sig_connect) > 0: + sig_connect, _ = _measurements.label(sig_connect) + _, label_counts = np.unique(sig_connect, return_counts=True) + + # skip the first element in label_counts, as it is count(zeros) + max_label = np.argmax(label_counts[1:]) + 1 + sig_connect = sig_connect == max_label + else: + sig_connect = np.array([[False]]) + + return sig_connect + + +def _smooth_mgc_map(sig_connect, stat_mgc_map): + """Finds the smoothed maximal within the significant region R. + + If area of R is too small it returns the last local correlation. Otherwise, + returns the maximum within significant_connected_region. + + Parameters + ---------- + sig_connect : ndarray + A binary matrix with 1's indicating the significant region. + stat_mgc_map : ndarray + All local correlations within `[-1, 1]`. + + Returns + ------- + stat : float + The sample MGC statistic within `[-1, 1]`. + opt_scale: (float, float) + The estimated optimal scale as an `(x, y)` pair. + + """ + m, n = stat_mgc_map.shape + + # the global scale at is the statistic calculated at maximial nearest + # neighbors. By default, statistic and optimal scale are global. 
+ stat = stat_mgc_map[m - 1][n - 1] + opt_scale = [m, n] + + if np.linalg.norm(sig_connect) != 0: + # proceed only when the connected region's area is sufficiently large + # 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05 + # with varying levels of performance + if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n): + max_corr = max(stat_mgc_map[sig_connect]) + + # find all scales within significant_connected_region that maximize + # the local correlation + max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect) + + if max_corr >= stat: + stat = max_corr + + k, l = max_corr_index + one_d_indices = k * n + l # 2D to 1D indexing + k = np.max(one_d_indices) // n + l = np.max(one_d_indices) % n + opt_scale = [k+1, l+1] # adding 1s to match R indexing + + return stat, opt_scale + + +def _two_sample_transform(u, v): + """Helper function that concatenates x and y for two sample MGC stat. + + See above for use. + + Parameters + ---------- + u, v : ndarray + `u` and `v` have shapes `(n, p)` and `(m, p)`. + + Returns + ------- + x : ndarray + Concatenate `u` and `v` along the `axis = 0`. `x` thus has shape + `(2n, p)`. + y : ndarray + Label matrix for `x` where 0 refers to samples that comes from `u` and + 1 refers to samples that come from `v`. `y` thus has shape `(2n, 1)`. 
+ + """ + nx = u.shape[0] + ny = v.shape[0] + x = np.concatenate([u, v], axis=0) + y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1) + return x, y diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py new file mode 100644 index 0000000000000000000000000000000000000000..baf2e70c9f0fb7d31eebe9b081a22f8446e49780 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py @@ -0,0 +1,482 @@ +import numpy as np + +from scipy.special import ndtri +from scipy.optimize import brentq +from ._discrete_distns import nchypergeom_fisher +from ._common import ConfidenceInterval + + +def _sample_odds_ratio(table): + """ + Given a table [[a, b], [c, d]], compute a*d/(b*c). + + Return nan if the numerator and denominator are 0. + Return inf if just the denominator is 0. + """ + # table must be a 2x2 numpy array. + if table[1, 0] > 0 and table[0, 1] > 0: + oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1]) + elif table[0, 0] == 0 or table[1, 1] == 0: + oddsratio = np.nan + else: + oddsratio = np.inf + return oddsratio + + +def _solve(func): + """ + Solve func(nc) = 0. func must be an increasing function. + """ + # We could just as well call the variable `x` instead of `nc`, but we + # always call this function with functions for which nc (the noncentrality + # parameter) is the variable for which we are solving. + nc = 1.0 + value = func(nc) + if value == 0: + return nc + + # Multiplicative factor by which to increase or decrease nc when + # searching for a bracketing interval. + factor = 2.0 + # Find a bracketing interval. + if value > 0: + nc /= factor + while func(nc) > 0: + nc /= factor + lo = nc + hi = factor*nc + else: + nc *= factor + while func(nc) < 0: + nc *= factor + lo = nc/factor + hi = nc + + # lo and hi bracket the solution for nc. 
+    nc = brentq(func, lo, hi, xtol=1e-13)
+    return nc
+
+
+def _nc_hypergeom_mean_inverse(x, M, n, N):
+    """
+    For the given noncentral hypergeometric parameters x, M, n, and N
+    (table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2
+    contingency table), find the noncentrality parameter of Fisher's
+    noncentral hypergeometric distribution whose mean is x.
+    """
+    # The mean of nchypergeom_fisher is increasing in nc, as _solve requires.
+    nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)
+    return nc
+
+
+def _hypergeom_params_from_table(table):
+    # The notation M, n and N is consistent with stats.hypergeom and
+    # stats.nchypergeom_fisher.
+    x = table[0, 0]
+    M = table.sum()
+    n = table[0].sum()
+    N = table[:, 0].sum()
+    return x, M, n, N
+
+
+def _ci_upper(table, alpha):
+    """
+    Compute the upper end of the confidence interval.
+    """
+    # If the sample odds ratio is already infinite, no finite upper bound
+    # exists.
+    if _sample_odds_ratio(table) == np.inf:
+        return np.inf
+
+    x, M, n, N = _hypergeom_params_from_table(table)
+
+    # nchypergeom_fisher.cdf is a decreasing function of nc, so we negate
+    # it in the lambda expression to give _solve the increasing function
+    # it expects.
+    nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha)
+    return nc
+
+
+def _ci_lower(table, alpha):
+    """
+    Compute the lower end of the confidence interval.
+    """
+    # A zero sample odds ratio pins the lower confidence limit at 0.
+    if _sample_odds_ratio(table) == 0:
+        return 0
+
+    x, M, n, N = _hypergeom_params_from_table(table)
+
+    # The survival function is increasing in nc, so it can be passed to
+    # _solve directly.
+    nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha)
+    return nc
+
+
+def _conditional_oddsratio(table):
+    """
+    Conditional MLE of the odds ratio for the 2x2 contingency table.
+    """
+    x, M, n, N = _hypergeom_params_from_table(table)
+    # Get the bounds of the support.  The support of the noncentral
+    # hypergeometric distribution with parameters M, n, and N is the same
+    # for all values of the noncentrality parameter, so we can use 1 here.
+    lo, hi = nchypergeom_fisher.support(M, n, N, 1)
+
+    # Check if x is at one of the extremes of the support.  If so, we know
+    # the odds ratio is either 0 or inf.
+    if x == lo:
+        # x is at the low end of the support.
+ return 0 + if x == hi: + # x is at the high end of the support. + return np.inf + + nc = _nc_hypergeom_mean_inverse(x, M, n, N) + return nc + + +def _conditional_oddsratio_ci(table, confidence_level=0.95, + alternative='two-sided'): + """ + Conditional exact confidence interval for the odds ratio. + """ + if alternative == 'two-sided': + alpha = 0.5*(1 - confidence_level) + lower = _ci_lower(table, alpha) + upper = _ci_upper(table, alpha) + elif alternative == 'less': + lower = 0.0 + upper = _ci_upper(table, 1 - confidence_level) + else: + # alternative == 'greater' + lower = _ci_lower(table, 1 - confidence_level) + upper = np.inf + + return lower, upper + + +def _sample_odds_ratio_ci(table, confidence_level=0.95, + alternative='two-sided'): + oddsratio = _sample_odds_ratio(table) + log_or = np.log(oddsratio) + se = np.sqrt((1/table).sum()) + if alternative == 'less': + z = ndtri(confidence_level) + loglow = -np.inf + loghigh = log_or + z*se + elif alternative == 'greater': + z = ndtri(confidence_level) + loglow = log_or - z*se + loghigh = np.inf + else: + # alternative is 'two-sided' + z = ndtri(0.5*confidence_level + 0.5) + loglow = log_or - z*se + loghigh = log_or + z*se + + return np.exp(loglow), np.exp(loghigh) + + +class OddsRatioResult: + """ + Result of `scipy.stats.contingency.odds_ratio`. See the + docstring for `odds_ratio` for more details. + + Attributes + ---------- + statistic : float + The computed odds ratio. + + * If `kind` is ``'sample'``, this is sample (or unconditional) + estimate, given by + ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. + * If `kind` is ``'conditional'``, this is the conditional + maximum likelihood estimate for the odds ratio. It is + the noncentrality parameter of Fisher's noncentral + hypergeometric distribution with the same hypergeometric + parameters as `table` and whose mean is ``table[0, 0]``. + + Methods + ------- + confidence_interval : + Confidence interval for the odds ratio. 
    """

    def __init__(self, _table, _kind, statistic):
        # for now, no need to make _table and _kind public, since this sort of
        # information is returned in very few `scipy.stats` results
        self._table = _table
        self._kind = _kind
        self.statistic = statistic

    def __repr__(self):
        return f"OddsRatioResult(statistic={self.statistic})"

    def confidence_interval(self, confidence_level=0.95,
                            alternative='two-sided'):
        """
        Confidence interval for the odds ratio.

        Parameters
        ----------
        confidence_level: float
            Desired confidence level for the confidence interval.
            The value must be given as a fraction between 0 and 1.
            Default is 0.95 (meaning 95%).

        alternative : {'two-sided', 'less', 'greater'}, optional
            The alternative hypothesis of the hypothesis test to which the
            confidence interval corresponds. That is, suppose the null
            hypothesis is that the true odds ratio equals ``OR`` and the
            confidence interval is ``(low, high)``. Then the following options
            for `alternative` are available (default is 'two-sided'):

            * 'two-sided': the true odds ratio is not equal to ``OR``. There
              is evidence against the null hypothesis at the chosen
              `confidence_level` if ``high < OR`` or ``low > OR``.
            * 'less': the true odds ratio is less than ``OR``. The ``low`` end
              of the confidence interval is 0, and there is evidence against
              the null hypothesis at the chosen `confidence_level` if
              ``high < OR``.
            * 'greater': the true odds ratio is greater than ``OR``. The
              ``high`` end of the confidence interval is ``np.inf``, and there
              is evidence against the null hypothesis at the chosen
              `confidence_level` if ``low > OR``.

        Returns
        -------
        ci : ``ConfidenceInterval`` instance
            The confidence interval, represented as an object with
            attributes ``low`` and ``high``.

        Notes
        -----
        When `kind` is ``'conditional'``, the limits of the confidence
        interval are the conditional "exact confidence limits" as described
        by Fisher [1]_. The conditional odds ratio and confidence interval are
        also discussed in Section 4.1.2 of the text by Sahai and Khurshid [2]_.

        When `kind` is ``'sample'``, the confidence interval is computed
        under the assumption that the logarithm of the odds ratio is normally
        distributed with standard error given by::

            se = sqrt(1/a + 1/b + 1/c + 1/d)

        where ``a``, ``b``, ``c`` and ``d`` are the elements of the
        contingency table. (See, for example, [2]_, section 3.1.3.2,
        or [3]_, section 2.3.3).

        References
        ----------
        .. [1] R. A. Fisher (1935), The logic of inductive inference,
               Journal of the Royal Statistical Society, Vol. 98, No. 1,
               pp. 39-82.
        .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
               Methods, Techniques, and Applications, CRC Press LLC, Boca
               Raton, Florida.
        .. [3] Alan Agresti, An Introduction to Categorical Data Analysis
               (second edition), Wiley, Hoboken, NJ, USA (2007).
        """
        if alternative not in ['two-sided', 'less', 'greater']:
            raise ValueError("`alternative` must be 'two-sided', 'less' or "
                             "'greater'.")

        if confidence_level < 0 or confidence_level > 1:
            raise ValueError('confidence_level must be between 0 and 1')

        # Dispatch on the kind recorded by odds_ratio() at construction time.
        if self._kind == 'conditional':
            ci = self._conditional_odds_ratio_ci(confidence_level, alternative)
        else:
            ci = self._sample_odds_ratio_ci(confidence_level, alternative)
        return ci

    def _conditional_odds_ratio_ci(self, confidence_level=0.95,
                                   alternative='two-sided'):
        """
        Confidence interval for the conditional odds ratio.
        """

        table = self._table
        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
            # If both values in a row or column are zero, the p-value is 1,
            # the odds ratio is NaN and the confidence interval is (0, inf).
            ci = (0, np.inf)
        else:
            ci = _conditional_oddsratio_ci(table,
                                           confidence_level=confidence_level,
                                           alternative=alternative)
        return ConfidenceInterval(low=ci[0], high=ci[1])

    def _sample_odds_ratio_ci(self, confidence_level=0.95,
                              alternative='two-sided'):
        """
        Confidence interval for the sample odds ratio.
        """
        # NOTE(review): this duplicates the range check already performed by
        # confidence_interval(); harmless, and it guards direct internal calls.
        if confidence_level < 0 or confidence_level > 1:
            raise ValueError('confidence_level must be between 0 and 1')

        table = self._table
        # Same degenerate-table convention as _conditional_odds_ratio_ci.
        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
            # If both values in a row or column are zero, the p-value is 1,
            # the odds ratio is NaN and the confidence interval is (0, inf).
            ci = (0, np.inf)
        else:
            ci = _sample_odds_ratio_ci(table,
                                       confidence_level=confidence_level,
                                       alternative=alternative)
        return ConfidenceInterval(low=ci[0], high=ci[1])


def odds_ratio(table, *, kind='conditional'):
    r"""
    Compute the odds ratio for a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements must be non-negative integers.
    kind : str, optional
        Which kind of odds ratio to compute, either the sample
        odds ratio (``kind='sample'``) or the conditional odds ratio
        (``kind='conditional'``). Default is ``'conditional'``.

    Returns
    -------
    result : `~scipy.stats._result_classes.OddsRatioResult` instance
        The returned object has two computed attributes:

        statistic : float
            * If `kind` is ``'sample'``, this is sample (or unconditional)
              estimate, given by
              ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
            * If `kind` is ``'conditional'``, this is the conditional
              maximum likelihood estimate for the odds ratio. It is
              the noncentrality parameter of Fisher's noncentral
              hypergeometric distribution with the same hypergeometric
              parameters as `table` and whose mean is ``table[0, 0]``.

        The object has the method `confidence_interval` that computes
        the confidence interval of the odds ratio.

    See Also
    --------
    scipy.stats.fisher_exact
    relative_risk

    Notes
    -----
    The conditional odds ratio was discussed by Fisher (see "Example 1"
    of [1]_). Texts that cover the odds ratio include [2]_ and [3]_.

    .. versionadded:: 1.10.0

    References
    ----------
    .. [1] R. A. Fisher (1935), The logic of inductive inference,
           Journal of the Royal Statistical Society, Vol. 98, No. 1,
           pp. 39-82.
    .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research.
           Volume I - The analysis of case-control studies. IARC Sci Publ.
           (32):5-338. PMID: 7216345. (See section 4.2.)
    .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
           Methods, Techniques, and Applications, CRC Press LLC, Boca
           Raton, Florida.
    .. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of
           Cardiovascular Events in Women and Men: A Sex-Specific
           Meta-analysis of Randomized Controlled Trials."
           JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006.

    Examples
    --------
    In epidemiology, individuals are classified as "exposed" or
    "unexposed" to some factor or treatment. If the occurrence of some
    illness is under study, those who have the illness are often
    classified as "cases", and those without it are "noncases". The
    counts of the occurrences of these classes give a contingency
    table::

                  exposed   unexposed
        cases        a           b
        noncases     c           d

    The sample odds ratio may be written ``(a/c) / (b/d)``. ``a/c`` can
    be interpreted as the odds of a case occurring in the exposed group,
    and ``b/d`` as the odds of a case occurring in the unexposed group.
    The sample odds ratio is the ratio of these odds. If the odds ratio
    is greater than 1, it suggests that there is a positive association
    between being exposed and being a case.

    Interchanging the rows or columns of the contingency table inverts
    the odds ratio, so it is important to understand the meaning of labels
    given to the rows and columns of the table when interpreting the
    odds ratio.

    In [4]_, the use of aspirin to prevent cardiovascular events in women
    and men was investigated. The study notably concluded:

        ...aspirin therapy reduced the risk of a composite of
        cardiovascular events due to its effect on reducing the risk of
        ischemic stroke in women [...]

    The article lists studies of various cardiovascular events. Let's
    focus on the ischemic stroke in women.

    The following table summarizes the results of the experiment in which
    participants took aspirin or a placebo on a regular basis for several
    years. Cases of ischemic stroke were recorded::

                          Aspirin   Control/Placebo
        Ischemic stroke     176           230
        No stroke         21035         21018

    The question we ask is "Is there evidence that the aspirin reduces the
    risk of ischemic stroke?"

    Compute the odds ratio:

    >>> from scipy.stats.contingency import odds_ratio
    >>> res = odds_ratio([[176, 230], [21035, 21018]])
    >>> res.statistic
    0.7646037659999126

    For this sample, the odds of getting an ischemic stroke for those who have
    been taking aspirin are 0.76 times that of those
    who have received the placebo.

    To make statistical inferences about the population under study,
    we can compute the 95% confidence interval for the odds ratio:

    >>> res.confidence_interval(confidence_level=0.95)
    ConfidenceInterval(low=0.6241234078749812, high=0.9354102892100372)

    The 95% confidence interval for the conditional odds ratio is
    approximately (0.62, 0.94).

    The fact that the entire 95% confidence interval falls below 1 supports
    the authors' conclusion that the aspirin was associated with a
    statistically significant reduction in ischemic stroke.
    """
    if kind not in ['conditional', 'sample']:
        raise ValueError("`kind` must be 'conditional' or 'sample'.")

    c = np.asarray(table)

    if c.shape != (2, 2):
        raise ValueError(f"Invalid shape {c.shape}. The input `table` must be "
                         "of shape (2, 2).")

    if not np.issubdtype(c.dtype, np.integer):
        raise ValueError("`table` must be an array of integers, but got "
                         f"type {c.dtype}")
    # Normalize to a concrete integer dtype for the downstream computations.
    c = c.astype(np.int64)

    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is NaN and
        # the odds ratio is NaN.
        result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan)
        return result

    if kind == 'sample':
        oddsratio = _sample_odds_ratio(c)
    else:  # kind is 'conditional'
        oddsratio = _conditional_oddsratio(c)

    result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio)
    return result
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_relative_risk.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_relative_risk.py
new file mode 100644
index 0000000000000000000000000000000000000000..51525fd28adb37c72b12106450e4178c786091b2
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_relative_risk.py
@@ -0,0 +1,263 @@
import operator
from dataclasses import dataclass
import numpy as np
from scipy.special import ndtri
from ._common import ConfidenceInterval


def _validate_int(n, bound, name):
    """Validate that `n` is an integer (per `operator.index`) and >= `bound`.

    Raises TypeError for non-integers and ValueError for values below
    `bound`; returns the validated value as a plain int.
    """
    msg = f'{name} must be an integer not less than {bound}, but got {n!r}'
    try:
        n = operator.index(n)
    except TypeError:
        raise TypeError(msg) from None
    if n < bound:
        raise ValueError(msg)
    return n


@dataclass
class RelativeRiskResult:
    """
    Result of `scipy.stats.contingency.relative_risk`.

    Attributes
    ----------
    relative_risk : float
        This is::

            (exposed_cases/exposed_total) / (control_cases/control_total)

    exposed_cases : int
        The number of "cases" (i.e. occurrence of disease or other event
        of interest) among the sample of "exposed" individuals.
    exposed_total : int
        The total number of "exposed" individuals in the sample.
    control_cases : int
        The number of "cases" among the sample of "control" or non-exposed
        individuals.
    control_total : int
        The total number of "control" individuals in the sample.

    Methods
    -------
    confidence_interval :
        Compute the confidence interval for the relative risk estimate.
    """

    relative_risk: float
    exposed_cases: int
    exposed_total: int
    control_cases: int
    control_total: int

    def confidence_interval(self, confidence_level=0.95):
        """
        Compute the confidence interval for the relative risk.

        The confidence interval is computed using the Katz method
        (i.e. "Method C" of [1]_; see also [2]_, section 3.1.2).

        Parameters
        ----------
        confidence_level : float, optional
            The confidence level to use for the confidence interval.
            Default is 0.95.

        Returns
        -------
        ci : ConfidenceInterval instance
            The return value is an object with attributes ``low`` and
            ``high`` that hold the confidence interval.

        References
        ----------
        .. [1] D. Katz, J. Baptista, S. P. Azen and M. C. Pike, "Obtaining
               confidence intervals for the risk ratio in cohort studies",
               Biometrics, 34, 469-474 (1978).
        .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
               CRC Press LLC, Boca Raton, FL, USA (1996).


        Examples
        --------
        >>> from scipy.stats.contingency import relative_risk
        >>> result = relative_risk(exposed_cases=10, exposed_total=75,
        ...                        control_cases=12, control_total=225)
        >>> result.relative_risk
        2.5
        >>> result.confidence_interval()
        ConfidenceInterval(low=1.1261564003469628, high=5.549850800541033)
        """
        if not 0 <= confidence_level <= 1:
            raise ValueError('confidence_level must be in the interval '
                             '[0, 1].')

        # Handle edge cases where either exposed_cases or control_cases
        # is zero. We follow the convention of the R function riskratio
        # from the epitools library.
        if self.exposed_cases == 0 and self.control_cases == 0:
            # relative risk is nan.
            return ConfidenceInterval(low=np.nan, high=np.nan)
        elif self.exposed_cases == 0:
            # relative risk is 0.
            return ConfidenceInterval(low=0.0, high=np.nan)
        elif self.control_cases == 0:
            # relative risk is inf
            return ConfidenceInterval(low=np.nan, high=np.inf)

        alpha = 1 - confidence_level
        # Two-sided z critical value for the chosen confidence level.
        z = ndtri(1 - alpha/2)
        rr = self.relative_risk

        # Estimate of the variance of log(rr) is
        # var(log(rr)) = 1/exposed_cases - 1/exposed_total +
        #                1/control_cases - 1/control_total
        # and the standard error is the square root of that.
        se = np.sqrt(1/self.exposed_cases - 1/self.exposed_total +
                     1/self.control_cases - 1/self.control_total)
        delta = z*se
        # The interval is symmetric on the log scale: rr * exp(+/- z*se).
        katz_lo = rr*np.exp(-delta)
        katz_hi = rr*np.exp(delta)
        return ConfidenceInterval(low=katz_lo, high=katz_hi)


def relative_risk(exposed_cases, exposed_total, control_cases, control_total):
    """
    Compute the relative risk (also known as the risk ratio).

    This function computes the relative risk associated with a 2x2
    contingency table ([1]_, section 2.2.3; [2]_, section 3.1.2). Instead
    of accepting a table as an argument, the individual numbers that are
    used to compute the relative risk are given as separate parameters.
    This is to avoid the ambiguity of which row or column of the contingency
    table corresponds to the "exposed" cases and which corresponds to the
    "control" cases.
    Unlike, say, the odds ratio, the relative risk is not
    invariant under an interchange of the rows or columns.

    Parameters
    ----------
    exposed_cases : nonnegative int
        The number of "cases" (i.e. occurrence of disease or other event
        of interest) among the sample of "exposed" individuals.
    exposed_total : positive int
        The total number of "exposed" individuals in the sample.
    control_cases : nonnegative int
        The number of "cases" among the sample of "control" or non-exposed
        individuals.
    control_total : positive int
        The total number of "control" individuals in the sample.

    Returns
    -------
    result : instance of `~scipy.stats._result_classes.RelativeRiskResult`
        The object has the float attribute ``relative_risk``, which is::

            rr = (exposed_cases/exposed_total) / (control_cases/control_total)

        The object also has the method ``confidence_interval`` to compute
        the confidence interval of the relative risk for a given confidence
        level.

    See Also
    --------
    odds_ratio

    Notes
    -----
    The R package epitools has the function `riskratio`, which accepts
    a table with the following layout::

                        disease=0   disease=1
        exposed=0 (ref)    n00         n01
        exposed=1          n10         n11

    With a 2x2 table in the above format, the estimate of the CI is
    computed by `riskratio` when the argument method="wald" is given,
    or with the function `riskratio.wald`.

    For example, in a test of the incidence of lung cancer among a
    sample of smokers and nonsmokers, the "exposed" category would
    correspond to "is a smoker" and the "disease" category would
    correspond to "has or had lung cancer".

    To pass the same data to ``relative_risk``, use::

        relative_risk(n11, n10 + n11, n01, n00 + n01)

    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] Alan Agresti, An Introduction to Categorical Data Analysis
           (second edition), Wiley, Hoboken, NJ, USA (2007).
    .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
           CRC Press LLC, Boca Raton, FL, USA (1996).

    Examples
    --------
    >>> from scipy.stats.contingency import relative_risk

    This example is from Example 3.1 of [2]_. The results of a heart
    disease study are summarized in the following table::

                 High CAT   Low CAT    Total
                 --------   -------    -----
        CHD         27         44        71
        No CHD      95        443       538

        Total      122        487       609

    CHD is coronary heart disease, and CAT refers to the level of
    circulating catecholamine. CAT is the "exposure" variable, and
    high CAT is the "exposed" category. So the data from the table
    to be passed to ``relative_risk`` is::

        exposed_cases = 27
        exposed_total = 122
        control_cases = 44
        control_total = 487

    >>> result = relative_risk(27, 122, 44, 487)
    >>> result.relative_risk
    2.4495156482861398

    Find the confidence interval for the relative risk.

    >>> result.confidence_interval(confidence_level=0.95)
    ConfidenceInterval(low=1.5836990926700116, high=3.7886786315466354)

    The interval does not contain 1, so the data supports the statement
    that high CAT is associated with greater risk of CHD.
    """
    # Relative risk is a trivial calculation. The nontrivial part is in the
    # `confidence_interval` method of the RelativeRiskResult class.

    exposed_cases = _validate_int(exposed_cases, 0, "exposed_cases")
    exposed_total = _validate_int(exposed_total, 1, "exposed_total")
    control_cases = _validate_int(control_cases, 0, "control_cases")
    control_total = _validate_int(control_total, 1, "control_total")

    if exposed_cases > exposed_total:
        raise ValueError('exposed_cases must not exceed exposed_total.')
    if control_cases > control_total:
        raise ValueError('control_cases must not exceed control_total.')

    if exposed_cases == 0 and control_cases == 0:
        # relative risk is 0/0.
        rr = np.nan
    elif exposed_cases == 0:
        # relative risk is 0/nonzero
        rr = 0.0
    elif control_cases == 0:
        # relative risk is nonzero/0.
        rr = np.inf
    else:
        p1 = exposed_cases / exposed_total
        p2 = control_cases / control_total
        rr = p1 / p2
    return RelativeRiskResult(relative_risk=rr,
                              exposed_cases=exposed_cases,
                              exposed_total=exposed_total,
                              control_cases=control_cases,
                              control_total=control_total)
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_resampling.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_resampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7d4d50a7b7fc672b4d867847c28b358eb257e72
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_resampling.py
@@ -0,0 +1,2255 @@
from __future__ import annotations

import warnings
import numpy as np
from itertools import combinations, permutations, product
from collections.abc import Sequence
from dataclasses import dataclass
import inspect

from scipy._lib._util import check_random_state, _rename_parameter, rng_integers
from scipy._lib._array_api import (array_namespace, is_numpy, xp_minimum,
                                   xp_clip, xp_moveaxis_to_end)
from scipy.special import ndtr, ndtri, comb, factorial

from ._common import ConfidenceInterval
from ._axis_nan_policy import _broadcast_concatenate, _broadcast_arrays
from ._warnings_errors import DegenerateDataWarning

__all__ = ['bootstrap', 'monte_carlo_test', 'permutation_test']


def _vectorize_statistic(statistic):
    """Vectorize an n-sample statistic"""
    # This is a little cleaner than np.nditer at the expense of some data
    # copying: concatenate samples together, then use np.apply_along_axis
    def stat_nd(*data, axis=0):
        lengths = [sample.shape[axis] for sample in data]
        split_indices = np.cumsum(lengths)[:-1]
        z = _broadcast_concatenate(data, axis)

        # move working axis to position 0 so that new dimensions in the output
        # of `statistic` are _prepended_. ("This axis is removed, and replaced
        # with new dimensions...")
        z = np.moveaxis(z, axis, 0)

        def stat_1d(z):
            # Recover the individual samples from the concatenated 1-d slice.
            data = np.split(z, split_indices)
            return statistic(*data)

        return np.apply_along_axis(stat_1d, 0, z)[()]
    return stat_nd


def _jackknife_resample(sample, batch=None):
    """Jackknife resample the sample. Only one-sample stats for now.

    Yields arrays of leave-one-out resamples in batches of at most
    `batch` rows to bound memory use.
    """
    n = sample.shape[-1]
    # batch=None (or 0) means process all n leave-one-out rows at once.
    batch_nominal = batch or n

    for k in range(0, n, batch_nominal):
        # col_start:col_end are the observations to remove
        batch_actual = min(batch_nominal, n-k)

        # jackknife - each row leaves out one observation
        j = np.ones((batch_actual, n), dtype=bool)
        np.fill_diagonal(j[:, k:k+batch_actual], False)
        i = np.arange(n)
        i = np.broadcast_to(i, (batch_actual, n))
        i = i[j].reshape((batch_actual, n-1))

        resamples = sample[..., i]
        yield resamples


def _bootstrap_resample(sample, n_resamples=None, random_state=None):
    """Bootstrap resample the sample."""
    n = sample.shape[-1]

    # bootstrap - each row is a random resample of original observations
    i = rng_integers(random_state, 0, n, (n_resamples, n))

    resamples = sample[..., i]
    return resamples


def _percentile_of_score(a, score, axis):
    """Vectorized, simplified `scipy.stats.percentileofscore`.
    Uses logic of the 'mean' value of percentileofscore's kind parameter.

    Unlike `stats.percentileofscore`, the percentile returned is a fraction
    in [0, 1].
    """
    B = a.shape[axis]
    # Average of the 'strict' (<) and 'weak' (<=) counts, i.e. the 'mean' kind.
    return ((a < score).sum(axis=axis) + (a <= score).sum(axis=axis)) / (2 * B)


def _percentile_along_axis(theta_hat_b, alpha):
    """`np.percentile` with different percentile for each slice."""
    # the difference between _percentile_along_axis and np.percentile is that
    # np.percentile gets _all_ the qs for each axis slice, whereas
    # _percentile_along_axis gets the q corresponding with each axis slice
    shape = theta_hat_b.shape[:-1]
    alpha = np.broadcast_to(alpha, shape)
    percentiles = np.zeros_like(alpha, dtype=np.float64)
    for indices, alpha_i in np.ndenumerate(alpha):
        if np.isnan(alpha_i):
            # e.g. when bootstrap distribution has only one unique element
            msg = (
                "The BCa confidence interval cannot be calculated."
                " This problem is known to occur when the distribution"
                " is degenerate or the statistic is np.min."
            )
            warnings.warn(DegenerateDataWarning(msg), stacklevel=3)
            percentiles[indices] = np.nan
        else:
            theta_hat_b_i = theta_hat_b[indices]
            percentiles[indices] = np.percentile(theta_hat_b_i, alpha_i)
    return percentiles[()]  # return scalar instead of 0d array


def _bca_interval(data, statistic, axis, alpha, theta_hat_b, batch):
    """Bias-corrected and accelerated interval."""
    # closely follows [1] 14.3 and 15.4 (Eq. 15.36)

    # calculate z0_hat
    theta_hat = np.asarray(statistic(*data, axis=axis))[..., None]
    percentile = _percentile_of_score(theta_hat_b, theta_hat, axis=-1)
    z0_hat = ndtri(percentile)

    # calculate a_hat
    theta_hat_ji = []  # j is for sample of data, i is for jackknife resample
    for j, sample in enumerate(data):
        # _jackknife_resample will add an axis prior to the last axis that
        # corresponds with the different jackknife resamples. Do the same for
        # each sample of the data to ensure broadcastability. We need to
        # create a copy of the list containing the samples anyway, so do this
        # in the loop to simplify the code. This is not the bottleneck...
        samples = [np.expand_dims(sample, -2) for sample in data]
        theta_hat_i = []
        for jackknife_sample in _jackknife_resample(sample, batch):
            samples[j] = jackknife_sample
            broadcasted = _broadcast_arrays(samples, axis=-1)
            theta_hat_i.append(statistic(*broadcasted, axis=-1))
        theta_hat_ji.append(theta_hat_i)

    theta_hat_ji = [np.concatenate(theta_hat_i, axis=-1)
                    for theta_hat_i in theta_hat_ji]

    n_j = [theta_hat_i.shape[-1] for theta_hat_i in theta_hat_ji]

    theta_hat_j_dot = [theta_hat_i.mean(axis=-1, keepdims=True)
                       for theta_hat_i in theta_hat_ji]

    U_ji = [(n - 1) * (theta_hat_dot - theta_hat_i)
            for theta_hat_dot, theta_hat_i, n
            in zip(theta_hat_j_dot, theta_hat_ji, n_j)]

    nums = [(U_i**3).sum(axis=-1)/n**3 for U_i, n in zip(U_ji, n_j)]
    dens = [(U_i**2).sum(axis=-1)/n**2 for U_i, n in zip(U_ji, n_j)]
    a_hat = 1/6 * sum(nums) / sum(dens)**(3/2)

    # calculate alpha_1, alpha_2
    z_alpha = ndtri(alpha)
    z_1alpha = -z_alpha
    num1 = z0_hat + z_alpha
    alpha_1 = ndtr(z0_hat + num1/(1 - a_hat*num1))
    num2 = z0_hat + z_1alpha
    alpha_2 = ndtr(z0_hat + num2/(1 - a_hat*num2))
    return alpha_1, alpha_2, a_hat  # return a_hat for testing


def _bootstrap_iv(data, statistic, vectorized, paired, axis, confidence_level,
                  alternative, n_resamples, batch, method, bootstrap_result,
                  random_state):
    """Input validation and standardization for `bootstrap`."""

    if vectorized not in {True, False, None}:
        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")

    # Infer vectorization support from the statistic's signature.
    if vectorized is None:
        vectorized = 'axis' in inspect.signature(statistic).parameters

    if not vectorized:
        statistic = _vectorize_statistic(statistic)

    axis_int = int(axis)
    if axis != axis_int:
        raise ValueError("`axis` must be an integer.")

    n_samples = 0
    try:
        n_samples = len(data)
    except TypeError:
        raise ValueError("`data` must be a sequence of samples.")

    if n_samples == 0:
        raise ValueError("`data` must contain at least one sample.")

    message = ("Ignoring the dimension specified by `axis`, arrays in `data` do not "
               "have the same shape. Beginning in SciPy 1.16.0, `bootstrap` will "
               "explicitly broadcast elements of `data` to the same shape (ignoring "
               "`axis`) before performing the calculation. To avoid this warning in "
               "the meantime, ensure that all samples have the same shape (except "
               "potentially along `axis`).")
    data = [np.atleast_1d(sample) for sample in data]
    # Warn (FutureWarning) if the samples' shapes differ once `axis` is removed.
    reduced_shapes = set()
    for sample in data:
        reduced_shape = list(sample.shape)
        reduced_shape.pop(axis)
        reduced_shapes.add(tuple(reduced_shape))
    if len(reduced_shapes) != 1:
        warnings.warn(message, FutureWarning, stacklevel=3)

    data_iv = []
    for sample in data:
        if sample.shape[axis_int] <= 1:
            raise ValueError("each sample in `data` must contain two or more "
                             "observations along `axis`.")
        # Internally, the observation axis is always the last axis.
        sample = np.moveaxis(sample, axis_int, -1)
        data_iv.append(sample)

    if paired not in {True, False}:
        raise ValueError("`paired` must be `True` or `False`.")

    if paired:
        n = data_iv[0].shape[-1]
        for sample in data_iv[1:]:
            if sample.shape[-1] != n:
                message = ("When `paired is True`, all samples must have the "
                           "same length along `axis`")
                raise ValueError(message)

        # to generate the bootstrap distribution for paired-sample statistics,
        # resample the indices of the observations
        def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic):
            data = [sample[..., i] for sample in data]
            return unpaired_statistic(*data, axis=axis)

        data_iv = [np.arange(n)]

    confidence_level_float = float(confidence_level)

    alternative = alternative.lower()
    alternatives = {'two-sided', 'less', 'greater'}
    if alternative not in alternatives:
        raise ValueError(f"`alternative` must be one of {alternatives}")

    n_resamples_int = int(n_resamples)
    if n_resamples != n_resamples_int or n_resamples_int < 0:
        raise ValueError("`n_resamples` must be a non-negative integer.")

    if batch is None:
        # batch=None is passed through; the resampling code treats it as
        # "process all resamples at once".
        batch_iv = batch
    else:
        batch_iv = int(batch)
        if batch != batch_iv or batch_iv <= 0:
            raise ValueError("`batch` must be a positive integer or None.")

    methods = {'percentile', 'basic', 'bca'}
    method = method.lower()
    if method not in methods:
        raise ValueError(f"`method` must be in {methods}")

    message = "`bootstrap_result` must have attribute `bootstrap_distribution'"
    if (bootstrap_result is not None
            and not hasattr(bootstrap_result, "bootstrap_distribution")):
        raise ValueError(message)

    message = ("Either `bootstrap_result.bootstrap_distribution.size` or "
               "`n_resamples` must be positive.")
    if ((not bootstrap_result or
         not bootstrap_result.bootstrap_distribution.size)
            and n_resamples_int == 0):
        raise ValueError(message)

    random_state = check_random_state(random_state)

    return (data_iv, statistic, vectorized, paired, axis_int,
            confidence_level_float, alternative, n_resamples_int, batch_iv,
            method, bootstrap_result, random_state)


@dataclass
class BootstrapResult:
    """Result object returned by `scipy.stats.bootstrap`.

    Attributes
    ----------
    confidence_interval : ConfidenceInterval
        The bootstrap confidence interval as an instance of
        `collections.namedtuple` with attributes `low` and `high`.
    bootstrap_distribution : ndarray
        The bootstrap distribution, that is, the value of `statistic` for
        each resample. The last dimension corresponds with the resamples
        (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``).
    standard_error : float or ndarray
        The bootstrap standard error, that is, the sample standard
        deviation of the bootstrap distribution.
+ + """ + confidence_interval: ConfidenceInterval + bootstrap_distribution: np.ndarray + standard_error: float | np.ndarray + + +def bootstrap(data, statistic, *, n_resamples=9999, batch=None, + vectorized=None, paired=False, axis=0, confidence_level=0.95, + alternative='two-sided', method='BCa', bootstrap_result=None, + random_state=None): + r""" + Compute a two-sided bootstrap confidence interval of a statistic. + + When `method` is ``'percentile'`` and `alternative` is ``'two-sided'``, + a bootstrap confidence interval is computed according to the following + procedure. + + 1. Resample the data: for each sample in `data` and for each of + `n_resamples`, take a random sample of the original sample + (with replacement) of the same size as the original sample. + + 2. Compute the bootstrap distribution of the statistic: for each set of + resamples, compute the test statistic. + + 3. Determine the confidence interval: find the interval of the bootstrap + distribution that is + + - symmetric about the median and + - contains `confidence_level` of the resampled statistic values. + + While the ``'percentile'`` method is the most intuitive, it is rarely + used in practice. Two more common methods are available, ``'basic'`` + ('reverse percentile') and ``'BCa'`` ('bias-corrected and accelerated'); + they differ in how step 3 is performed. + + If the samples in `data` are taken at random from their respective + distributions :math:`n` times, the confidence interval returned by + `bootstrap` will contain the true value of the statistic for those + distributions approximately `confidence_level`:math:`\, \times \, n` times. + + Parameters + ---------- + data : sequence of array-like + Each element of `data` is a sample containing scalar observations from an + underlying distribution. Elements of `data` must be broadcastable to the + same shape (with the possible exception of the dimension specified by `axis`). + + .. 
versionchanged:: 1.14.0 + `bootstrap` will now emit a ``FutureWarning`` if the shapes of the + elements of `data` are not the same (with the exception of the dimension + specified by `axis`). + Beginning in SciPy 1.16.0, `bootstrap` will explicitly broadcast the + elements to the same shape (except along `axis`) before performing + the calculation. + + statistic : callable + Statistic for which the confidence interval is to be calculated. + `statistic` must be a callable that accepts ``len(data)`` samples + as separate arguments and returns the resulting statistic. + If `vectorized` is set ``True``, + `statistic` must also accept a keyword argument `axis` and be + vectorized to compute the statistic along the provided `axis`. + n_resamples : int, default: ``9999`` + The number of resamples performed to form the bootstrap distribution + of the statistic. + batch : int, optional + The number of resamples to process in each vectorized call to + `statistic`. Memory usage is O( `batch` * ``n`` ), where ``n`` is the + sample size. Default is ``None``, in which case ``batch = n_resamples`` + (or ``batch = max(n_resamples, n)`` for ``method='BCa'``). + vectorized : bool, optional + If `vectorized` is set ``False``, `statistic` will not be passed + keyword argument `axis` and is expected to calculate the statistic + only for 1D samples. If ``True``, `statistic` will be passed keyword + argument `axis` and is expected to calculate the statistic along `axis` + when passed an ND sample array. If ``None`` (default), `vectorized` + will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of + a vectorized statistic typically reduces computation time. + paired : bool, default: ``False`` + Whether the statistic treats corresponding elements of the samples + in `data` as paired. + axis : int, default: ``0`` + The axis of the samples in `data` along which the `statistic` is + calculated. 
+ confidence_level : float, default: ``0.95`` + The confidence level of the confidence interval. + alternative : {'two-sided', 'less', 'greater'}, default: ``'two-sided'`` + Choose ``'two-sided'`` (default) for a two-sided confidence interval, + ``'less'`` for a one-sided confidence interval with the lower bound + at ``-np.inf``, and ``'greater'`` for a one-sided confidence interval + with the upper bound at ``np.inf``. The other bound of the one-sided + confidence intervals is the same as that of a two-sided confidence + interval with `confidence_level` twice as far from 1.0; e.g. the upper + bound of a 95% ``'less'`` confidence interval is the same as the upper + bound of a 90% ``'two-sided'`` confidence interval. + method : {'percentile', 'basic', 'bca'}, default: ``'BCa'`` + Whether to return the 'percentile' bootstrap confidence interval + (``'percentile'``), the 'basic' (AKA 'reverse') bootstrap confidence + interval (``'basic'``), or the bias-corrected and accelerated bootstrap + confidence interval (``'BCa'``). + bootstrap_result : BootstrapResult, optional + Provide the result object returned by a previous call to `bootstrap` + to include the previous bootstrap distribution in the new bootstrap + distribution. This can be used, for example, to change + `confidence_level`, change `method`, or see the effect of performing + additional resampling without repeating computations. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is ``None`` (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. 
+ + Returns + ------- + res : BootstrapResult + An object with attributes: + + confidence_interval : ConfidenceInterval + The bootstrap confidence interval as an instance of + `collections.namedtuple` with attributes `low` and `high`. + bootstrap_distribution : ndarray + The bootstrap distribution, that is, the value of `statistic` for + each resample. The last dimension corresponds with the resamples + (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``). + standard_error : float or ndarray + The bootstrap standard error, that is, the sample standard + deviation of the bootstrap distribution. + + Warns + ----- + `~scipy.stats.DegenerateDataWarning` + Generated when ``method='BCa'`` and the bootstrap distribution is + degenerate (e.g. all elements are identical). + + Notes + ----- + Elements of the confidence interval may be NaN for ``method='BCa'`` if + the bootstrap distribution is degenerate (e.g. all elements are identical). + In this case, consider using another `method` or inspecting `data` for + indications that other analysis may be more appropriate (e.g. all + observations are identical). + + References + ---------- + .. [1] B. Efron and R. J. Tibshirani, An Introduction to the Bootstrap, + Chapman & Hall/CRC, Boca Raton, FL, USA (1993) + .. [2] Nathaniel E. Helwig, "Bootstrap Confidence Intervals", + http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf + .. [3] Bootstrapping (statistics), Wikipedia, + https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 + + Examples + -------- + Suppose we have sampled data from an unknown distribution. + + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> from scipy.stats import norm + >>> dist = norm(loc=2, scale=4) # our "unknown" distribution + >>> data = dist.rvs(size=100, random_state=rng) + + We are interested in the standard deviation of the distribution. 
+ + >>> std_true = dist.std() # the true value of the statistic + >>> print(std_true) + 4.0 + >>> std_sample = np.std(data) # the sample statistic + >>> print(std_sample) + 3.9460644295563863 + + The bootstrap is used to approximate the variability we would expect if we + were to repeatedly sample from the unknown distribution and calculate the + statistic of the sample each time. It does this by repeatedly resampling + values *from the original sample* with replacement and calculating the + statistic of each resample. This results in a "bootstrap distribution" of + the statistic. + + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import bootstrap + >>> data = (data,) # samples must be in a sequence + >>> res = bootstrap(data, np.std, confidence_level=0.9, + ... random_state=rng) + >>> fig, ax = plt.subplots() + >>> ax.hist(res.bootstrap_distribution, bins=25) + >>> ax.set_title('Bootstrap Distribution') + >>> ax.set_xlabel('statistic value') + >>> ax.set_ylabel('frequency') + >>> plt.show() + + The standard error quantifies this variability. It is calculated as the + standard deviation of the bootstrap distribution. + + >>> res.standard_error + 0.24427002125829136 + >>> res.standard_error == np.std(res.bootstrap_distribution, ddof=1) + True + + The bootstrap distribution of the statistic is often approximately normal + with scale equal to the standard error. + + >>> x = np.linspace(3, 5) + >>> pdf = norm.pdf(x, loc=std_sample, scale=res.standard_error) + >>> fig, ax = plt.subplots() + >>> ax.hist(res.bootstrap_distribution, bins=25, density=True) + >>> ax.plot(x, pdf) + >>> ax.set_title('Normal Approximation of the Bootstrap Distribution') + >>> ax.set_xlabel('statistic value') + >>> ax.set_ylabel('pdf') + >>> plt.show() + + This suggests that we could construct a 90% confidence interval on the + statistic based on quantiles of this normal distribution. 
+ + >>> norm.interval(0.9, loc=std_sample, scale=res.standard_error) + (3.5442759991341726, 4.3478528599786) + + Due to central limit theorem, this normal approximation is accurate for a + variety of statistics and distributions underlying the samples; however, + the approximation is not reliable in all cases. Because `bootstrap` is + designed to work with arbitrary underlying distributions and statistics, + it uses more advanced techniques to generate an accurate confidence + interval. + + >>> print(res.confidence_interval) + ConfidenceInterval(low=3.57655333533867, high=4.382043696342881) + + If we sample from the original distribution 100 times and form a bootstrap + confidence interval for each sample, the confidence interval + contains the true value of the statistic approximately 90% of the time. + + >>> n_trials = 100 + >>> ci_contains_true_std = 0 + >>> for i in range(n_trials): + ... data = (dist.rvs(size=100, random_state=rng),) + ... res = bootstrap(data, np.std, confidence_level=0.9, + ... n_resamples=999, random_state=rng) + ... ci = res.confidence_interval + ... if ci[0] < std_true < ci[1]: + ... ci_contains_true_std += 1 + >>> print(ci_contains_true_std) + 88 + + Rather than writing a loop, we can also determine the confidence intervals + for all 100 samples at once. + + >>> data = (dist.rvs(size=(n_trials, 100), random_state=rng),) + >>> res = bootstrap(data, np.std, axis=-1, confidence_level=0.9, + ... n_resamples=999, random_state=rng) + >>> ci_l, ci_u = res.confidence_interval + + Here, `ci_l` and `ci_u` contain the confidence interval for each of the + ``n_trials = 100`` samples. + + >>> print(ci_l[:5]) + [3.86401283 3.33304394 3.52474647 3.54160981 3.80569252] + >>> print(ci_u[:5]) + [4.80217409 4.18143252 4.39734707 4.37549713 4.72843584] + + And again, approximately 90% contain the true value, ``std_true = 4``. 
+ + >>> print(np.sum((ci_l < std_true) & (std_true < ci_u))) + 93 + + `bootstrap` can also be used to estimate confidence intervals of + multi-sample statistics. For example, to get a confidence interval + for the difference between means, we write a function that accepts + two sample arguments and returns only the statistic. The use of the + ``axis`` argument ensures that all mean calculations are perform in + a single vectorized call, which is faster than looping over pairs + of resamples in Python. + + >>> def my_statistic(sample1, sample2, axis=-1): + ... mean1 = np.mean(sample1, axis=axis) + ... mean2 = np.mean(sample2, axis=axis) + ... return mean1 - mean2 + + Here, we use the 'percentile' method with the default 95% confidence level. + + >>> sample1 = norm.rvs(scale=1, size=100, random_state=rng) + >>> sample2 = norm.rvs(scale=2, size=100, random_state=rng) + >>> data = (sample1, sample2) + >>> res = bootstrap(data, my_statistic, method='basic', random_state=rng) + >>> print(my_statistic(sample1, sample2)) + 0.16661030792089523 + >>> print(res.confidence_interval) + ConfidenceInterval(low=-0.29087973240818693, high=0.6371338699912273) + + The bootstrap estimate of the standard error is also available. + + >>> print(res.standard_error) + 0.238323948262459 + + Paired-sample statistics work, too. For example, consider the Pearson + correlation coefficient. + + >>> from scipy.stats import pearsonr + >>> n = 100 + >>> x = np.linspace(0, 10, n) + >>> y = x + rng.uniform(size=n) + >>> print(pearsonr(x, y)[0]) # element 0 is the statistic + 0.9954306665125647 + + We wrap `pearsonr` so that it returns only the statistic, ensuring + that we use the `axis` argument because it is available. + + >>> def my_statistic(x, y, axis=-1): + ... return pearsonr(x, y, axis=axis)[0] + + We call `bootstrap` using ``paired=True``. 
+ + >>> res = bootstrap((x, y), my_statistic, paired=True, random_state=rng) + >>> print(res.confidence_interval) + ConfidenceInterval(low=0.9941504301315878, high=0.996377412215445) + + The result object can be passed back into `bootstrap` to perform additional + resampling: + + >>> len(res.bootstrap_distribution) + 9999 + >>> res = bootstrap((x, y), my_statistic, paired=True, + ... n_resamples=1000, random_state=rng, + ... bootstrap_result=res) + >>> len(res.bootstrap_distribution) + 10999 + + or to change the confidence interval options: + + >>> res2 = bootstrap((x, y), my_statistic, paired=True, + ... n_resamples=0, random_state=rng, bootstrap_result=res, + ... method='percentile', confidence_level=0.9) + >>> np.testing.assert_equal(res2.bootstrap_distribution, + ... res.bootstrap_distribution) + >>> res.confidence_interval + ConfidenceInterval(low=0.9941574828235082, high=0.9963781698210212) + + without repeating computation of the original bootstrap distribution. + + """ + # Input validation + args = _bootstrap_iv(data, statistic, vectorized, paired, axis, + confidence_level, alternative, n_resamples, batch, + method, bootstrap_result, random_state) + (data, statistic, vectorized, paired, axis, confidence_level, + alternative, n_resamples, batch, method, bootstrap_result, + random_state) = args + + theta_hat_b = ([] if bootstrap_result is None + else [bootstrap_result.bootstrap_distribution]) + + batch_nominal = batch or n_resamples or 1 + + for k in range(0, n_resamples, batch_nominal): + batch_actual = min(batch_nominal, n_resamples-k) + # Generate resamples + resampled_data = [] + for sample in data: + resample = _bootstrap_resample(sample, n_resamples=batch_actual, + random_state=random_state) + resampled_data.append(resample) + + # Compute bootstrap distribution of statistic + theta_hat_b.append(statistic(*resampled_data, axis=-1)) + theta_hat_b = np.concatenate(theta_hat_b, axis=-1) + + # Calculate percentile interval + alpha = ((1 - confidence_level)/2 
if alternative == 'two-sided' + else (1 - confidence_level)) + if method == 'bca': + interval = _bca_interval(data, statistic, axis=-1, alpha=alpha, + theta_hat_b=theta_hat_b, batch=batch)[:2] + percentile_fun = _percentile_along_axis + else: + interval = alpha, 1-alpha + + def percentile_fun(a, q): + return np.percentile(a=a, q=q, axis=-1) + + # Calculate confidence interval of statistic + ci_l = percentile_fun(theta_hat_b, interval[0]*100) + ci_u = percentile_fun(theta_hat_b, interval[1]*100) + if method == 'basic': # see [3] + theta_hat = statistic(*data, axis=-1) + ci_l, ci_u = 2*theta_hat - ci_u, 2*theta_hat - ci_l + + if alternative == 'less': + ci_l = np.full_like(ci_l, -np.inf) + elif alternative == 'greater': + ci_u = np.full_like(ci_u, np.inf) + + return BootstrapResult(confidence_interval=ConfidenceInterval(ci_l, ci_u), + bootstrap_distribution=theta_hat_b, + standard_error=np.std(theta_hat_b, ddof=1, axis=-1)) + + +def _monte_carlo_test_iv(data, rvs, statistic, vectorized, n_resamples, + batch, alternative, axis): + """Input validation for `monte_carlo_test`.""" + axis_int = int(axis) + if axis != axis_int: + raise ValueError("`axis` must be an integer.") + + if vectorized not in {True, False, None}: + raise ValueError("`vectorized` must be `True`, `False`, or `None`.") + + if not isinstance(rvs, Sequence): + rvs = (rvs,) + data = (data,) + for rvs_i in rvs: + if not callable(rvs_i): + raise TypeError("`rvs` must be callable or sequence of callables.") + + # At this point, `data` should be a sequence + # If it isn't, the user passed a sequence for `rvs` but not `data` + message = "If `rvs` is a sequence, `len(rvs)` must equal `len(data)`." 
+ try: + len(data) + except TypeError as e: + raise ValueError(message) from e + if not len(rvs) == len(data): + raise ValueError(message) + + if not callable(statistic): + raise TypeError("`statistic` must be callable.") + + if vectorized is None: + try: + signature = inspect.signature(statistic).parameters + except ValueError as e: + message = (f"Signature inspection of {statistic=} failed; " + "pass `vectorize` explicitly.") + raise ValueError(message) from e + vectorized = 'axis' in signature + + xp = array_namespace(*data) + + if not vectorized: + if is_numpy(xp): + statistic_vectorized = _vectorize_statistic(statistic) + else: + message = ("`statistic` must be vectorized (i.e. support an `axis` " + f"argument) when `data` contains {xp.__name__} arrays.") + raise ValueError(message) + else: + statistic_vectorized = statistic + + data = _broadcast_arrays(data, axis, xp=xp) + data_iv = [] + for sample in data: + sample = xp.broadcast_to(sample, (1,)) if sample.ndim == 0 else sample + sample = xp_moveaxis_to_end(sample, axis_int, xp=xp) + data_iv.append(sample) + + n_resamples_int = int(n_resamples) + if n_resamples != n_resamples_int or n_resamples_int <= 0: + raise ValueError("`n_resamples` must be a positive integer.") + + if batch is None: + batch_iv = batch + else: + batch_iv = int(batch) + if batch != batch_iv or batch_iv <= 0: + raise ValueError("`batch` must be a positive integer or None.") + + alternatives = {'two-sided', 'greater', 'less'} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f"`alternative` must be in {alternatives}") + + # Infer the desired p-value dtype based on the input types + min_float = getattr(xp, 'float16', xp.float32) + dtype = xp.result_type(*data_iv, min_float) + + return (data_iv, rvs, statistic_vectorized, vectorized, n_resamples_int, + batch_iv, alternative, axis_int, dtype, xp) + + +@dataclass +class MonteCarloTestResult: + """Result object returned by 
    `scipy.stats.monte_carlo_test`.

    Attributes
    ----------
    statistic : float or ndarray
        The observed test statistic of the sample.
    pvalue : float or ndarray
        The p-value for the given alternative.
    null_distribution : ndarray
        The values of the test statistic generated under the null
        hypothesis.
    """
    statistic: float | np.ndarray
    pvalue: float | np.ndarray
    null_distribution: np.ndarray


@_rename_parameter('sample', 'data')
def monte_carlo_test(data, rvs, statistic, *, vectorized=None,
                     n_resamples=9999, batch=None, alternative="two-sided",
                     axis=0):
    r"""Perform a Monte Carlo hypothesis test.

    `data` contains a sample or a sequence of one or more samples. `rvs`
    specifies the distribution(s) of the sample(s) in `data` under the null
    hypothesis. The value of `statistic` for the given `data` is compared
    against a Monte Carlo null distribution: the value of the statistic for
    each of `n_resamples` sets of samples generated using `rvs`. This gives
    the p-value, the probability of observing such an extreme value of the
    test statistic under the null hypothesis.

    Parameters
    ----------
    data : array-like or sequence of array-like
        An array or sequence of arrays of observations.
    rvs : callable or tuple of callables
        A callable or sequence of callables that generates random variates
        under the null hypothesis. Each element of `rvs` must be a callable
        that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and
        returns an N-d array sample of that shape. If `rvs` is a sequence, the
        number of callables in `rvs` must match the number of samples in
        `data`, i.e. ``len(rvs) == len(data)``. If `rvs` is a single callable,
        `data` is treated as a single sample.
    statistic : callable
        Statistic for which the p-value of the hypothesis test is to be
        calculated. `statistic` must be a callable that accepts a sample
        (e.g. ``statistic(sample)``) or ``len(rvs)`` separate samples (e.g.
        ``statistic(samples1, sample2)`` if `rvs` contains two callables and
        `data` contains two samples) and returns the resulting statistic.
        If `vectorized` is set ``True``, `statistic` must also accept a keyword
        argument `axis` and be vectorized to compute the statistic along the
        provided `axis` of the samples in `data`.
    vectorized : bool, optional
        If `vectorized` is set ``False``, `statistic` will not be passed
        keyword argument `axis` and is expected to calculate the statistic
        only for 1D samples. If ``True``, `statistic` will be passed keyword
        argument `axis` and is expected to calculate the statistic along `axis`
        when passed ND sample arrays. If ``None`` (default), `vectorized`
        will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of
        a vectorized statistic typically reduces computation time.
    n_resamples : int, default: 9999
        Number of samples drawn from each of the callables of `rvs`.
        Equivalently, the number of statistic values under the null hypothesis
        used as the Monte Carlo null distribution.
    batch : int, optional
        The number of Monte Carlo samples to process in each call to
        `statistic`. Memory usage is O( `batch` * ``sample.size[axis]`` ). Default
        is ``None``, in which case `batch` equals `n_resamples`.
    alternative : {'two-sided', 'less', 'greater'}
        The alternative hypothesis for which the p-value is calculated.
        For each alternative, the p-value is defined as follows.

        - ``'greater'`` : the percentage of the null distribution that is
          greater than or equal to the observed value of the test statistic.
        - ``'less'`` : the percentage of the null distribution that is
          less than or equal to the observed value of the test statistic.
        - ``'two-sided'`` : twice the smaller of the p-values above.

    axis : int, default: 0
        The axis of `data` (or each sample within `data`) over which to
        calculate the statistic.

    Returns
    -------
    res : MonteCarloTestResult
        An object with attributes:

        statistic : float or ndarray
            The test statistic of the observed `data`.
        pvalue : float or ndarray
            The p-value for the given alternative.
        null_distribution : ndarray
            The values of the test statistic generated under the null
            hypothesis.

    .. warning::
        The p-value is calculated by counting the elements of the null
        distribution that are as extreme or more extreme than the observed
        value of the statistic. Due to the use of finite precision arithmetic,
        some statistic functions return numerically distinct values when the
        theoretical values would be exactly equal. In some cases, this could
        lead to a large error in the calculated p-value. `monte_carlo_test`
        guards against this by considering elements in the null distribution
        that are "close" (within a relative tolerance of 100 times the
        floating point epsilon of inexact dtypes) to the observed
        value of the test statistic as equal to the observed value of the
        test statistic. However, the user is advised to inspect the null
        distribution to assess whether this method of comparison is
        appropriate, and if not, calculate the p-value manually.

    References
    ----------

    .. [1] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
       Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
       Statistical Applications in Genetics and Molecular Biology 9.1 (2010).

    Examples
    --------

    Suppose we wish to test whether a small sample has been drawn from a normal
    distribution. We decide that we will use the skew of the sample as a
    test statistic, and we will consider a p-value of 0.05 to be statistically
    significant.

    >>> import numpy as np
    >>> from scipy import stats
    >>> def statistic(x, axis):
    ...     return stats.skew(x, axis)

    After collecting our data, we calculate the observed value of the test
    statistic.

    >>> rng = np.random.default_rng()
    >>> x = stats.skewnorm.rvs(a=1, size=50, random_state=rng)
    >>> statistic(x, axis=0)
    0.12457412450240658

    To determine the probability of observing such an extreme value of the
    skewness by chance if the sample were drawn from the normal distribution,
    we can perform a Monte Carlo hypothesis test. The test will draw many
    samples at random from their normal distribution, calculate the skewness
    of each sample, and compare our original skewness against this
    distribution to determine an approximate p-value.

    >>> from scipy.stats import monte_carlo_test
    >>> # because our statistic is vectorized, we pass `vectorized=True`
    >>> rvs = lambda size: stats.norm.rvs(size=size, random_state=rng)
    >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)
    >>> print(res.statistic)
    0.12457412450240658
    >>> print(res.pvalue)
    0.7012

    The probability of obtaining a test statistic less than or equal to the
    observed value under the null hypothesis is ~70%. This is greater than
    our chosen threshold of 5%, so we cannot consider this to be significant
    evidence against the null hypothesis.

    Note that this p-value essentially matches that of
    `scipy.stats.skewtest`, which relies on an asymptotic distribution of a
    test statistic based on the sample skewness.

    >>> stats.skewtest(x).pvalue
    0.6892046027110614

    This asymptotic approximation is not valid for small sample sizes, but
    `monte_carlo_test` can be used with samples of any size.

    >>> x = stats.skewnorm.rvs(a=1, size=7, random_state=rng)
    >>> # stats.skewtest(x) would produce an error due to small sample
    >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)

    The Monte Carlo distribution of the test statistic is provided for
    further investigation.

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.hist(res.null_distribution, bins=50)
    >>> ax.set_title("Monte Carlo distribution of test statistic")
    >>> ax.set_xlabel("Value of Statistic")
    >>> ax.set_ylabel("Frequency")
    >>> plt.show()

    """
    args = _monte_carlo_test_iv(data, rvs, statistic, vectorized,
                                n_resamples, batch, alternative, axis)
    (data, rvs, statistic, vectorized, n_resamples,
     batch, alternative, axis, dtype, xp) = args

    # Some statistics return plain floats; ensure they're at least a NumPy float
    observed = xp.asarray(statistic(*data, axis=-1))
    observed = observed[()] if observed.ndim == 0 else observed

    n_observations = [sample.shape[-1] for sample in data]
    batch_nominal = batch or n_resamples
    null_distribution = []
    for k in range(0, n_resamples, batch_nominal):
        batch_actual = min(batch_nominal, n_resamples - k)
        resamples = [rvs_i(size=(batch_actual, n_observations_i))
                     for rvs_i, n_observations_i in zip(rvs, n_observations)]
        null_distribution.append(statistic(*resamples, axis=-1))
    null_distribution = xp.concat(null_distribution)
    # Shape (n_resamples, 1, ..., 1) so comparisons broadcast against `observed`
    null_distribution = xp.reshape(null_distribution, [-1] + [1]*observed.ndim)

    # relative tolerance for detecting numerically distinct but
    # theoretically equal values in the null distribution
    eps = (0 if not xp.isdtype(observed.dtype, ('real floating'))
           else xp.finfo(observed.dtype).eps*100)
    gamma = xp.abs(eps * observed)

    # The +1 in numerator and denominator counts the observed statistic as one
    # realization under the null, which avoids p-values of exactly zero (see [1]).
    def less(null_distribution, observed):
        cmps = null_distribution <= observed + gamma
        cmps = xp.asarray(cmps, dtype=dtype)
        pvalues = (xp.sum(cmps, axis=0, dtype=dtype) + 1.) / (n_resamples + 1.)
        return pvalues

    def greater(null_distribution, observed):
        cmps = null_distribution >= observed - gamma
        cmps = xp.asarray(cmps, dtype=dtype)
        pvalues = (xp.sum(cmps, axis=0, dtype=dtype) + 1.) / (n_resamples + 1.)
        return pvalues

    def two_sided(null_distribution, observed):
        pvalues_less = less(null_distribution, observed)
        pvalues_greater = greater(null_distribution, observed)
        pvalues = xp_minimum(pvalues_less, pvalues_greater) * 2
        return pvalues

    compare = {"less": less,
               "greater": greater,
               "two-sided": two_sided}

    pvalues = compare[alternative](null_distribution, observed)
    # Doubling in `two_sided` can exceed 1; clip back into [0, 1]
    pvalues = xp_clip(pvalues, 0., 1., xp=xp)

    return MonteCarloTestResult(observed, pvalues, null_distribution)


@dataclass
class PowerResult:
    """Result object returned by `scipy.stats.power`.

    Attributes
    ----------
    power : float or ndarray
        The estimated power.
    pvalues : float or ndarray
        The simulated p-values.
    """
    power: float | np.ndarray
    pvalues: float | np.ndarray


def _wrap_kwargs(fun):
    """Wrap callable to accept arbitrary kwargs and ignore unused ones"""

    try:
        keys = set(inspect.signature(fun).parameters.keys())
    except ValueError:
        # NumPy Generator methods can't be inspected
        keys = {'size'}

    # Set keys=keys/fun=fun to avoid late binding gotcha
    def wrapped_rvs_i(*args, keys=keys, fun=fun, **all_kwargs):
        kwargs = {key: val for key, val in all_kwargs.items()
                  if key in keys}
        return fun(*args, **kwargs)
    return wrapped_rvs_i


def _power_iv(rvs, test, n_observations, significance, vectorized,
              n_resamples, batch, kwargs):
    """Input validation for `power`."""

    if vectorized not in {True, False, None}:
        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")

    # A single callable means a single sample; wrap for uniform treatment.
    if not isinstance(rvs, Sequence):
        rvs = (rvs,)
        n_observations = (n_observations,)
    for rvs_i in rvs:
        if not callable(rvs_i):
            raise TypeError("`rvs` must be callable or sequence of callables.")

    if not len(rvs) == len(n_observations):
        message = ("If `rvs` is a sequence, `len(rvs)` "
                   "must equal `len(n_observations)`.")
        raise ValueError(message)

    significance = np.asarray(significance)[()]
    if (not
            np.issubdtype(significance.dtype, np.floating)
            or np.min(significance) < 0 or np.max(significance) > 1):
        raise ValueError("`significance` must contain floats between 0 and 1.")

    kwargs = dict() if kwargs is None else kwargs
    if not isinstance(kwargs, dict):
        raise TypeError("`kwargs` must be a dictionary that maps keywords to arrays.")

    vals = kwargs.values()
    keys = kwargs.keys()

    # Wrap callables to ignore unused keyword arguments
    wrapped_rvs = [_wrap_kwargs(rvs_i) for rvs_i in rvs]

    # Broadcast, then ravel nobs/kwarg combinations. In the end,
    # `nobs` and `vals` have shape (# of combinations, number of variables)
    tmp = np.asarray(np.broadcast_arrays(*n_observations, *vals))
    shape = tmp.shape
    if tmp.ndim == 1:
        # Only scalars were provided: a single combination
        tmp = tmp[np.newaxis, :]
    else:
        tmp = tmp.reshape((shape[0], -1)).T
    # First len(rvs) columns are sample sizes; the rest are kwarg values
    nobs, vals = tmp[:, :len(rvs)], tmp[:, len(rvs):]
    nobs = nobs.astype(int)

    if not callable(test):
        raise TypeError("`test` must be callable.")

    if vectorized is None:
        vectorized = 'axis' in inspect.signature(test).parameters

    if not vectorized:
        test_vectorized = _vectorize_statistic(test)
    else:
        test_vectorized = test
    # Wrap `test` function to ignore unused kwargs
    test_vectorized = _wrap_kwargs(test_vectorized)

    n_resamples_int = int(n_resamples)
    if n_resamples != n_resamples_int or n_resamples_int <= 0:
        raise ValueError("`n_resamples` must be a positive integer.")

    if batch is None:
        batch_iv = batch
    else:
        batch_iv = int(batch)
        if batch != batch_iv or batch_iv <= 0:
            raise ValueError("`batch` must be a positive integer or None.")

    return (wrapped_rvs, test_vectorized, nobs, significance, vectorized,
            n_resamples_int, batch_iv, vals, keys, shape[1:])


def power(test, rvs, n_observations, *, significance=0.01, vectorized=None,
          n_resamples=10000, batch=None, kwargs=None):
    r"""Simulate the power of a hypothesis test under an alternative hypothesis.

    Parameters
    ----------
    test : callable
        Hypothesis test for which the power is to be simulated.
        `test` must be a callable that accepts a sample (e.g. ``test(sample)``)
        or ``len(rvs)`` separate samples (e.g. ``test(samples1, sample2)`` if
        `rvs` contains two callables and `n_observations` contains two values)
        and returns the p-value of the test.
        If `vectorized` is set to ``True``, `test` must also accept a keyword
        argument `axis` and be vectorized to perform the test along the
        provided `axis` of the samples.
        Any callable from `scipy.stats` with an `axis` argument that returns an
        object with a `pvalue` attribute is also acceptable.
    rvs : callable or tuple of callables
        A callable or sequence of callables that generate(s) random variates
        under the alternative hypothesis. Each element of `rvs` must accept
        keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and return an
        N-d array of that shape. If `rvs` is a sequence, the number of callables
        in `rvs` must match the number of elements of `n_observations`, i.e.
        ``len(rvs) == len(n_observations)``. If `rvs` is a single callable,
        `n_observations` is treated as a single element.
    n_observations : tuple of ints or tuple of integer arrays
        If a sequence of ints, each is the size of a sample to be passed to `test`.
        If a sequence of integer arrays, the power is simulated for each
        set of corresponding sample sizes. See Examples.
    significance : float or array_like of floats, default: 0.01
        The threshold for significance; i.e., the p-value below which the
        hypothesis test results will be considered as evidence against the null
        hypothesis. Equivalently, the acceptable rate of Type I error under
        the null hypothesis. If an array, the power is simulated for each
        significance threshold.
    kwargs : dict, optional
        Keyword arguments to be passed to `rvs` and/or `test` callables.
        Introspection is used to determine which keyword arguments may be
        passed to each callable.
        The value corresponding with each keyword must be an array.
        Arrays must be broadcastable with one another and with each array in
        `n_observations`. The power is simulated for each set of corresponding
        sample sizes and arguments. See Examples.
    vectorized : bool, optional
        If `vectorized` is set to ``False``, `test` will not be passed keyword
        argument `axis` and is expected to perform the test only for 1D samples.
        If ``True``, `test` will be passed keyword argument `axis` and is
        expected to perform the test along `axis` when passed N-D sample arrays.
        If ``None`` (default), `vectorized` will be set ``True`` if ``axis`` is
        a parameter of `test`. Use of a vectorized test typically reduces
        computation time.
    n_resamples : int, default: 10000
        Number of samples drawn from each of the callables of `rvs`.
        Equivalently, the number of tests performed under the alternative
        hypothesis to approximate the power.
    batch : int, optional
        The number of samples to process in each call to `test`. Memory usage is
        proportional to the product of `batch` and the largest sample size. Default
        is ``None``, in which case `batch` equals `n_resamples`.

    Returns
    -------
    res : PowerResult
        An object with attributes:

        power : float or ndarray
            The estimated power against the alternative.
        pvalues : ndarray
            The p-values observed under the alternative hypothesis.

    Notes
    -----
    The power is simulated as follows:

    - Draw many random samples (or sets of samples), each of the size(s)
      specified by `n_observations`, under the alternative specified by
      `rvs`.
    - For each sample (or set of samples), compute the p-value according to
      `test`. These p-values are recorded in the ``pvalues`` attribute of
      the result object.
    - Compute the proportion of p-values that are less than the `significance`
      level. This is the power recorded in the ``power`` attribute of the
      result object.
+ + Suppose that `significance` is an array with shape ``shape1``, the elements + of `kwargs` and `n_observations` are mutually broadcastable to shape ``shape2``, + and `test` returns an array of p-values of shape ``shape3``. Then the result + object ``power`` attribute will be of shape ``shape1 + shape2 + shape3``, and + the ``pvalues`` attribute will be of shape ``shape2 + shape3 + (n_resamples,)``. + + Examples + -------- + Suppose we wish to simulate the power of the independent sample t-test + under the following conditions: + + - The first sample has 10 observations drawn from a normal distribution + with mean 0. + - The second sample has 12 observations drawn from a normal distribution + with mean 1.0. + - The threshold on p-values for significance is 0.05. + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng(2549598345528) + >>> + >>> test = stats.ttest_ind + >>> n_observations = (10, 12) + >>> rvs1 = rng.normal + >>> rvs2 = lambda size: rng.normal(loc=1, size=size) + >>> rvs = (rvs1, rvs2) + >>> res = stats.power(test, rvs, n_observations, significance=0.05) + >>> res.power + 0.6116 + + With samples of size 10 and 12, respectively, the power of the t-test + with a significance threshold of 0.05 is approximately 60% under the chosen + alternative. We can investigate the effect of sample size on the power + by passing sample size arrays. + + >>> import matplotlib.pyplot as plt + >>> nobs_x = np.arange(5, 21) + >>> nobs_y = nobs_x + >>> n_observations = (nobs_x, nobs_y) + >>> res = stats.power(test, rvs, n_observations, significance=0.05) + >>> ax = plt.subplot() + >>> ax.plot(nobs_x, res.power) + >>> ax.set_xlabel('Sample Size') + >>> ax.set_ylabel('Simulated Power') + >>> ax.set_title('Simulated Power of `ttest_ind` with Equal Sample Sizes') + >>> plt.show() + + Alternatively, we can investigate the impact that effect size has on the power. 
In this case, the effect size is the location of the distribution underlying
    the second sample.

    >>> n_observations = (10, 12)
    >>> loc = np.linspace(0, 1, 20)
    >>> rvs2 = lambda size, loc: rng.normal(loc=loc, size=size)
    >>> rvs = (rvs1, rvs2)
    >>> res = stats.power(test, rvs, n_observations, significance=0.05,
    ...                   kwargs={'loc': loc})
    >>> ax = plt.subplot()
    >>> ax.plot(loc, res.power)
    >>> ax.set_xlabel('Effect Size')
    >>> ax.set_ylabel('Simulated Power')
    >>> ax.set_title('Simulated Power of `ttest_ind`, Varying Effect Size')
    >>> plt.show()

    We can also use `power` to estimate the Type I error rate (also referred to by the
    ambiguous term "size") of a test and assess whether it matches the nominal level.
    For example, the null hypothesis of `jarque_bera` is that the sample was drawn from
    a distribution with the same skewness and kurtosis as the normal distribution. To
    estimate the Type I error rate, we can consider the null hypothesis to be a true
    *alternative* hypothesis and calculate the power.

    >>> test = stats.jarque_bera
    >>> n_observations = 10
    >>> rvs = rng.normal
    >>> significance = np.linspace(0.0001, 0.1, 1000)
    >>> res = stats.power(test, rvs, n_observations, significance=significance)
    >>> size = res.power

    As shown below, the Type I error rate of the test is far below the nominal level
    for such a small sample, as mentioned in its documentation.

    >>> ax = plt.subplot()
    >>> ax.plot(significance, size)
    >>> ax.plot([0, 0.1], [0, 0.1], '--')
    >>> ax.set_xlabel('nominal significance level')
    >>> ax.set_ylabel('estimated test size (Type I error rate)')
    >>> ax.set_title('Estimated test size vs nominal significance level')
    >>> ax.set_aspect('equal', 'box')
    >>> ax.legend(('`jarque_bera`', 'ideal test'))
    >>> plt.show()

    As one might expect from such a conservative test, the power is quite low with
    respect to some alternatives.
For example, the power of the test under the + alternative that the sample was drawn from the Laplace distribution may not + be much greater than the Type I error rate. + + >>> rvs = rng.laplace + >>> significance = np.linspace(0.0001, 0.1, 1000) + >>> res = stats.power(test, rvs, n_observations, significance=0.05) + >>> print(res.power) + 0.0587 + + This is not a mistake in SciPy's implementation; it is simply due to the fact + that the null distribution of the test statistic is derived under the assumption + that the sample size is large (i.e. approaches infinity), and this asymptotic + approximation is not accurate for small samples. In such cases, resampling + and Monte Carlo methods (e.g. `permutation_test`, `goodness_of_fit`, + `monte_carlo_test`) may be more appropriate. + + """ + tmp = _power_iv(rvs, test, n_observations, significance, + vectorized, n_resamples, batch, kwargs) + (rvs, test, nobs, significance, + vectorized, n_resamples, batch, args, kwds, shape)= tmp + + batch_nominal = batch or n_resamples + pvalues = [] # results of various nobs/kwargs combinations + for nobs_i, args_i in zip(nobs, args): + kwargs_i = dict(zip(kwds, args_i)) + pvalues_i = [] # results of batches; fixed nobs/kwargs combination + for k in range(0, n_resamples, batch_nominal): + batch_actual = min(batch_nominal, n_resamples - k) + resamples = [rvs_j(size=(batch_actual, nobs_ij), **kwargs_i) + for rvs_j, nobs_ij in zip(rvs, nobs_i)] + res = test(*resamples, **kwargs_i, axis=-1) + p = getattr(res, 'pvalue', res) + pvalues_i.append(p) + # Concatenate results from batches + pvalues_i = np.concatenate(pvalues_i, axis=-1) + pvalues.append(pvalues_i) + # `test` can return result with array of p-values + shape += pvalues_i.shape[:-1] + # Concatenate results from various nobs/kwargs combinations + pvalues = np.concatenate(pvalues, axis=0) + # nobs/kwargs arrays were raveled to single axis; unravel + pvalues = pvalues.reshape(shape + (-1,)) + if significance.ndim > 0: + newdims = 
tuple(range(significance.ndim, pvalues.ndim + significance.ndim))
        # Append singleton axes to `significance` so each significance
        # threshold broadcasts against the entire array of p-values;
        # the resulting `power` has shape shape1 + shape2 + shape3 (see Notes).
        significance = np.expand_dims(significance, newdims)
    # Power: proportion of p-values below the significance threshold,
    # averaged over the resamples axis (the last axis of `pvalues`).
    powers = np.mean(pvalues < significance, axis=-1)

    return PowerResult(power=powers, pvalues=pvalues)


@dataclass
class PermutationTestResult:
    """Result object returned by `scipy.stats.permutation_test`.

    Attributes
    ----------
    statistic : float or ndarray
        The observed test statistic of the data.
    pvalue : float or ndarray
        The p-value for the given alternative.
    null_distribution : ndarray
        The values of the test statistic generated under the null
        hypothesis.
    """
    statistic: float | np.ndarray
    pvalue: float | np.ndarray
    null_distribution: np.ndarray


def _all_partitions_concatenated(ns):
    """
    Generate all partitions of indices of groups of given sizes, concatenated

    `ns` is an iterable of ints.
    """
    def all_partitions(z, n):
        # Yield every way of splitting index set `z` into a subset of
        # size `n` and its complement.
        for c in combinations(z, n):
            x0 = set(c)
            x1 = z - x0
            yield [x0, x1]

    def all_partitions_n(z, ns):
        # Recursively partition `z` into groups of sizes `ns`, with the
        # remaining indices forming the final group.
        if len(ns) == 0:
            yield [z]
            return
        for c in all_partitions(z, ns[0]):
            for d in all_partitions_n(c[1], ns[1:]):
                yield c[0:1] + d

    z = set(range(np.sum(ns)))
    for partitioning in all_partitions_n(z, ns[:]):
        # Flatten the index groups into a single index array usable for
        # advanced indexing of the pooled data.
        x = np.concatenate([list(partition)
                            for partition in partitioning]).astype(int)
        yield x


def _batch_generator(iterable, batch):
    """A generator that yields batches of elements from an iterable"""
    iterator = iter(iterable)
    if batch <= 0:
        raise ValueError("`batch` must be positive.")
    # `zip` with `range(batch)` takes up to `batch` items without raising
    # StopIteration when the iterator runs out early.
    z = [item for i, item in zip(range(batch), iterator)]
    while z:  # we don't want StopIteration without yielding an empty list
        yield z
        z = [item for i, item in zip(range(batch), iterator)]


def _pairings_permutations_gen(n_permutations, n_samples, n_obs_sample, batch,
                               random_state):
    # Returns a generator that yields arrays of size
    # `(batch, n_samples, n_obs_sample)`.
    # Each row is an independent permutation of indices 0 to `n_obs_sample`.
    batch = min(batch, n_permutations)

    if hasattr(random_state, 'permuted'):
        # `Generator.permuted` shuffles each axis-slice independently, so a
        # whole batch of permutations can be drawn in a single call.
        def batched_perm_generator():
            indices = np.arange(n_obs_sample)
            indices = np.tile(indices, (batch, n_samples, 1))
            for k in range(0, n_permutations, batch):
                batch_actual = min(batch, n_permutations-k)
                # Don't permute in place, otherwise results depend on `batch`
                permuted_indices = random_state.permuted(indices, axis=-1)
                yield permuted_indices[:batch_actual]
    else:  # RandomState and early Generators don't have `permuted`
        def batched_perm_generator():
            for k in range(0, n_permutations, batch):
                batch_actual = min(batch, n_permutations-k)
                size = (batch_actual, n_samples, n_obs_sample)
                # argsort of i.i.d. uniform draws yields an independent
                # random permutation within each length-`n_obs_sample` slice
                x = random_state.random(size=size)
                yield np.argsort(x, axis=-1)[:batch_actual]

    return batched_perm_generator()


def _calculate_null_both(data, statistic, n_permutations, batch,
                         random_state=None):
    """
    Calculate null distribution for independent sample tests.
    """
    n_samples = len(data)

    # compute number of permutations
    # (distinct partitions of data into samples of these sizes)
    n_obs_i = [sample.shape[-1] for sample in data]  # observations per sample
    n_obs_ic = np.cumsum(n_obs_i)
    n_obs = n_obs_ic[-1]  # total number of observations
    # product of binomial coefficients: the number of distinct ways to
    # partition the pooled observations into samples of the given sizes
    n_max = np.prod([comb(n_obs_ic[i], n_obs_ic[i-1])
                     for i in range(n_samples-1, 0, -1)])

    # perm_generator is an iterator that produces permutations of indices
    # from 0 to n_obs. We'll concatenate the samples, use these indices to
    # permute the data, then split the samples apart again.
    if n_permutations >= n_max:
        exact_test = True
        n_permutations = n_max
        perm_generator = _all_partitions_concatenated(n_obs_i)
    else:
        exact_test = False
        # Neither RandomState.permutation nor Generator.permutation
        # can permute axis-slices independently. If this feature is
        # added in the future, batches of the desired size should be
        # generated in a single call.
    perm_generator = (random_state.permutation(n_obs)
                      for i in range(n_permutations))

    # by default, process all permutations in a single batch
    batch = batch or int(n_permutations)
    null_distribution = []

    # First, concatenate all the samples. In batches, permute samples with
    # indices produced by the `perm_generator`, split them into new samples of
    # the original sizes, compute the statistic for each batch, and add these
    # statistic values to the null distribution.
    data = np.concatenate(data, axis=-1)
    for indices in _batch_generator(perm_generator, batch=batch):
        indices = np.array(indices)

        # `indices` is 2D: each row is a permutation of the indices.
        # We use it to index `data` along its last axis, which corresponds
        # with observations.
        # After indexing, the second to last axis of `data_batch` corresponds
        # with permutations, and the last axis corresponds with observations.
        data_batch = data[..., indices]

        # Move the permutation axis to the front: we'll concatenate a list
        # of batched statistic values along this zeroth axis to form the
        # null distribution.
        data_batch = np.moveaxis(data_batch, -2, 0)
        # split the permuted pool back into samples of the original sizes
        data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1)
        null_distribution.append(statistic(*data_batch, axis=-1))
    null_distribution = np.concatenate(null_distribution, axis=0)

    return null_distribution, n_permutations, exact_test


def _calculate_null_pairings(data, statistic, n_permutations, batch,
                             random_state=None):
    """
    Calculate null distribution for association tests.
    """
    n_samples = len(data)

    # compute number of permutations (factorial(n) permutations of each sample)
    n_obs_sample = data[0].shape[-1]  # observations per sample; same for each
    n_max = factorial(n_obs_sample)**n_samples

    # `perm_generator` is an iterator that produces a list of permutations of
    # indices from 0 to n_obs_sample, one for each sample.
    if n_permutations >= n_max:
        exact_test = True
        n_permutations = n_max
        batch = batch or int(n_permutations)
        # cartesian product of the sets of all permutations of indices:
        # the exact test evaluates every combination of per-sample
        # permutations exactly once
        perm_generator = product(*(permutations(range(n_obs_sample))
                                   for i in range(n_samples)))
        batched_perm_generator = _batch_generator(perm_generator, batch=batch)
    else:
        exact_test = False
        batch = batch or int(n_permutations)
        # Separate random permutations of indices for each sample.
        # Again, it would be nice if RandomState/Generator.permutation
        # could permute each axis-slice separately.
        args = n_permutations, n_samples, n_obs_sample, batch, random_state
        batched_perm_generator = _pairings_permutations_gen(*args)

    null_distribution = []

    for indices in batched_perm_generator:
        indices = np.array(indices)

        # `indices` is 3D: the zeroth axis is for permutations, the next is
        # for samples, and the last is for observations. Swap the first two
        # to make the zeroth axis correspond with samples, as it does for
        # `data`.
        indices = np.swapaxes(indices, 0, 1)

        # When we're done, `data_batch` will be a list of length `n_samples`.
        # Each element will be a batch of random permutations of one sample.
        # The zeroth axis of each batch will correspond with permutations,
        # and the last will correspond with observations. (This makes it
        # easy to pass into `statistic`.)
        data_batch = [None]*n_samples
        for i in range(n_samples):
            data_batch[i] = data[i][..., indices[i]]
            data_batch[i] = np.moveaxis(data_batch[i], -2, 0)

        null_distribution.append(statistic(*data_batch, axis=-1))
    null_distribution = np.concatenate(null_distribution, axis=0)

    return null_distribution, n_permutations, exact_test


def _calculate_null_samples(data, statistic, n_permutations, batch,
                            random_state=None):
    """
    Calculate null distribution for paired-sample tests.
    """
    n_samples = len(data)

    # By convention, the meaning of the "samples" permutations type for
    # data with only one sample is to flip the sign of the observations.
    # Achieve this by adding a second sample - the negative of the original.
    if n_samples == 1:
        data = [data[0], -data[0]]

    # The "samples" permutation strategy is the same as the "pairings"
    # strategy except the roles of samples and observations are flipped.
    # So swap these axes, then we'll use the function for the "pairings"
    # strategy to do all the work!
    # NOTE(review): `np.swapaxes` converts the list of samples into one
    # ndarray, so samples are expected to share a (broadcast) shape here.
    data = np.swapaxes(data, 0, -1)

    # (Of course, the user's statistic doesn't know what we've done here,
    # so we need to pass it what it's expecting.)
    def statistic_wrapped(*data, axis):
        # undo the axis swap before calling the user's statistic; in the
        # one-sample case, also drop the synthetic negated sample
        data = np.swapaxes(data, 0, -1)
        if n_samples == 1:
            data = data[0:1]
        return statistic(*data, axis=axis)

    return _calculate_null_pairings(data, statistic_wrapped, n_permutations,
                                    batch, random_state)


def _permutation_test_iv(data, statistic, permutation_type, vectorized,
                         n_resamples, batch, alternative, axis, random_state):
    """Input validation for `permutation_test`."""

    # `axis` must be exactly representable as an int (e.g. 0, 0.0)
    axis_int = int(axis)
    if axis != axis_int:
        raise ValueError("`axis` must be an integer.")

    permutation_types = {'samples', 'pairings', 'independent'}
    permutation_type = permutation_type.lower()
    if permutation_type not in permutation_types:
        raise ValueError(f"`permutation_type` must be in {permutation_types}.")

    if vectorized not in {True, False, None}:
        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")

    if vectorized is None:
        # infer vectorization from the statistic's signature
        vectorized = 'axis' in inspect.signature(statistic).parameters

    if not vectorized:
        statistic = _vectorize_statistic(statistic)

    message = "`data` must be a tuple containing at least two samples"
    try:
        if len(data) < 2 and permutation_type == 'independent':
            raise ValueError(message)
    except TypeError:
        # `data` has no len(); e.g. a single unsized iterable was passed
        raise TypeError(message)

    data = _broadcast_arrays(data, axis)
    data_iv
= []
    for sample in data:
        sample = np.atleast_1d(sample)
        if sample.shape[axis] <= 1:
            raise ValueError("each sample in `data` must contain two or more "
                             "observations along `axis`.")
        # move the observation axis last; internal helpers work on axis=-1
        sample = np.moveaxis(sample, axis_int, -1)
        data_iv.append(sample)

    # `n_resamples=np.inf` is allowed: it requests an exact test
    n_resamples_int = (int(n_resamples) if not np.isinf(n_resamples)
                       else np.inf)
    if n_resamples != n_resamples_int or n_resamples_int <= 0:
        raise ValueError("`n_resamples` must be a positive integer.")

    if batch is None:
        batch_iv = batch
    else:
        batch_iv = int(batch)
        if batch != batch_iv or batch_iv <= 0:
            raise ValueError("`batch` must be a positive integer or None.")

    alternatives = {'two-sided', 'greater', 'less'}
    alternative = alternative.lower()
    if alternative not in alternatives:
        raise ValueError(f"`alternative` must be in {alternatives}")

    random_state = check_random_state(random_state)

    return (data_iv, statistic, permutation_type, vectorized, n_resamples_int,
            batch_iv, alternative, axis_int, random_state)


def permutation_test(data, statistic, *, permutation_type='independent',
                     vectorized=None, n_resamples=9999, batch=None,
                     alternative="two-sided", axis=0, random_state=None):
    r"""
    Performs a permutation test of a given statistic on provided data.

    For independent sample statistics, the null hypothesis is that the data are
    randomly sampled from the same distribution.
    For paired sample statistics, two null hypothesis can be tested:
    that the data are paired at random or that the data are assigned to samples
    at random.

    Parameters
    ----------
    data : iterable of array-like
        Contains the samples, each of which is an array of observations.
        Dimensions of sample arrays must be compatible for broadcasting except
        along `axis`.
    statistic : callable
        Statistic for which the p-value of the hypothesis test is to be
        calculated. `statistic` must be a callable that accepts samples
        as separate arguments (e.g.
``statistic(*data)``) and returns the + resulting statistic. + If `vectorized` is set ``True``, `statistic` must also accept a keyword + argument `axis` and be vectorized to compute the statistic along the + provided `axis` of the sample arrays. + permutation_type : {'independent', 'samples', 'pairings'}, optional + The type of permutations to be performed, in accordance with the + null hypothesis. The first two permutation types are for paired sample + statistics, in which all samples contain the same number of + observations and observations with corresponding indices along `axis` + are considered to be paired; the third is for independent sample + statistics. + + - ``'samples'`` : observations are assigned to different samples + but remain paired with the same observations from other samples. + This permutation type is appropriate for paired sample hypothesis + tests such as the Wilcoxon signed-rank test and the paired t-test. + - ``'pairings'`` : observations are paired with different observations, + but they remain within the same sample. This permutation type is + appropriate for association/correlation tests with statistics such + as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's + :math:`r`. + - ``'independent'`` (default) : observations are assigned to different + samples. Samples may contain different numbers of observations. This + permutation type is appropriate for independent sample hypothesis + tests such as the Mann-Whitney :math:`U` test and the independent + sample t-test. + + Please see the Notes section below for more detailed descriptions + of the permutation types. + + vectorized : bool, optional + If `vectorized` is set ``False``, `statistic` will not be passed + keyword argument `axis` and is expected to calculate the statistic + only for 1D samples. If ``True``, `statistic` will be passed keyword + argument `axis` and is expected to calculate the statistic along `axis` + when passed an ND sample array. 
If ``None`` (default), `vectorized` + will be set ``True`` if ``axis`` is a parameter of `statistic`. Use + of a vectorized statistic typically reduces computation time. + n_resamples : int or np.inf, default: 9999 + Number of random permutations (resamples) used to approximate the null + distribution. If greater than or equal to the number of distinct + permutations, the exact null distribution will be computed. + Note that the number of distinct permutations grows very rapidly with + the sizes of samples, so exact tests are feasible only for very small + data sets. + batch : int, optional + The number of permutations to process in each call to `statistic`. + Memory usage is O( `batch` * ``n`` ), where ``n`` is the total size + of all samples, regardless of the value of `vectorized`. Default is + ``None``, in which case ``batch`` is the number of permutations. + alternative : {'two-sided', 'less', 'greater'}, optional + The alternative hypothesis for which the p-value is calculated. + For each alternative, the p-value is defined for exact tests as + follows. + + - ``'greater'`` : the percentage of the null distribution that is + greater than or equal to the observed value of the test statistic. + - ``'less'`` : the percentage of the null distribution that is + less than or equal to the observed value of the test statistic. + - ``'two-sided'`` (default) : twice the smaller of the p-values above. + + Note that p-values for randomized tests are calculated according to the + conservative (over-estimated) approximation suggested in [2]_ and [3]_ + rather than the unbiased estimator suggested in [4]_. That is, when + calculating the proportion of the randomized null distribution that is + as extreme as the observed value of the test statistic, the values in + the numerator and denominator are both increased by one. 
An + interpretation of this adjustment is that the observed value of the + test statistic is always included as an element of the randomized + null distribution. + The convention used for two-sided p-values is not universal; + the observed test statistic and null distribution are returned in + case a different definition is preferred. + + axis : int, default: 0 + The axis of the (broadcasted) samples over which to calculate the + statistic. If samples have a different number of dimensions, + singleton dimensions are prepended to samples with fewer dimensions + before `axis` is considered. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate permutations. + + If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + res : PermutationTestResult + An object with attributes: + + statistic : float or ndarray + The observed test statistic of the data. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + + Notes + ----- + + The three types of permutation tests supported by this function are + described below. + + **Unpaired statistics** (``permutation_type='independent'``): + + The null hypothesis associated with this permutation type is that all + observations are sampled from the same underlying distribution and that + they have been assigned to one of the samples at random. + + Suppose ``data`` contains two samples; e.g. ``a, b = data``. 
+ When ``1 < n_resamples < binom(n, k)``, where + + * ``k`` is the number of observations in ``a``, + * ``n`` is the total number of observations in ``a`` and ``b``, and + * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), + + the data are pooled (concatenated), randomly assigned to either the first + or second sample, and the statistic is calculated. This process is + performed repeatedly, `permutation` times, generating a distribution of the + statistic under the null hypothesis. The statistic of the original + data is compared to this distribution to determine the p-value. + + When ``n_resamples >= binom(n, k)``, an exact test is performed: the data + are *partitioned* between the samples in each distinct way exactly once, + and the exact null distribution is formed. + Note that for a given partitioning of the data between the samples, + only one ordering/permutation of the data *within* each sample is + considered. For statistics that do not depend on the order of the data + within samples, this dramatically reduces computational cost without + affecting the shape of the null distribution (because the frequency/count + of each value is affected by the same factor). + + For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this + permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``. + Because only one ordering/permutation of the data *within* each sample + is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]`` + and ``y = [a4, a3, b1]`` would *not* be considered distinct from the + example above. + + ``permutation_type='independent'`` does not support one-sample statistics, + but it can be applied to statistics with more than two samples. 
In this + case, if ``n`` is an array of the number of observations within each + sample, the number of distinct partitions is:: + + np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]) + + **Paired statistics, permute pairings** (``permutation_type='pairings'``): + + The null hypothesis associated with this permutation type is that + observations within each sample are drawn from the same underlying + distribution and that pairings with elements of other samples are + assigned at random. + + Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we + wish to consider all possible pairings of elements of ``a`` with elements + of a second sample, ``b``. Let ``n`` be the number of observations in + ``a``, which must also equal the number of observations in ``b``. + + When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are + randomly permuted. The user-supplied statistic accepts one data argument, + say ``a_perm``, and calculates the statistic considering ``a_perm`` and + ``b``. This process is performed repeatedly, `permutation` times, + generating a distribution of the statistic under the null hypothesis. + The statistic of the original data is compared to this distribution to + determine the p-value. + + When ``n_resamples >= factorial(n)``, an exact test is performed: + ``a`` is permuted in each distinct way exactly once. Therefore, the + `statistic` is computed for each unique pairing of samples between ``a`` + and ``b`` exactly once. + + For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this + permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left + in its original order. + + ``permutation_type='pairings'`` supports ``data`` containing any number + of samples, each of which must contain the same number of observations. + All samples provided in ``data`` are permuted *independently*. 
Therefore,
    if ``m`` is the number of samples and ``n`` is the number of observations
    within each sample, then the number of permutations in an exact test is::

        factorial(n)**m

    Note that if a two-sample statistic, for example, does not inherently
    depend on the order in which observations are provided - only on the
    *pairings* of observations - then only one of the two samples should be
    provided in ``data``. This dramatically reduces computational cost without
    affecting the shape of the null distribution (because the frequency/count
    of each value is affected by the same factor).

    **Paired statistics, permute samples** (``permutation_type='samples'``):

    The null hypothesis associated with this permutation type is that
    observations within each pair are drawn from the same underlying
    distribution and that the sample to which they are assigned is random.

    Suppose ``data`` contains two samples; e.g. ``a, b = data``.
    Let ``n`` be the number of observations in ``a``, which must also equal
    the number of observations in ``b``.

    When ``1 < n_resamples < 2**n``, the elements of ``a`` and ``b`` are
    randomly swapped between samples (maintaining their pairings) and the
    statistic is calculated. This process is performed repeatedly,
    `permutation` times, generating a distribution of the statistic under the
    null hypothesis. The statistic of the original data is compared to this
    distribution to determine the p-value.

    When ``n_resamples >= 2**n``, an exact test is performed: the observations
    are assigned to the two samples in each distinct way (while maintaining
    pairings) exactly once.

    For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
    permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``.

    ``permutation_type='samples'`` supports ``data`` containing any number
    of samples, each of which must contain the same number of observations.
+ If ``data`` contains more than one sample, paired observations within + ``data`` are exchanged between samples *independently*. Therefore, if ``m`` + is the number of samples and ``n`` is the number of observations within + each sample, then the number of permutations in an exact test is:: + + factorial(m)**n + + Several paired-sample statistical tests, such as the Wilcoxon signed rank + test and paired-sample t-test, can be performed considering only the + *difference* between two paired elements. Accordingly, if ``data`` contains + only one sample, then the null distribution is formed by independently + changing the *sign* of each observation. + + .. warning:: + The p-value is calculated by counting the elements of the null + distribution that are as extreme or more extreme than the observed + value of the statistic. Due to the use of finite precision arithmetic, + some statistic functions return numerically distinct values when the + theoretical values would be exactly equal. In some cases, this could + lead to a large error in the calculated p-value. `permutation_test` + guards against this by considering elements in the null distribution + that are "close" (within a relative tolerance of 100 times the + floating point epsilon of inexact dtypes) to the observed + value of the test statistic as equal to the observed value of the + test statistic. However, the user is advised to inspect the null + distribution to assess whether this method of comparison is + appropriate, and if not, calculate the p-value manually. See example + below. + + References + ---------- + + .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951). + .. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." + Statistical Applications in Genetics and Molecular Biology 9.1 (2010). + .. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference". + Statistical Science (2004). + .. [4] B. 
Efron and R. J. Tibshirani. An Introduction to the Bootstrap + (1993). + + Examples + -------- + + Suppose we wish to test whether two samples are drawn from the same + distribution. Assume that the underlying distributions are unknown to us, + and that before observing the data, we hypothesized that the mean of the + first sample would be less than that of the second sample. We decide that + we will use the difference between the sample means as a test statistic, + and we will consider a p-value of 0.05 to be statistically significant. + + For efficiency, we write the function defining the test statistic in a + vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the + statistic will be calculated for each axis-slice along `axis`. + + >>> import numpy as np + >>> def statistic(x, y, axis): + ... return np.mean(x, axis=axis) - np.mean(y, axis=axis) + + After collecting our data, we calculate the observed value of the test + statistic. + + >>> from scipy.stats import norm + >>> rng = np.random.default_rng() + >>> x = norm.rvs(size=5, random_state=rng) + >>> y = norm.rvs(size=6, loc = 3, random_state=rng) + >>> statistic(x, y, 0) + -3.5411688580987266 + + Indeed, the test statistic is negative, suggesting that the true mean of + the distribution underlying ``x`` is less than that of the distribution + underlying ``y``. To determine the probability of this occurring by chance + if the two samples were drawn from the same distribution, we perform + a permutation test. + + >>> from scipy.stats import permutation_test + >>> # because our statistic is vectorized, we pass `vectorized=True` + >>> # `n_resamples=np.inf` indicates that an exact test is to be performed + >>> res = permutation_test((x, y), statistic, vectorized=True, + ... 
n_resamples=np.inf, alternative='less')
    >>> print(res.statistic)
    -3.5411688580987266
    >>> print(res.pvalue)
    0.004329004329004329

    The probability of obtaining a test statistic less than or equal to the
    observed value under the null hypothesis is 0.4329%. This is less than our
    chosen threshold of 5%, so we consider this to be significant evidence
    against the null hypothesis in favor of the alternative.

    Because the size of the samples above was small, `permutation_test` could
    perform an exact test. For larger samples, we resort to a randomized
    permutation test.

    >>> x = norm.rvs(size=100, random_state=rng)
    >>> y = norm.rvs(size=120, loc=0.2, random_state=rng)
    >>> res = permutation_test((x, y), statistic, n_resamples=9999,
    ...                        vectorized=True, alternative='less',
    ...                        random_state=rng)
    >>> print(res.statistic)
    -0.4230459671240913
    >>> print(res.pvalue)
    0.0015

    The approximate probability of obtaining a test statistic less than or
    equal to the observed value under the null hypothesis is 0.15%. This is
    again less than our chosen threshold of 5%, so again we have significant
    evidence to reject the null hypothesis in favor of the alternative.

    For large samples and number of permutations, the result is comparable to
    that of the corresponding asymptotic test, the independent sample t-test.

    >>> from scipy.stats import ttest_ind
    >>> res_asymptotic = ttest_ind(x, y, alternative='less')
    >>> print(res_asymptotic.pvalue)
    0.0014669545224902675

    The permutation distribution of the test statistic is provided for
    further investigation.

    >>> import matplotlib.pyplot as plt
    >>> plt.hist(res.null_distribution, bins=50)
    >>> plt.title("Permutation distribution of test statistic")
    >>> plt.xlabel("Value of Statistic")
    >>> plt.ylabel("Frequency")
    >>> plt.show()

    Inspection of the null distribution is essential if the statistic suffers
    from inaccuracy due to limited machine precision.
Consider the following + case: + + >>> from scipy.stats import pearsonr + >>> x = [1, 2, 4, 3] + >>> y = [2, 4, 6, 8] + >>> def statistic(x, y, axis=-1): + ... return pearsonr(x, y, axis=axis).statistic + >>> res = permutation_test((x, y), statistic, vectorized=True, + ... permutation_type='pairings', + ... alternative='greater') + >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution + + In this case, some elements of the null distribution differ from the + observed value of the correlation coefficient ``r`` due to numerical noise. + We manually inspect the elements of the null distribution that are nearly + the same as the observed value of the test statistic. + + >>> r + 0.7999999999999999 + >>> unique = np.unique(null) + >>> unique + array([-1. , -1. , -0.8, -0.8, -0.8, -0.6, -0.4, -0.4, -0.2, -0.2, -0.2, + 0. , 0.2, 0.2, 0.2, 0.4, 0.4, 0.6, 0.8, 0.8, 0.8, 1. , + 1. ]) # may vary + >>> unique[np.isclose(r, unique)].tolist() + [0.7999999999999998, 0.7999999999999999, 0.8] # may vary + + If `permutation_test` were to perform the comparison naively, the + elements of the null distribution with value ``0.7999999999999998`` would + not be considered as extreme or more extreme as the observed value of the + statistic, so the calculated p-value would be too small. + + >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null) + >>> incorrect_pvalue + 0.14583333333333334 # may vary + + Instead, `permutation_test` treats elements of the null distribution that + are within ``max(1e-14, abs(r)*1e-14)`` of the observed value of the + statistic ``r`` to be equal to ``r``. 
+ + >>> correct_pvalue = np.count_nonzero(null >= r - 1e-14) / len(null) + >>> correct_pvalue + 0.16666666666666666 + >>> res.pvalue == correct_pvalue + True + + This method of comparison is expected to be accurate in most practical + situations, but the user is advised to assess this by inspecting the + elements of the null distribution that are close to the observed value + of the statistic. Also, consider the use of statistics that can be + calculated using exact arithmetic (e.g. integer statistics). + + """ + args = _permutation_test_iv(data, statistic, permutation_type, vectorized, + n_resamples, batch, alternative, axis, + random_state) + (data, statistic, permutation_type, vectorized, n_resamples, batch, + alternative, axis, random_state) = args + + observed = statistic(*data, axis=-1) + + null_calculators = {"pairings": _calculate_null_pairings, + "samples": _calculate_null_samples, + "independent": _calculate_null_both} + null_calculator_args = (data, statistic, n_resamples, + batch, random_state) + calculate_null = null_calculators[permutation_type] + null_distribution, n_resamples, exact_test = ( + calculate_null(*null_calculator_args)) + + # See References [2] and [3] + adjustment = 0 if exact_test else 1 + + # relative tolerance for detecting numerically distinct but + # theoretically equal values in the null distribution + eps = (0 if not np.issubdtype(observed.dtype, np.inexact) + else np.finfo(observed.dtype).eps*100) + gamma = np.abs(eps * observed) + + def less(null_distribution, observed): + cmps = null_distribution <= observed + gamma + pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) + return pvalues + + def greater(null_distribution, observed): + cmps = null_distribution >= observed - gamma + pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) + return pvalues + + def two_sided(null_distribution, observed): + pvalues_less = less(null_distribution, observed) + pvalues_greater = greater(null_distribution, 
observed) + pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 + return pvalues + + compare = {"less": less, + "greater": greater, + "two-sided": two_sided} + + pvalues = compare[alternative](null_distribution, observed) + pvalues = np.clip(pvalues, 0, 1) + + return PermutationTestResult(observed, pvalues, null_distribution) + + +@dataclass +class ResamplingMethod: + """Configuration information for a statistical resampling method. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a resampling or Monte Carlo version + of the hypothesis test. + + Attributes + ---------- + n_resamples : int + The number of resamples to perform or Monte Carlo samples to draw. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + """ + n_resamples: int = 9999 + batch: int = None # type: ignore[assignment] + + +@dataclass +class MonteCarloMethod(ResamplingMethod): + """Configuration information for a Monte Carlo hypothesis test. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a Monte Carlo version of the + hypothesis tests. + + Attributes + ---------- + n_resamples : int, optional + The number of Monte Carlo samples to draw. Default is 9999. + batch : int, optional + The number of Monte Carlo samples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all samples in a single batch. + rvs : callable or tuple of callables, optional + A callable or sequence of callables that generates random variates + under the null hypothesis. 
Each element of `rvs` must be a callable + that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and + returns an N-d array sample of that shape. If `rvs` is a sequence, the + number of callables in `rvs` must match the number of samples passed + to the hypothesis test in which the `MonteCarloMethod` is used. Default + is ``None``, in which case the hypothesis test function chooses values + to match the standard version of the hypothesis test. For example, + the null hypothesis of `scipy.stats.pearsonr` is typically that the + samples are drawn from the standard normal distribution, so + ``rvs = (rng.normal, rng.normal)`` where + ``rng = np.random.default_rng()``. + """ + rvs: object = None + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. + return dict(n_resamples=self.n_resamples, batch=self.batch, + rvs=self.rvs) + + +@dataclass +class PermutationMethod(ResamplingMethod): + """Configuration information for a permutation hypothesis test. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a permutation version of the + hypothesis tests. + + Attributes + ---------- + n_resamples : int, optional + The number of resamples to perform. Default is 9999. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, then that instance is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. 
+ If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + """ + random_state: object = None + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. + return dict(n_resamples=self.n_resamples, batch=self.batch, + random_state=self.random_state) + + +@dataclass +class BootstrapMethod(ResamplingMethod): + """Configuration information for a bootstrap confidence interval. + + Instances of this class can be passed into the `method` parameter of some + confidence interval methods to generate a bootstrap confidence interval. + + Attributes + ---------- + n_resamples : int, optional + The number of resamples to perform. Default is 9999. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, then that instance is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + + method : {'bca', 'percentile', 'basic'} + Whether to use the 'percentile' bootstrap ('percentile'), the 'basic' + (AKA 'reverse') bootstrap ('basic'), or the bias-corrected and + accelerated bootstrap ('BCa', default). + """ + random_state: object = None + method: str = 'BCa' + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. 
+ return dict(n_resamples=self.n_resamples, batch=self.batch, + random_state=self.random_state, method=self.method) diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_sampling.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..44143985a88738c43984347b7787279348fac7f4 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_sampling.py @@ -0,0 +1,1314 @@ +import math +import numbers +import numpy as np +from scipy import stats +from scipy import special as sc +from ._qmc import (check_random_state as check_random_state_qmc, + Halton, QMCEngine) +from ._unuran.unuran_wrapper import NumericalInversePolynomial +from scipy._lib._util import check_random_state + + +__all__ = ['FastGeneratorInversion', 'RatioUniforms'] + + +# define pdfs and other helper functions to create the generators + +def argus_pdf(x, chi): + # approach follows Baumgarten/Hoermann: Generating ARGUS random variates + # for chi > 5, use relationship of the ARGUS distribution to Gamma(1.5) + if chi <= 5: + y = 1 - x * x + return x * math.sqrt(y) * math.exp(-0.5 * chi**2 * y) + return math.sqrt(x) * math.exp(-x) + + +def argus_gamma_trf(x, chi): + if chi <= 5: + return x + return np.sqrt(1.0 - 2 * x / chi**2) + + +def argus_gamma_inv_trf(x, chi): + if chi <= 5: + return x + return 0.5 * chi**2 * (1 - x**2) + + +def betaprime_pdf(x, a, b): + if x > 0: + logf = (a - 1) * math.log(x) - (a + b) * math.log1p(x) - sc.betaln(a, b) + return math.exp(logf) + else: + # return pdf at x == 0 separately to avoid runtime warnings + if a > 1: + return 0 + elif a < 1: + return np.inf + else: + return 1 / sc.beta(a, b) + + +def beta_valid_params(a, b): + return (min(a, b) >= 0.1) and (max(a, b) <= 700) + + +def gamma_pdf(x, a): + if x > 0: + return math.exp(-math.lgamma(a) + (a - 1.0) * math.log(x) - x) + else: + return 0 if a >= 1 else np.inf + + +def invgamma_pdf(x, a): + if x > 0: + return 
def burr_pdf(x, cc, dd):
    # np.exp (not math.exp) is used on purpose: math.exp can raise an
    # OverflowError during setup, e.g. for parameters
    # 1.89128135, 0.30195177 — see test test_burr_overflow
    if x <= 0:
        return 0
    lx = math.log(x)
    return np.exp(-(cc + 1) * lx - (dd + 1) * math.log1p(np.exp(-cc * lx)))


def burr12_pdf(x, cc, dd):
    if x <= 0:
        return 0
    lx = math.log(x)
    logterm = math.log1p(math.exp(cc * lx))
    return math.exp((cc - 1) * lx - (dd + 1) * logterm + math.log(cc * dd))


def chi_pdf(x, a):
    if x <= 0:
        return 0 if a >= 1 else np.inf
    return math.exp((a - 1) * math.log(x)
                    - 0.5 * (x * x)
                    - (a / 2 - 1) * math.log(2)
                    - math.lgamma(0.5 * a))


def chi2_pdf(x, df):
    if x <= 0:
        return 0 if df >= 1 else np.inf
    return math.exp((df / 2 - 1) * math.log(x)
                    - 0.5 * x
                    - (df / 2) * math.log(2)
                    - math.lgamma(0.5 * df))


def alpha_pdf(x, a):
    if x <= 0:
        return 0.0
    return math.exp(-2.0 * math.log(x) - 0.5 * (a - 1.0 / x) ** 2)


def bradford_pdf(x, c):
    # support is [0, 1]
    return 1.0 / (1.0 + c * x) if 0 <= x <= 1 else 0.0


def crystalball_pdf(x, b, m):
    # Gaussian core for x > -b, power-law tail otherwise
    if x > -b:
        return math.exp(-0.5 * x * x)
    return math.exp(m * math.log(m / b) - 0.5 * b * b
                    - m * math.log(m / b - b - x))


def weibull_min_pdf(x, c):
    if x <= 0:
        return 0.0
    return c * math.exp((c - 1) * math.log(x) - x**c)


def weibull_max_pdf(x, c):
    if x >= 0:
        return 0.0
    return c * math.exp((c - 1) * math.log(-x) - ((-x) ** c))


def invweibull_pdf(x, c):
    if x <= 0:
        return 0.0
    return c * math.exp(-(c + 1) * math.log(x) - x ** (-c))


def wald_pdf(x):
    if x <= 0:
        return 0.0
    return math.exp(-((x - 1) ** 2) / (2 * x)) / math.sqrt(x**3)


def geninvgauss_mode(p, b):
    # two algebraically equivalent mode formulas; pick the numerically
    # more stable one depending on the sign of p - 1
    if p > 1:
        return (math.sqrt((1 - p) ** 2 + b**2) - (1 - p)) / b
    return b / (math.sqrt((1 - p) ** 2 + b**2) + (1 - p))


def geninvgauss_pdf(x, p, b):
    # pdf normalized by its value at the mode
    m = geninvgauss_mode(p, b)
    lfm = (p - 1) * math.log(m) - 0.5 * b * (m + 1 / m)
    if x <= 0:
        return 0.0
    return math.exp((p - 1) * math.log(x) - 0.5 * b * (x + 1 / x) - lfm)


def invgauss_mode(mu):
    return 1.0 / (math.sqrt(1.5 * 1.5 + 1 / (mu * mu)) + 1.5)


def invgauss_pdf(x, mu):
    # pdf normalized by its value at the mode
    m = invgauss_mode(mu)
    lfm = -1.5 * math.log(m) - (m - mu) ** 2 / (2 * m * mu**2)
    if x <= 0:
        return 0.0
    return math.exp(-1.5 * math.log(x) - (x - mu) ** 2 / (2 * x * mu**2) - lfm)


def powerlaw_pdf(x, a):
    return x ** (a - 1) if x > 0 else 0.0


# Define a dictionary: for a given distribution (keys), another dictionary
# (values) specifies the parameters for NumericalInversePolynomial (PINV).
# The keys of the latter dictionary are:
# - pdf: the pdf of the distribution (callable). The signature of the pdf
#   is float -> float (i.e., the function does not have to be vectorized).
#   If possible, functions like log or exp from the module math should be
#   preferred over functions from numpy since the PINV setup will be faster
#   in that case.
# - check_pinv_params: callable f that returns true if the shape parameters
#   (args) are recommended parameters for PINV (i.e., the u-error does
#   not exceed the default tolerance)
# - center: scalar if the center does not depend on args, otherwise
#   callable that returns the center as a function of the shape parameters
# - rvs_transform: a callable that can be used to transform the rvs that
#   are distributed according to the pdf to the target distribution
#   (as an example, see the entry for the beta distribution)
# - rvs_transform_inv: the inverse of rvs_transform (it is required
#   for the transformed ppf)
# - mirror_uniform: boolean or a callable that returns true or false
#   depending on the shape parameters. If True, the ppf is applied
#   to 1-u instead of u to generate rvs, where u is a uniform rv.
+# While both u and 1-u are uniform, it can be required to use 1-u +# to compute the u-error correctly. This is only relevant for the argus +# distribution. +# The only required keys are "pdf" and "check_pinv_params". +# All other keys are optional. + +PINV_CONFIG = { + "alpha": { + "pdf": alpha_pdf, + "check_pinv_params": lambda a: 1.0e-11 <= a < 2.1e5, + "center": lambda a: 0.25 * (math.sqrt(a * a + 8.0) - a), + }, + "anglit": { + "pdf": lambda x: math.cos(2 * x) + 1.0e-13, + # +1.e-13 is necessary, otherwise PINV has strange problems as + # f(upper border) is very close to 0 + "center": 0, + }, + "argus": { + "pdf": argus_pdf, + "center": lambda chi: 0.7 if chi <= 5 else 0.5, + "check_pinv_params": lambda chi: 1e-20 < chi < 901, + "rvs_transform": argus_gamma_trf, + "rvs_transform_inv": argus_gamma_inv_trf, + "mirror_uniform": lambda chi: chi > 5, + }, + "beta": { + "pdf": betaprime_pdf, + "center": lambda a, b: max(0.1, (a - 1) / (b + 1)), + "check_pinv_params": beta_valid_params, + "rvs_transform": lambda x, *args: x / (1 + x), + "rvs_transform_inv": lambda x, *args: x / (1 - x) if x < 1 else np.inf, + }, + "betaprime": { + "pdf": betaprime_pdf, + "center": lambda a, b: max(0.1, (a - 1) / (b + 1)), + "check_pinv_params": beta_valid_params, + }, + "bradford": { + "pdf": bradford_pdf, + "check_pinv_params": lambda a: 1.0e-6 <= a <= 1e9, + "center": 0.5, + }, + "burr": { + "pdf": burr_pdf, + "center": lambda a, b: (2 ** (1 / b) - 1) ** (-1 / a), + "check_pinv_params": lambda a, b: (min(a, b) >= 0.3) and (max(a, b) <= 50), + }, + "burr12": { + "pdf": burr12_pdf, + "center": lambda a, b: (2 ** (1 / b) - 1) ** (1 / a), + "check_pinv_params": lambda a, b: (min(a, b) >= 0.2) and (max(a, b) <= 50), + }, + "cauchy": { + "pdf": lambda x: 1 / (1 + (x * x)), + "center": 0, + }, + "chi": { + "pdf": chi_pdf, + "check_pinv_params": lambda df: 0.05 <= df <= 1.0e6, + "center": lambda a: math.sqrt(a), + }, + "chi2": { + "pdf": chi2_pdf, + "check_pinv_params": lambda df: 0.07 <= 
df <= 1e6, + "center": lambda a: a, + }, + "cosine": { + "pdf": lambda x: 1 + math.cos(x), + "center": 0, + }, + "crystalball": { + "pdf": crystalball_pdf, + "check_pinv_params": lambda b, m: (0.01 <= b <= 5.5) + and (1.1 <= m <= 75.1), + "center": 0.0, + }, + "expon": { + "pdf": lambda x: math.exp(-x), + "center": 1.0, + }, + "gamma": { + "pdf": gamma_pdf, + "check_pinv_params": lambda a: 0.04 <= a <= 1e6, + "center": lambda a: a, + }, + "gennorm": { + "pdf": lambda x, b: math.exp(-abs(x) ** b), + "check_pinv_params": lambda b: 0.081 <= b <= 45.0, + "center": 0.0, + }, + "geninvgauss": { + "pdf": geninvgauss_pdf, + "check_pinv_params": lambda p, b: (abs(p) <= 1200.0) + and (1.0e-10 <= b <= 1200.0), + "center": geninvgauss_mode, + }, + "gumbel_l": { + "pdf": lambda x: math.exp(x - math.exp(x)), + "center": -0.6, + }, + "gumbel_r": { + "pdf": lambda x: math.exp(-x - math.exp(-x)), + "center": 0.6, + }, + "hypsecant": { + "pdf": lambda x: 1.0 / (math.exp(x) + math.exp(-x)), + "center": 0.0, + }, + "invgamma": { + "pdf": invgamma_pdf, + "check_pinv_params": lambda a: 0.04 <= a <= 1e6, + "center": lambda a: 1 / a, + }, + "invgauss": { + "pdf": invgauss_pdf, + "check_pinv_params": lambda mu: 1.0e-10 <= mu <= 1.0e9, + "center": invgauss_mode, + }, + "invweibull": { + "pdf": invweibull_pdf, + "check_pinv_params": lambda a: 0.12 <= a <= 512, + "center": 1.0, + }, + "laplace": { + "pdf": lambda x: math.exp(-abs(x)), + "center": 0.0, + }, + "logistic": { + "pdf": lambda x: math.exp(-x) / (1 + math.exp(-x)) ** 2, + "center": 0.0, + }, + "maxwell": { + "pdf": lambda x: x * x * math.exp(-0.5 * x * x), + "center": 1.41421, + }, + "moyal": { + "pdf": lambda x: math.exp(-(x + math.exp(-x)) / 2), + "center": 1.2, + }, + "norm": { + "pdf": lambda x: math.exp(-x * x / 2), + "center": 0.0, + }, + "pareto": { + "pdf": lambda x, b: x ** -(b + 1), + "center": lambda b: b / (b - 1) if b > 2 else 1.5, + "check_pinv_params": lambda b: 0.08 <= b <= 400000, + }, + "powerlaw": { + "pdf": 
powerlaw_pdf, + "center": 1.0, + "check_pinv_params": lambda a: 0.06 <= a <= 1.0e5, + }, + "t": { + "pdf": lambda x, df: (1 + x * x / df) ** (-0.5 * (df + 1)), + "check_pinv_params": lambda a: 0.07 <= a <= 1e6, + "center": 0.0, + }, + "rayleigh": { + "pdf": lambda x: x * math.exp(-0.5 * (x * x)), + "center": 1.0, + }, + "semicircular": { + "pdf": lambda x: math.sqrt(1.0 - (x * x)), + "center": 0, + }, + "wald": { + "pdf": wald_pdf, + "center": 1.0, + }, + "weibull_max": { + "pdf": weibull_max_pdf, + "check_pinv_params": lambda a: 0.25 <= a <= 512, + "center": -1.0, + }, + "weibull_min": { + "pdf": weibull_min_pdf, + "check_pinv_params": lambda a: 0.25 <= a <= 512, + "center": 1.0, + }, +} + + +def _validate_qmc_input(qmc_engine, d, seed): + # Input validation for `qmc_engine` and `d` + # Error messages for invalid `d` are raised by QMCEngine + # we could probably use a stats.qmc.check_qrandom_state + if isinstance(qmc_engine, QMCEngine): + if d is not None and qmc_engine.d != d: + message = "`d` must be consistent with dimension of `qmc_engine`." + raise ValueError(message) + d = qmc_engine.d if d is None else d + elif qmc_engine is None: + d = 1 if d is None else d + qmc_engine = Halton(d, seed=seed) + else: + message = ( + "`qmc_engine` must be an instance of " + "`scipy.stats.qmc.QMCEngine` or `None`." + ) + raise ValueError(message) + + return qmc_engine, d + + +class CustomDistPINV: + def __init__(self, pdf, args): + self._pdf = lambda x: pdf(x, *args) + + def pdf(self, x): + return self._pdf(x) + + +class FastGeneratorInversion: + """ + Fast sampling by numerical inversion of the CDF for a large class of + continuous distributions in `scipy.stats`. + + Parameters + ---------- + dist : rv_frozen object + Frozen distribution object from `scipy.stats`. The list of supported + distributions can be found in the Notes section. The shape parameters, + `loc` and `scale` used to create the distributions must be scalars. 
+ For example, for the Gamma distribution with shape parameter `p`, + `p` has to be a float, and for the beta distribution with shape + parameters (a, b), both a and b have to be floats. + domain : tuple of floats, optional + If one wishes to sample from a truncated/conditional distribution, + the domain has to be specified. + The default is None. In that case, the random variates are not + truncated, and the domain is inferred from the support of the + distribution. + ignore_shape_range : boolean, optional. + If False, shape parameters that are outside of the valid range + of values to ensure that the numerical accuracy (see Notes) is + high, raise a ValueError. If True, any shape parameters that are valid + for the distribution are accepted. This can be useful for testing. + The default is False. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + A NumPy random number generator or seed for the underlying NumPy + random number generator used to generate the stream of uniform + random numbers. + If `random_state` is None, it uses ``self.random_state``. + If `random_state` is an int, + ``np.random.default_rng(random_state)`` is used. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Attributes + ---------- + loc : float + The location parameter. + random_state : {`numpy.random.Generator`, `numpy.random.RandomState`} + The random state used in relevant methods like `rvs` (unless + another `random_state` is passed as an argument to these methods). + scale : float + The scale parameter. + + Methods + ------- + cdf + evaluate_error + ppf + qrvs + rvs + support + + Notes + ----- + The class creates an object for continuous distributions specified + by `dist`. The method `rvs` uses a generator from + `scipy.stats.sampling` that is created when the object is instantiated. + In addition, the methods `qrvs` and `ppf` are added. 
+ `qrvs` generate samples based on quasi-random numbers from + `scipy.stats.qmc`. `ppf` is the PPF based on the + numerical inversion method in [1]_ (`NumericalInversePolynomial`) that is + used to generate random variates. + + Supported distributions (`distname`) are: + ``alpha``, ``anglit``, ``argus``, ``beta``, ``betaprime``, ``bradford``, + ``burr``, ``burr12``, ``cauchy``, ``chi``, ``chi2``, ``cosine``, + ``crystalball``, ``expon``, ``gamma``, ``gennorm``, ``geninvgauss``, + ``gumbel_l``, ``gumbel_r``, ``hypsecant``, ``invgamma``, ``invgauss``, + ``invweibull``, ``laplace``, ``logistic``, ``maxwell``, ``moyal``, + ``norm``, ``pareto``, ``powerlaw``, ``t``, ``rayleigh``, ``semicircular``, + ``wald``, ``weibull_max``, ``weibull_min``. + + `rvs` relies on the accuracy of the numerical inversion. If very extreme + shape parameters are used, the numerical inversion might not work. However, + for all implemented distributions, the admissible shape parameters have + been tested, and an error will be raised if the user supplies values + outside of the allowed range. The u-error should not exceed 1e-10 for all + valid parameters. Note that warnings might be raised even if parameters + are within the valid range when the object is instantiated. + To check numerical accuracy, the method `evaluate_error` can be used. + + Note that all implemented distributions are also part of `scipy.stats`, and + the object created by `FastGeneratorInversion` relies on methods like + `ppf`, `cdf` and `pdf` from `rv_frozen`. The main benefit of using this + class can be summarized as follows: Once the generator to sample random + variates is created in the setup step, sampling and evaluation of + the PPF using `ppf` are very fast, + and performance is essentially independent of the distribution. Therefore, + a substantial speed-up can be achieved for many distributions if large + numbers of random variates are required. 
It is important to know that this
    fast sampling is achieved by inversion of the CDF. Thus, one uniform
    random variate is transformed into a non-uniform variate, which is an
    advantage for several simulation methods, e.g., when
    the variance reduction methods of common random variates or
    antithetic variates are used ([2]_).

    In addition, inversion makes it possible
    - to use a QMC generator from `scipy.stats.qmc` (method `qrvs`),
    - to generate random variates truncated to an interval. For example, if
      one aims to sample standard normal random variates from
      the interval (2, 4), this can be easily achieved by using the parameter
      `domain`.

    The location and scale that are initially defined by `dist`
    can be reset without having to rerun the setup
    step to create the generator that is used for sampling. The relation
    of the distribution `Y` with `loc` and `scale` to the standard
    distribution `X` (i.e., ``loc=0`` and ``scale=1``) is given by
    ``Y = loc + scale * X``.

    References
    ----------
    .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold.
           "Random variate generation by numerical inversion when only the
           density is known." ACM Transactions on Modeling and Computer
           Simulation (TOMACS) 20.4 (2010): 1-25.
    .. [2] Hörmann, Wolfgang, Josef Leydold and Gerhard Derflinger.
           "Automatic nonuniform random number generation."
           Springer, 2004.
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> from scipy.stats.sampling import FastGeneratorInversion + + Let's start with a simple example to illustrate the main features: + + >>> gamma_frozen = stats.gamma(1.5) + >>> gamma_dist = FastGeneratorInversion(gamma_frozen) + >>> r = gamma_dist.rvs(size=1000) + + The mean should be approximately equal to the shape parameter 1.5: + + >>> r.mean() + 1.52423591130436 # may vary + + Similarly, we can draw a sample based on quasi-random numbers: + + >>> r = gamma_dist.qrvs(size=1000) + >>> r.mean() + 1.4996639255942914 # may vary + + Compare the PPF against approximation `ppf`. + + >>> q = [0.001, 0.2, 0.5, 0.8, 0.999] + >>> np.max(np.abs(gamma_frozen.ppf(q) - gamma_dist.ppf(q))) + 4.313394796895409e-08 + + To confirm that the numerical inversion is accurate, we evaluate the + approximation error (u-error), which should be below 1e-10 (for more + details, refer to the documentation of `evaluate_error`): + + >>> gamma_dist.evaluate_error() + (7.446320551265581e-11, nan) # may vary + + Note that the location and scale can be changed without instantiating a + new generator: + + >>> gamma_dist.loc = 2 + >>> gamma_dist.scale = 3 + >>> r = gamma_dist.rvs(size=1000) + + The mean should be approximately 2 + 3*1.5 = 6.5. + + >>> r.mean() + 6.399549295242894 # may vary + + Let us also illustrate how truncation can be applied: + + >>> trunc_norm = FastGeneratorInversion(stats.norm(), domain=(3, 4)) + >>> r = trunc_norm.rvs(size=1000) + >>> 3 < r.min() < r.max() < 4 + True + + Check the mean: + + >>> r.mean() + 3.250433367078603 # may vary + + >>> stats.norm.expect(lb=3, ub=4, conditional=True) + 3.260454285589997 + + In this particular, case, `scipy.stats.truncnorm` could also be used to + generate truncated normal random variates. 
+ + """ + + def __init__( + self, + dist, + *, + domain=None, + ignore_shape_range=False, + random_state=None, + ): + + if isinstance(dist, stats.distributions.rv_frozen): + distname = dist.dist.name + if distname not in PINV_CONFIG.keys(): + raise ValueError( + f"Distribution '{distname}' is not supported." + f"It must be one of {list(PINV_CONFIG.keys())}" + ) + else: + raise ValueError("`dist` must be a frozen distribution object") + + loc = dist.kwds.get("loc", 0) + scale = dist.kwds.get("scale", 1) + args = dist.args + if not np.isscalar(loc): + raise ValueError("loc must be scalar.") + if not np.isscalar(scale): + raise ValueError("scale must be scalar.") + + self._frozendist = getattr(stats, distname)( + *args, + loc=loc, + scale=scale, + ) + self._distname = distname + + nargs = np.broadcast_arrays(args)[0].size + nargs_expected = self._frozendist.dist.numargs + if nargs != nargs_expected: + raise ValueError( + f"Each of the {nargs_expected} shape parameters must be a " + f"scalar, but {nargs} values are provided." 
+ ) + + self.random_state = random_state + + if domain is None: + self._domain = self._frozendist.support() + self._p_lower = 0.0 + self._p_domain = 1.0 + else: + self._domain = domain + self._p_lower = self._frozendist.cdf(self._domain[0]) + _p_domain = self._frozendist.cdf(self._domain[1]) - self._p_lower + self._p_domain = _p_domain + self._set_domain_adj() + self._ignore_shape_range = ignore_shape_range + + # the domain to be passed to NumericalInversePolynomial + # define a separate variable since in case of a transformation, + # domain_pinv will not be the same as self._domain + self._domain_pinv = self._domain + + # get information about the distribution from the config to set up + # the generator + dist = self._process_config(distname, args) + + if self._rvs_transform_inv is not None: + d0 = self._rvs_transform_inv(self._domain[0], *args) + d1 = self._rvs_transform_inv(self._domain[1], *args) + if d0 > d1: + # swap values if transformation if decreasing + d0, d1 = d1, d0 + # only update _domain_pinv and not _domain + # _domain refers to the original distribution, _domain_pinv + # to the transformed distribution + self._domain_pinv = d0, d1 + + # self._center has been set by the call self._process_config + # check if self._center is inside the transformed domain + # _domain_pinv, otherwise move it to the endpoint that is closer + if self._center is not None: + if self._center < self._domain_pinv[0]: + self._center = self._domain_pinv[0] + elif self._center > self._domain_pinv[1]: + self._center = self._domain_pinv[1] + + self._rng = NumericalInversePolynomial( + dist, + random_state=self.random_state, + domain=self._domain_pinv, + center=self._center, + ) + + @property + def random_state(self): + return self._random_state + + @random_state.setter + def random_state(self, random_state): + self._random_state = check_random_state_qmc(random_state) + + @property + def loc(self): + return self._frozendist.kwds.get("loc", 0) + + @loc.setter + def loc(self, loc): + 
if not np.isscalar(loc): + raise ValueError("loc must be scalar.") + self._frozendist.kwds["loc"] = loc + # update the adjusted domain that depends on loc and scale + self._set_domain_adj() + + @property + def scale(self): + return self._frozendist.kwds.get("scale", 0) + + @scale.setter + def scale(self, scale): + if not np.isscalar(scale): + raise ValueError("scale must be scalar.") + self._frozendist.kwds["scale"] = scale + # update the adjusted domain that depends on loc and scale + self._set_domain_adj() + + def _set_domain_adj(self): + """ Adjust the domain based on loc and scale. """ + loc = self.loc + scale = self.scale + lb = self._domain[0] * scale + loc + ub = self._domain[1] * scale + loc + self._domain_adj = (lb, ub) + + def _process_config(self, distname, args): + cfg = PINV_CONFIG[distname] + if "check_pinv_params" in cfg: + if not self._ignore_shape_range: + if not cfg["check_pinv_params"](*args): + msg = ("No generator is defined for the shape parameters " + f"{args}. Use ignore_shape_range to proceed " + "with the selected values.") + raise ValueError(msg) + + if "center" in cfg.keys(): + if not np.isscalar(cfg["center"]): + self._center = cfg["center"](*args) + else: + self._center = cfg["center"] + else: + self._center = None + self._rvs_transform = cfg.get("rvs_transform", None) + self._rvs_transform_inv = cfg.get("rvs_transform_inv", None) + _mirror_uniform = cfg.get("mirror_uniform", None) + if _mirror_uniform is None: + self._mirror_uniform = False + else: + self._mirror_uniform = _mirror_uniform(*args) + + return CustomDistPINV(cfg["pdf"], args) + + def rvs(self, size=None): + """ + Sample from the distribution by inversion. + + Parameters + ---------- + size : int or tuple, optional + The shape of samples. Default is ``None`` in which case a scalar + sample is returned. + + Returns + ------- + rvs : array_like + A NumPy array of random variates. 
        Notes
        -----
        Random variates are generated by numerical inversion of the CDF, i.e.,
        `ppf` computed by `NumericalInversePolynomial` when the class
        is instantiated. Note that the
        default ``rvs`` method of the rv_continuous class is
        overwritten. Hence, a different stream of random numbers is generated
        even if the same seed is used.
        """
        # note: we cannot use self._rng.rvs directly in case
        # self._mirror_uniform is true
        u = self.random_state.uniform(size=size)
        if self._mirror_uniform:
            u = 1 - u
        r = self._rng.ppf(u)
        # undo the transformation applied to the pdf, if any
        if self._rvs_transform is not None:
            r = self._rvs_transform(r, *self._frozendist.args)
        return self.loc + self.scale * r

    def ppf(self, q):
        """
        Very fast PPF (inverse CDF) of the distribution which
        is a very close approximation of the exact PPF values.

        Parameters
        ----------
        q : array_like
            Array with probabilities.

        Returns
        -------
        ppf : array_like
            Quantiles corresponding to the values in `q`.

        Notes
        -----
        The evaluation of the PPF is very fast but it may have a large
        relative error in the far tails. The numerical precision of the PPF
        is controlled by the u-error, that is,
        ``max |u - CDF(PPF(u))|`` where the max is taken over points in
        the interval [0,1], see `evaluate_error`.

        Note that this PPF is designed to generate random samples.
        """
        q = np.asarray(q)
        # a mirrored generator inverts 1 - q instead of q
        if self._mirror_uniform:
            x = self._rng.ppf(1 - q)
        else:
            x = self._rng.ppf(q)
        if self._rvs_transform is not None:
            x = self._rvs_transform(x, *self._frozendist.args)
        return self.scale * x + self.loc

    def qrvs(self, size=None, d=None, qmc_engine=None):
        """
        Quasi-random variates of the given distribution.

        The `qmc_engine` is used to draw uniform quasi-random variates, and
        these are converted to quasi-random variates of the given distribution
        using inverse transform sampling.

        Parameters
        ----------
        size : int, tuple of ints, or None; optional
            Defines shape of random variates array.
            Default is ``None``.
        d : int or None, optional
            Defines dimension of uniform quasi-random variates to be
            transformed. Default is ``None``.
        qmc_engine : scipy.stats.qmc.QMCEngine(d=1), optional
            Defines the object to use for drawing
            quasi-random variates. Default is ``None``, which uses
            `scipy.stats.qmc.Halton(1)`.

        Returns
        -------
        rvs : ndarray or scalar
            Quasi-random variates. See Notes for shape information.

        Notes
        -----
        The shape of the output array depends on `size`, `d`, and `qmc_engine`.
        The intent is for the interface to be natural, but the detailed rules
        to achieve this are complicated.

        - If `qmc_engine` is ``None``, a `scipy.stats.qmc.Halton` instance is
          created with dimension `d`. If `d` is not provided, ``d=1``.
        - If `qmc_engine` is not ``None`` and `d` is ``None``, `d` is
          determined from the dimension of the `qmc_engine`.
        - If `qmc_engine` is not ``None`` and `d` is not ``None`` but the
          dimensions are inconsistent, a ``ValueError`` is raised.
        - After `d` is determined according to the rules above, the output
          shape is ``tuple_shape + d_shape``, where:

          - ``tuple_shape = tuple()`` if `size` is ``None``,
          - ``tuple_shape = (size,)`` if `size` is an ``int``,
          - ``tuple_shape = size`` if `size` is a sequence,
          - ``d_shape = tuple()`` if `d` is ``None`` or `d` is 1, and
          - ``d_shape = (d,)`` if `d` is greater than 1.

        The elements of the returned array are part of a low-discrepancy
        sequence. If `d` is 1, this means that none of the samples are truly
        independent. If `d` > 1, each slice ``rvs[..., i]`` will be of a
        quasi-independent sequence; see `scipy.stats.qmc.QMCEngine` for
        details. Note that when `d` > 1, the samples returned are still those
        of the provided univariate distribution, not a multivariate
        generalization of that distribution.

        """
        qmc_engine, d = _validate_qmc_input(qmc_engine, d, self.random_state)
        # mainly copied from unuran_wrapper.pyx.templ
        # `rvs` is flexible about whether `size` is an int or tuple, so this
        # should be, too.
        try:
            if size is None:
                tuple_size = (1,)
            else:
                tuple_size = tuple(size)
        except TypeError:
            tuple_size = (size,)
        # we do not use rng.qrvs directly since we need to be
        # able to apply the ppf to 1 - u
        N = 1 if size is None else np.prod(size)
        u = qmc_engine.random(N)
        if self._mirror_uniform:
            u = 1 - u
        qrvs = self._ppf(u)
        if self._rvs_transform is not None:
            qrvs = self._rvs_transform(qrvs, *self._frozendist.args)
        # reshape to honor the size/d output-shape contract described above
        if size is None:
            qrvs = qrvs.squeeze()[()]
        else:
            if d == 1:
                qrvs = qrvs.reshape(tuple_size)
            else:
                qrvs = qrvs.reshape(tuple_size + (d,))
        return self.loc + self.scale * qrvs

    def evaluate_error(self, size=100000, random_state=None, x_error=False):
        """
        Evaluate the numerical accuracy of the inversion (u- and x-error).

        Parameters
        ----------
        size : int, optional
            The number of random points over which the error is estimated.
            Default is ``100000``.
        random_state : {None, int, `numpy.random.Generator`,
                        `numpy.random.RandomState`}, optional

            A NumPy random number generator or seed for the underlying NumPy
            random number generator used to generate the stream of uniform
            random numbers.
            If `random_state` is None, use ``self.random_state``.
            If `random_state` is an int,
            ``np.random.default_rng(random_state)`` is used.
            If `random_state` is already a ``Generator`` or ``RandomState``
            instance then that instance is used.

        Returns
        -------
        u_error, x_error : tuple of floats
            A NumPy array of random variates.

        Notes
        -----
        The numerical precision of the inverse CDF `ppf` is controlled by
        the u-error. It is computed as follows:
        ``max |u - CDF(PPF(u))|`` where the max is taken `size` random
        points in the interval [0,1].
        `random_state` determines the random
        sample. Note that if `ppf` was exact, the u-error would be zero.

        The x-error measures the direct distance between the exact PPF
        and `ppf`. If ``x_error`` is set to ``True``, it is
        computed as the maximum of the minimum of the relative and absolute
        x-error:
        ``max(min(x_error_abs[i], x_error_rel[i]))`` where
        ``x_error_abs[i] = |PPF(u[i]) - PPF_fast(u[i])|``,
        ``x_error_rel[i] = max |(PPF(u[i]) - PPF_fast(u[i])) / PPF(u[i])|``.
        Note that it is important to consider the relative x-error in the case
        that ``PPF(u)`` is close to zero or very large.

        By default, only the u-error is evaluated and the x-error is set to
        ``np.nan``. Note that the evaluation of the x-error will be very slow
        if the implementation of the PPF is slow.

        Further information about these error measures can be found in [1]_.

        References
        ----------
        .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold.
               "Random variate generation by numerical inversion when only the
               density is known." ACM Transactions on Modeling and Computer
               Simulation (TOMACS) 20.4 (2010): 1-25.

        Examples
        --------

        >>> import numpy as np
        >>> from scipy import stats
        >>> from scipy.stats.sampling import FastGeneratorInversion

        Create an object for the normal distribution:

        >>> d_norm_frozen = stats.norm()
        >>> d_norm = FastGeneratorInversion(d_norm_frozen)

        To confirm that the numerical inversion is accurate, we evaluate the
        approximation error (u-error and x-error).

        >>> u_error, x_error = d_norm.evaluate_error(x_error=True)

        The u-error should be below 1e-10:

        >>> u_error
        8.785783212061915e-11  # may vary

        Compare the PPF against approximation `ppf`:

        >>> q = [0.001, 0.2, 0.4, 0.6, 0.8, 0.999]
        >>> diff = np.abs(d_norm_frozen.ppf(q) - d_norm.ppf(q))
        >>> x_error_abs = np.max(diff)
        >>> x_error_abs
        1.2937954707581412e-08

        This is the absolute x-error evaluated at the points q. The relative
        error is given by

        >>> x_error_rel = np.max(diff / np.abs(d_norm_frozen.ppf(q)))
        >>> x_error_rel
        4.186725600453555e-09

        The x_error computed above is derived in a very similar way over a
        much larger set of random values q. At each value q[i], the minimum
        of the relative and absolute error is taken. The final value is then
        derived as the maximum of these values. In our example, we get the
        following value:

        >>> x_error
        4.507068014335139e-07  # may vary

        """
        if not isinstance(size, (numbers.Integral, np.integer)):
            raise ValueError("size must be an integer.")
        # urng will be used to draw the samples for testing the error
        # it must not interfere with self.random_state. therefore, do not
        # call self.rvs, but draw uniform random numbers and apply
        # self.ppf (note: like in rvs, consider self._mirror_uniform)
        urng = check_random_state_qmc(random_state)
        u = urng.uniform(size=size)
        if self._mirror_uniform:
            u = 1 - u
        x = self.ppf(u)
        # u-error: max deviation between u and CDF(PPF(u))
        uerr = np.max(np.abs(self._cdf(x) - u))
        if not x_error:
            return uerr, np.nan
        # x-error: compare the fast PPF against the exact PPF of the frozen
        # distribution; take min(absolute, relative) pointwise, then the max
        ppf_u = self._ppf(u)
        x_error_abs = np.abs(self.ppf(u)-ppf_u)
        x_error_rel = x_error_abs / np.abs(ppf_u)
        x_error_combined = np.array([x_error_abs, x_error_rel]).min(axis=0)
        return uerr, np.max(x_error_combined)

    def support(self):
        """Support of the distribution.

        Returns
        -------
        a, b : float
            end-points of the distribution's support.

        Notes
        -----

        Note that the support of the distribution depends on `loc`,
        `scale` and `domain`.
        Examples
        --------

        >>> from scipy import stats
        >>> from scipy.stats.sampling import FastGeneratorInversion

        Define a truncated normal distribution:

        >>> d_norm = FastGeneratorInversion(stats.norm(), domain=(0, 1))
        >>> d_norm.support()
        (0, 1)

        Shift the distribution:

        >>> d_norm.loc = 2.5
        >>> d_norm.support()
        (2.5, 3.5)

        """
        return self._domain_adj

    def _cdf(self, x):
        """Cumulative distribution function (CDF)

        Parameters
        ----------
        x : array_like
            The values where the CDF is evaluated

        Returns
        -------
        y : ndarray
            CDF evaluated at x

        """
        y = self._frozendist.cdf(x)
        if self._p_domain == 1.0:
            # no truncation: the frozen distribution's CDF applies directly
            return y
        # renormalize the CDF to the truncated domain and clip to [0, 1]
        return np.clip((y - self._p_lower) / self._p_domain, 0, 1)

    def _ppf(self, q):
        """Percent point function (inverse of `cdf`)

        Parameters
        ----------
        q : array_like
            lower tail probability

        Returns
        -------
        x : array_like
            quantile corresponding to the lower tail probability q.

        """
        if self._p_domain == 1.0:
            return self._frozendist.ppf(q)
        # map q from [0, 1] onto the CDF-range of the truncated domain and
        # clip the quantiles to the loc/scale-adjusted support
        x = self._frozendist.ppf(self._p_domain * np.array(q) + self._p_lower)
        return np.clip(x, self._domain_adj[0], self._domain_adj[1])


class RatioUniforms:
    """
    Generate random samples from a probability density function using the
    ratio-of-uniforms method.

    Parameters
    ----------
    pdf : callable
        A function with signature `pdf(x)` that is proportional to the
        probability density function of the distribution.
    umax : float
        The upper bound of the bounding rectangle in the u-direction.
    vmin : float
        The lower bound of the bounding rectangle in the v-direction.
    vmax : float
        The upper bound of the bounding rectangle in the v-direction.
    c : float, optional
        Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
+ If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + + Notes + ----- + Given a univariate probability density function `pdf` and a constant `c`, + define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``. + If ``(U, V)`` is a random vector uniformly distributed over ``A``, + then ``V/U + c`` follows a distribution according to `pdf`. + + The above result (see [1]_, [2]_) can be used to sample random variables + using only the PDF, i.e. no inversion of the CDF is required. Typical + choices of `c` are zero or the mode of `pdf`. The set ``A`` is a subset of + the rectangle ``R = [0, umax] x [vmin, vmax]`` where + + - ``umax = sup sqrt(pdf(x))`` + - ``vmin = inf (x - c) sqrt(pdf(x))`` + - ``vmax = sup (x - c) sqrt(pdf(x))`` + + In particular, these values are finite if `pdf` is bounded and + ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails). + One can generate ``(U, V)`` uniformly on ``R`` and return + ``V/U + c`` if ``(U, V)`` are also in ``A`` which can be directly + verified. + + The algorithm is not changed if one replaces `pdf` by k * `pdf` for any + constant k > 0. Thus, it is often convenient to work with a function + that is proportional to the probability density function by dropping + unnecessary normalization factors. + + Intuitively, the method works well if ``A`` fills up most of the + enclosing rectangle such that the probability is high that ``(U, V)`` + lies in ``A`` whenever it lies in ``R`` as the number of required + iterations becomes too large otherwise. 
To be more precise, note that + the expected number of iterations to draw ``(U, V)`` uniformly + distributed on ``R`` such that ``(U, V)`` is also in ``A`` is given by + the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``, + where `area(pdf)` is the integral of `pdf` (which is equal to one if the + probability density function is used but can take on other values if a + function proportional to the density is used). The equality holds since + the area of ``A`` is equal to ``0.5 * area(pdf)`` (Theorem 7.1 in [1]_). + If the sampling fails to generate a single random variate after 50000 + iterations (i.e. not a single draw is in ``A``), an exception is raised. + + If the bounding rectangle is not correctly specified (i.e. if it does not + contain ``A``), the algorithm samples from a distribution different from + the one given by `pdf`. It is therefore recommended to perform a + test such as `~scipy.stats.kstest` as a check. + + References + ---------- + .. [1] L. Devroye, "Non-Uniform Random Variate Generation", + Springer-Verlag, 1986. + + .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian + random variates", Statistics and Computing, 24(4), p. 547--557, 2014. + + .. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random + Variables Using the Ratio of Uniform Deviates", + ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + + >>> from scipy.stats.sampling import RatioUniforms + >>> rng = np.random.default_rng() + + Simulate normally distributed random variables. It is easy to compute the + bounding rectangle explicitly in that case. For simplicity, we drop the + normalization factor of the density. 
    >>> f = lambda x: np.exp(-x**2 / 2)
    >>> v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
    >>> umax = np.sqrt(f(0))
    >>> gen = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=rng)
    >>> r = gen.rvs(size=2500)

    The K-S test confirms that the random variates are indeed normally
    distributed (normality is not rejected at 5% significance level):

    >>> stats.kstest(r, 'norm')[1]
    0.250634764150542

    The exponential distribution provides another example where the bounding
    rectangle can be determined explicitly.

    >>> gen = RatioUniforms(lambda x: np.exp(-x), umax=1, vmin=0,
    ...                     vmax=2*np.exp(-1), random_state=rng)
    >>> r = gen.rvs(1000)
    >>> stats.kstest(r, 'expon')[1]
    0.21121052054580314

    """

    def __init__(self, pdf, *, umax, vmin, vmax, c=0, random_state=None):
        # sanity checks on the bounding rectangle R = [0, umax] x [vmin, vmax]
        if vmin >= vmax:
            raise ValueError("vmin must be smaller than vmax.")

        if umax <= 0:
            raise ValueError("umax must be positive.")

        self._pdf = pdf
        self._umax = umax
        self._vmin = vmin
        self._vmax = vmax
        self._c = c
        self._rng = check_random_state(random_state)

    def rvs(self, size=1):
        """Sampling of random variates

        Parameters
        ----------
        size : int or tuple of ints, optional
            Number of random variates to be generated (default is 1).

        Returns
        -------
        rvs : ndarray
            The random variates distributed according to the probability
            distribution defined by the pdf.

        """
        size1d = tuple(np.atleast_1d(size))
        N = np.prod(size1d)  # number of rvs needed, reshape upon return

        # start sampling using ratio of uniforms method
        x = np.zeros(N)
        simulated, i = 0, 1

        # loop until N rvs have been generated: expected runtime is finite.
        # to avoid infinite loop, raise exception if not a single rv has been
        # generated after 50000 tries.
even if the expected number of iterations + # is 1000, the probability of this event is (1-1/1000)**50000 + # which is of order 10e-22 + while simulated < N: + k = N - simulated + # simulate uniform rvs on [0, umax] and [vmin, vmax] + u1 = self._umax * self._rng.uniform(size=k) + v1 = self._rng.uniform(self._vmin, self._vmax, size=k) + # apply rejection method + rvs = v1 / u1 + self._c + accept = (u1**2 <= self._pdf(rvs)) + num_accept = np.sum(accept) + if num_accept > 0: + x[simulated:(simulated + num_accept)] = rvs[accept] + simulated += num_accept + + if (simulated == 0) and (i*N >= 50000): + msg = ( + f"Not a single random variate could be generated in {i*N} " + "attempts. The ratio of uniforms method does not appear " + "to work for the provided parameters. Please check the " + "pdf and the bounds." + ) + raise RuntimeError(msg) + i += 1 + + return np.reshape(x, size1d) diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..7baffcbdccb58aa453f5ddf6b83da4730c979e06 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py @@ -0,0 +1,712 @@ +from __future__ import annotations + +import inspect +from dataclasses import dataclass +from typing import ( + Callable, Literal, Protocol, TYPE_CHECKING +) + +import numpy as np + +from scipy.stats._common import ConfidenceInterval +from scipy.stats._qmc import check_random_state +from scipy.stats._resampling import BootstrapResult +from scipy.stats import qmc, bootstrap + + +if TYPE_CHECKING: + import numpy.typing as npt + from scipy._lib._util import DecimalNumber, IntNumber, SeedType + + +__all__ = [ + 'sobol_indices' +] + + +def f_ishigami(x: npt.ArrayLike) -> np.ndarray: + r"""Ishigami function. + + .. 
    math::

        Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1

    with :math:`\mathbf{x} \in [-\pi, \pi]^3`.

    Parameters
    ----------
    x : array_like ([x1, x2, x3], n)

    Returns
    -------
    f : array_like (n,)
        Function evaluation.

    References
    ----------
    .. [1] Ishigami, T. and T. Homma. "An importance quantification technique
       in uncertainty analysis for computer models." IEEE,
       :doi:`10.1109/ISUMA.1990.151285`, 1990.
    """
    x = np.atleast_2d(x)
    f_eval = (
        np.sin(x[0])
        + 7 * np.sin(x[1])**2
        + 0.1 * (x[2]**4) * np.sin(x[0])
    )
    return f_eval


def sample_A_B(
    n: IntNumber,
    dists: list[PPFDist],
    random_state: SeedType = None
) -> np.ndarray:
    """Sample two matrices A and B.

    Uses a Sobol' sequence with 2`d` columns to have 2 uncorrelated matrices.
    This is more efficient than using 2 random draw of Sobol'.
    See sec. 5 from [1]_.

    Output shape is (d, n).

    References
    ----------
    .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    """
    d = len(dists)
    # a single 2d-dimensional Sobol' draw yields two uncorrelated (d, n)
    # blocks (A and B) after the reshape below
    A_B = qmc.Sobol(d=2*d, seed=random_state, bits=64).random(n).T
    A_B = A_B.reshape(2, d, -1)
    try:
        # push the uniform samples through each marginal's inverse CDF
        for d_, dist in enumerate(dists):
            A_B[:, d_] = dist.ppf(A_B[:, d_])
    except AttributeError as exc:
        message = "Each distribution in `dists` must have method `ppf`."
        raise ValueError(message) from exc
    return A_B


def sample_AB(A: np.ndarray, B: np.ndarray) -> np.ndarray:
    """AB matrix.

    AB: rows of B into A. Shape (d, d, n).
    - Copy A into d "pages"
    - In the first page, replace 1st rows of A with 1st row of B.
    ...
    - In the dth page, replace dth row of A with dth row of B.
    - return the stack of pages
    """
    d, n = A.shape
    # d copies of A; then overwrite row i of page i with row i of B
    AB = np.tile(A, (d, 1, 1))
    i = np.arange(d)
    AB[i, i] = B[i]
    return AB


def saltelli_2010(
    f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray
) -> tuple[np.ndarray, np.ndarray]:
    r"""Saltelli2010 formulation.

    .. math::

        S_i = \frac{1}{N} \sum_{j=1}^N
        f(\mathbf{B})_j (f(\mathbf{AB}^{(i)})_j - f(\mathbf{A})_j)

    .. math::

        S_{T_i} = \frac{1}{N} \sum_{j=1}^N
        (f(\mathbf{A})_j - f(\mathbf{AB}^{(i)})_j)^2

    Parameters
    ----------
    f_A, f_B : array_like (s, n)
        Function values at A and B, respectively
    f_AB : array_like (d, s, n)
        Function values at each of the AB pages

    Returns
    -------
    s, st : array_like (s, d)
        First order and total order Sobol' indices.

    References
    ----------
    .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    """
    # Empirical variance calculated using output from A and B which are
    # independent. Output of AB is not independent and cannot be used
    var = np.var([f_A, f_B], axis=(0, -1))

    # We divide by the variance to have a ratio of variance
    # this leads to eq.
2
    s = np.mean(f_B * (f_AB - f_A), axis=-1) / var  # Table 2 (b)
    st = 0.5 * np.mean((f_A - f_AB) ** 2, axis=-1) / var  # Table 2 (f)

    return s.T, st.T


@dataclass
class BootstrapSobolResult:
    # Bootstrap results for the first- and total-order Sobol' indices.
    first_order: BootstrapResult
    total_order: BootstrapResult


@dataclass
class SobolResult:
    first_order: np.ndarray
    total_order: np.ndarray
    # private fields below keep the raw evaluations so that `bootstrap`
    # can resample them later
    _indices_method: Callable
    _f_A: np.ndarray
    _f_B: np.ndarray
    _f_AB: np.ndarray
    _A: np.ndarray | None = None
    _B: np.ndarray | None = None
    _AB: np.ndarray | None = None
    _bootstrap_result: BootstrapResult | None = None

    def bootstrap(
        self,
        confidence_level: DecimalNumber = 0.95,
        n_resamples: IntNumber = 999
    ) -> BootstrapSobolResult:
        """Bootstrap Sobol' indices to provide confidence intervals.

        Parameters
        ----------
        confidence_level : float, default: ``0.95``
            The confidence level of the confidence intervals.
        n_resamples : int, default: ``999``
            The number of resamples performed to form the bootstrap
            distribution of the indices.

        Returns
        -------
        res : BootstrapSobolResult
            Bootstrap result containing the confidence intervals and the
            bootstrap distribution of the indices.

            An object with attributes:

            first_order : BootstrapResult
                Bootstrap result of the first order indices.
            total_order : BootstrapResult
                Bootstrap result of the total order indices.
            See `BootstrapResult` for more details.

        """
        def statistic(idx):
            # resample the function evaluations by sample index so that
            # A, B and the AB pages stay aligned
            f_A_ = self._f_A[:, idx]
            f_B_ = self._f_B[:, idx]
            f_AB_ = self._f_AB[..., idx]
            return self._indices_method(f_A_, f_B_, f_AB_)

        n = self._f_A.shape[1]

        res = bootstrap(
            [np.arange(n)], statistic=statistic, method="BCa",
            n_resamples=n_resamples,
            confidence_level=confidence_level,
            bootstrap_result=self._bootstrap_result
        )
        # cache so repeated calls can reuse/extend the previous resamples
        self._bootstrap_result = res

        first_order = BootstrapResult(
            confidence_interval=ConfidenceInterval(
                res.confidence_interval.low[0], res.confidence_interval.high[0]
            ),
            bootstrap_distribution=res.bootstrap_distribution[0],
            standard_error=res.standard_error[0],
        )
        total_order = BootstrapResult(
            confidence_interval=ConfidenceInterval(
                res.confidence_interval.low[1], res.confidence_interval.high[1]
            ),
            bootstrap_distribution=res.bootstrap_distribution[1],
            standard_error=res.standard_error[1],
        )

        return BootstrapSobolResult(
            first_order=first_order, total_order=total_order
        )


class PPFDist(Protocol):
    # Structural type: any object exposing a ``ppf`` attribute qualifies.
    @property
    def ppf(self) -> Callable[..., float]:
        ...


def sobol_indices(
    *,
    func: Callable[[np.ndarray], npt.ArrayLike] |
          dict[Literal['f_A', 'f_B', 'f_AB'], np.ndarray],
    n: IntNumber,
    dists: list[PPFDist] | None = None,
    method: Callable | Literal['saltelli_2010'] = 'saltelli_2010',
    random_state: SeedType = None
) -> SobolResult:
    r"""Global sensitivity indices of Sobol'.

    Parameters
    ----------
    func : callable or dict(str, array_like)
        If `func` is a callable, function to compute the Sobol' indices from.
        Its signature must be::

            func(x: ArrayLike) -> ArrayLike

        with ``x`` of shape ``(d, n)`` and output of shape ``(s, n)`` where:

        - ``d`` is the input dimensionality of `func`
          (number of input variables),
        - ``s`` is the output dimensionality of `func`
          (number of output variables), and
        - ``n`` is the number of samples (see `n` below).

        Function evaluation values must be finite.
+ + If `func` is a dictionary, contains the function evaluations from three + different arrays. Keys must be: ``f_A``, ``f_B`` and ``f_AB``. + ``f_A`` and ``f_B`` should have a shape ``(s, n)`` and ``f_AB`` + should have a shape ``(d, s, n)``. + This is an advanced feature and misuse can lead to wrong analysis. + n : int + Number of samples used to generate the matrices ``A`` and ``B``. + Must be a power of 2. The total number of points at which `func` is + evaluated will be ``n*(d+2)``. + dists : list(distributions), optional + List of each parameter's distribution. The distribution of parameters + depends on the application and should be carefully chosen. + Parameters are assumed to be independently distributed, meaning there + is no constraint nor relationship between their values. + + Distributions must be an instance of a class with a ``ppf`` + method. + + Must be specified if `func` is a callable, and ignored otherwise. + method : Callable or str, default: 'saltelli_2010' + Method used to compute the first and total Sobol' indices. + + If a callable, its signature must be:: + + func(f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray) + -> Tuple[np.ndarray, np.ndarray] + + with ``f_A, f_B`` of shape ``(s, n)`` and ``f_AB`` of shape + ``(d, s, n)``. + These arrays contain the function evaluations from three different sets + of samples. + The output is a tuple of the first and total indices with + shape ``(s, d)``. + This is an advanced feature and misuse can lead to wrong analysis. + random_state : {None, int, `numpy.random.Generator`}, optional + If `random_state` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(random_state)``. + If `random_state` is already a ``Generator`` instance, then the + provided instance is used. + + Returns + ------- + res : SobolResult + An object with attributes: + + first_order : ndarray of shape (s, d) + First order Sobol' indices. 
+ total_order : ndarray of shape (s, d) + Total order Sobol' indices. + + And method: + + bootstrap(confidence_level: float, n_resamples: int) + -> BootstrapSobolResult + + A method providing confidence intervals on the indices. + See `scipy.stats.bootstrap` for more details. + + The bootstrapping is done on both first and total order indices, + and they are available in `BootstrapSobolResult` as attributes + ``first_order`` and ``total_order``. + + Notes + ----- + The Sobol' method [1]_, [2]_ is a variance-based Sensitivity Analysis which + obtains the contribution of each parameter to the variance of the + quantities of interest (QoIs; i.e., the outputs of `func`). + Respective contributions can be used to rank the parameters and + also gauge the complexity of the model by computing the + model's effective (or mean) dimension. + + .. note:: + + Parameters are assumed to be independently distributed. Each + parameter can still follow any distribution. In fact, the distribution + is very important and should match the real distribution of the + parameters. + + It uses a functional decomposition of the variance of the function to + explore + + .. math:: + + \mathbb{V}(Y) = \sum_{i}^{d} \mathbb{V}_i (Y) + \sum_{i= 2**12``. The more complex the model is, + the more samples will be needed. + + Even for a purely addiditive model, the indices may not sum to 1 due + to numerical noise. + + References + ---------- + .. [1] Sobol, I. M.. "Sensitivity analysis for nonlinear mathematical + models." Mathematical Modeling and Computational Experiment, 1:407-414, + 1993. + .. [2] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear + mathematical models and their Monte Carlo estimates." Mathematics + and Computers in Simulation, 55(1-3):271-280, + :doi:`10.1016/S0378-4754(00)00270-6`, 2001. + .. [3] Saltelli, A. "Making best use of model evaluations to + compute sensitivity indices." 
Computer Physics Communications, + 145(2):280-297, :doi:`10.1016/S0010-4655(02)00280-1`, 2002. + .. [4] Saltelli, A., M. Ratto, T. Andres, F. Campolongo, J. Cariboni, + D. Gatelli, M. Saisana, and S. Tarantola. "Global Sensitivity Analysis. + The Primer." 2007. + .. [5] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and + S. Tarantola. "Variance based sensitivity analysis of model + output. Design and estimator for the total sensitivity index." + Computer Physics Communications, 181(2):259-270, + :doi:`10.1016/j.cpc.2009.09.018`, 2010. + .. [6] Ishigami, T. and T. Homma. "An importance quantification technique + in uncertainty analysis for computer models." IEEE, + :doi:`10.1109/ISUMA.1990.151285`, 1990. + + Examples + -------- + The following is an example with the Ishigami function [6]_ + + .. math:: + + Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1, + + with :math:`\mathbf{x} \in [-\pi, \pi]^3`. This function exhibits strong + non-linearity and non-monotonicity. + + Remember, Sobol' indices assumes that samples are independently + distributed. In this case we use a uniform distribution on each marginals. + + >>> import numpy as np + >>> from scipy.stats import sobol_indices, uniform + >>> rng = np.random.default_rng() + >>> def f_ishigami(x): + ... f_eval = ( + ... np.sin(x[0]) + ... + 7 * np.sin(x[1])**2 + ... + 0.1 * (x[2]**4) * np.sin(x[0]) + ... ) + ... return f_eval + >>> indices = sobol_indices( + ... func=f_ishigami, n=1024, + ... dists=[ + ... uniform(loc=-np.pi, scale=2*np.pi), + ... uniform(loc=-np.pi, scale=2*np.pi), + ... uniform(loc=-np.pi, scale=2*np.pi) + ... ], + ... random_state=rng + ... ) + >>> indices.first_order + array([0.31637954, 0.43781162, 0.00318825]) + >>> indices.total_order + array([0.56122127, 0.44287857, 0.24229595]) + + Confidence interval can be obtained using bootstrapping. + + >>> boot = indices.bootstrap() + + Then, this information can be easily visualized. 
+ + >>> import matplotlib.pyplot as plt + >>> fig, axs = plt.subplots(1, 2, figsize=(9, 4)) + >>> _ = axs[0].errorbar( + ... [1, 2, 3], indices.first_order, fmt='o', + ... yerr=[ + ... indices.first_order - boot.first_order.confidence_interval.low, + ... boot.first_order.confidence_interval.high - indices.first_order + ... ], + ... ) + >>> axs[0].set_ylabel("First order Sobol' indices") + >>> axs[0].set_xlabel('Input parameters') + >>> axs[0].set_xticks([1, 2, 3]) + >>> _ = axs[1].errorbar( + ... [1, 2, 3], indices.total_order, fmt='o', + ... yerr=[ + ... indices.total_order - boot.total_order.confidence_interval.low, + ... boot.total_order.confidence_interval.high - indices.total_order + ... ], + ... ) + >>> axs[1].set_ylabel("Total order Sobol' indices") + >>> axs[1].set_xlabel('Input parameters') + >>> axs[1].set_xticks([1, 2, 3]) + >>> plt.tight_layout() + >>> plt.show() + + .. note:: + + By default, `scipy.stats.uniform` has support ``[0, 1]``. + Using the parameters ``loc`` and ``scale``, one obtains the uniform + distribution on ``[loc, loc + scale]``. + + This result is particularly interesting because the first order index + :math:`S_{x_3} = 0` whereas its total order is :math:`S_{T_{x_3}} = 0.244`. + This means that higher order interactions with :math:`x_3` are responsible + for the difference. Almost 25% of the observed variance + on the QoI is due to the correlations between :math:`x_3` and :math:`x_1`, + although :math:`x_3` by itself has no impact on the QoI. + + The following gives a visual explanation of Sobol' indices on this + function. Let's generate 1024 samples in :math:`[-\pi, \pi]^3` and + calculate the value of the output. + + >>> from scipy.stats import qmc + >>> n_dim = 3 + >>> p_labels = ['$x_1$', '$x_2$', '$x_3$'] + >>> sample = qmc.Sobol(d=n_dim, seed=rng).random(1024) + >>> sample = qmc.scale( + ... sample=sample, + ... l_bounds=[-np.pi, -np.pi, -np.pi], + ... u_bounds=[np.pi, np.pi, np.pi] + ... 
) + >>> output = f_ishigami(sample.T) + + Now we can do scatter plots of the output with respect to each parameter. + This gives a visual way to understand how each parameter impacts the + output of the function. + + >>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4)) + >>> for i in range(n_dim): + ... xi = sample[:, i] + ... ax[i].scatter(xi, output, marker='+') + ... ax[i].set_xlabel(p_labels[i]) + >>> ax[0].set_ylabel('Y') + >>> plt.tight_layout() + >>> plt.show() + + Now Sobol' goes a step further: + by conditioning the output value by given values of the parameter + (black lines), the conditional output mean is computed. It corresponds to + the term :math:`\mathbb{E}(Y|x_i)`. Taking the variance of this term gives + the numerator of the Sobol' indices. + + >>> mini = np.min(output) + >>> maxi = np.max(output) + >>> n_bins = 10 + >>> bins = np.linspace(-np.pi, np.pi, num=n_bins, endpoint=False) + >>> dx = bins[1] - bins[0] + >>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4)) + >>> for i in range(n_dim): + ... xi = sample[:, i] + ... ax[i].scatter(xi, output, marker='+') + ... ax[i].set_xlabel(p_labels[i]) + ... for bin_ in bins: + ... idx = np.where((bin_ <= xi) & (xi <= bin_ + dx)) + ... xi_ = xi[idx] + ... y_ = output[idx] + ... ave_y_ = np.mean(y_) + ... ax[i].plot([bin_ + dx/2] * 2, [mini, maxi], c='k') + ... ax[i].scatter(bin_ + dx/2, ave_y_, c='r') + >>> ax[0].set_ylabel('Y') + >>> plt.tight_layout() + >>> plt.show() + + Looking at :math:`x_3`, the variance + of the mean is zero leading to :math:`S_{x_3} = 0`. But we can further + observe that the variance of the output is not constant along the parameter + values of :math:`x_3`. This heteroscedasticity is explained by higher order + interactions. Moreover, an heteroscedasticity is also noticeable on + :math:`x_1` leading to an interaction between :math:`x_3` and :math:`x_1`. + On :math:`x_2`, the variance seems to be constant and thus null interaction + with this parameter can be supposed. 
+ + This case is fairly simple to analyse visually---although it is only a + qualitative analysis. Nevertheless, when the number of input parameters + increases such analysis becomes unrealistic as it would be difficult to + conclude on high-order terms. Hence the benefit of using Sobol' indices. + + """ + random_state = check_random_state(random_state) + + n_ = int(n) + if not (n_ & (n_ - 1) == 0) or n != n_: + raise ValueError( + "The balance properties of Sobol' points require 'n' " + "to be a power of 2." + ) + n = n_ + + if not callable(method): + indices_methods: dict[str, Callable] = { + "saltelli_2010": saltelli_2010, + } + try: + method = method.lower() # type: ignore[assignment] + indices_method_ = indices_methods[method] + except KeyError as exc: + message = ( + f"{method!r} is not a valid 'method'. It must be one of" + f" {set(indices_methods)!r} or a callable." + ) + raise ValueError(message) from exc + else: + indices_method_ = method + sig = inspect.signature(indices_method_) + + if set(sig.parameters) != {'f_A', 'f_B', 'f_AB'}: + message = ( + "If 'method' is a callable, it must have the following" + f" signature: {inspect.signature(saltelli_2010)}" + ) + raise ValueError(message) + + def indices_method(f_A, f_B, f_AB): + """Wrap indices method to ensure proper output dimension. + + 1D when single output, 2D otherwise. + """ + return np.squeeze(indices_method_(f_A=f_A, f_B=f_B, f_AB=f_AB)) + + if callable(func): + if dists is None: + raise ValueError( + "'dists' must be defined when 'func' is a callable." + ) + + def wrapped_func(x): + return np.atleast_2d(func(x)) + + A, B = sample_A_B(n=n, dists=dists, random_state=random_state) + AB = sample_AB(A=A, B=B) + + f_A = wrapped_func(A) + + if f_A.shape[1] != n: + raise ValueError( + "'func' output should have a shape ``(s, -1)`` with ``s`` " + "the number of output." 
+ ) + + def funcAB(AB): + d, d, n = AB.shape + AB = np.moveaxis(AB, 0, -1).reshape(d, n*d) + f_AB = wrapped_func(AB) + return np.moveaxis(f_AB.reshape((-1, n, d)), -1, 0) + + f_B = wrapped_func(B) + f_AB = funcAB(AB) + else: + message = ( + "When 'func' is a dictionary, it must contain the following " + "keys: 'f_A', 'f_B' and 'f_AB'." + "'f_A' and 'f_B' should have a shape ``(s, n)`` and 'f_AB' " + "should have a shape ``(d, s, n)``." + ) + try: + f_A, f_B, f_AB = np.atleast_2d( + func['f_A'], func['f_B'], func['f_AB'] + ) + except KeyError as exc: + raise ValueError(message) from exc + + if f_A.shape[1] != n or f_A.shape != f_B.shape or \ + f_AB.shape == f_A.shape or f_AB.shape[-1] % n != 0: + raise ValueError(message) + + # Normalization by mean + # Sobol', I. and Levitan, Y. L. (1999). On the use of variance reducing + # multipliers in monte carlo computations of a global sensitivity index. + # Computer Physics Communications, 117(1) :52-61. + mean = np.mean([f_A, f_B], axis=(0, -1)).reshape(-1, 1) + f_A -= mean + f_B -= mean + f_AB -= mean + + # Compute indices + # Filter warnings for constant output as var = 0 + with np.errstate(divide='ignore', invalid='ignore'): + first_order, total_order = indices_method(f_A=f_A, f_B=f_B, f_AB=f_AB) + + # null variance means null indices + first_order[~np.isfinite(first_order)] = 0 + total_order[~np.isfinite(total_order)] = 0 + + res = dict( + first_order=first_order, + total_order=total_order, + _indices_method=indices_method, + _f_A=f_A, + _f_B=f_B, + _f_AB=f_AB + ) + + if callable(func): + res.update( + dict( + _A=A, + _B=B, + _AB=AB, + ) + ) + + return SobolResult(**res) diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/contingency.py b/llava_next/lib/python3.10/site-packages/scipy/stats/contingency.py new file mode 100644 index 0000000000000000000000000000000000000000..399322475b08953a26a23135d0244d87890467bc --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/contingency.py @@ -0,0 
+1,468 @@ +""" +Contingency table functions (:mod:`scipy.stats.contingency`) +============================================================ + +Functions for creating and analyzing contingency tables. + +.. currentmodule:: scipy.stats.contingency + +.. autosummary:: + :toctree: generated/ + + chi2_contingency + relative_risk + odds_ratio + crosstab + association + + expected_freq + margins + +""" + + +from functools import reduce +import math +import numpy as np +from ._stats_py import power_divergence +from ._relative_risk import relative_risk +from ._crosstab import crosstab +from ._odds_ratio import odds_ratio +from scipy._lib._bunch import _make_tuple_bunch + + +__all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab', + 'association', 'relative_risk', 'odds_ratio'] + + +def margins(a): + """Return a list of the marginal sums of the array `a`. + + Parameters + ---------- + a : ndarray + The array for which to compute the marginal sums. + + Returns + ------- + margsums : list of ndarrays + A list of length `a.ndim`. `margsums[k]` is the result + of summing `a` over all axes except `k`; it has the same + number of dimensions as `a`, but the length of each axis + except axis `k` will be 1. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats.contingency import margins + + >>> a = np.arange(12).reshape(2, 6) + >>> a + array([[ 0, 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10, 11]]) + >>> m0, m1 = margins(a) + >>> m0 + array([[15], + [51]]) + >>> m1 + array([[ 6, 8, 10, 12, 14, 16]]) + + >>> b = np.arange(24).reshape(2,3,4) + >>> m0, m1, m2 = margins(b) + >>> m0 + array([[[ 66]], + [[210]]]) + >>> m1 + array([[[ 60], + [ 92], + [124]]]) + >>> m2 + array([[[60, 66, 72, 78]]]) + """ + margsums = [] + ranged = list(range(a.ndim)) + for k in ranged: + marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k]) + margsums.append(marg) + return margsums + + +def expected_freq(observed): + """ + Compute the expected frequencies from a contingency table. + + Given an n-dimensional contingency table of observed frequencies, + compute the expected frequencies for the table based on the marginal + sums under the assumption that the groups associated with each + dimension are independent. + + Parameters + ---------- + observed : array_like + The table of observed frequencies. (While this function can handle + a 1-D array, that case is trivial. Generally `observed` is at + least 2-D.) + + Returns + ------- + expected : ndarray of float64 + The expected frequencies, based on the marginal sums of the table. + Same shape as `observed`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats.contingency import expected_freq + >>> observed = np.array([[10, 10, 20],[20, 20, 20]]) + >>> expected_freq(observed) + array([[ 12., 12., 16.], + [ 18., 18., 24.]]) + + """ + # Typically `observed` is an integer array. If `observed` has a large + # number of dimensions or holds large values, some of the following + # computations may overflow, so we first switch to floating point. + observed = np.asarray(observed, dtype=np.float64) + + # Create a list of the marginal sums. 
+ margsums = margins(observed) + + # Create the array of expected frequencies. The shapes of the + # marginal sums returned by apply_over_axes() are just what we + # need for broadcasting in the following product. + d = observed.ndim + expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1) + return expected + + +Chi2ContingencyResult = _make_tuple_bunch( + 'Chi2ContingencyResult', + ['statistic', 'pvalue', 'dof', 'expected_freq'], [] +) + + +def chi2_contingency(observed, correction=True, lambda_=None): + """Chi-square test of independence of variables in a contingency table. + + This function computes the chi-square statistic and p-value for the + hypothesis test of independence of the observed frequencies in the + contingency table [1]_ `observed`. The expected frequencies are computed + based on the marginal sums under the assumption of independence; see + `scipy.stats.contingency.expected_freq`. The number of degrees of + freedom is (expressed using numpy functions and attributes):: + + dof = observed.size - sum(observed.shape) + observed.ndim - 1 + + + Parameters + ---------- + observed : array_like + The contingency table. The table contains the observed frequencies + (i.e. number of occurrences) in each category. In the two-dimensional + case, the table is often described as an "R x C table". + correction : bool, optional + If True, *and* the degrees of freedom is 1, apply Yates' correction + for continuity. The effect of the correction is to adjust each + observed value by 0.5 towards the corresponding expected value. + lambda_ : float or str, optional + By default, the statistic computed in this test is Pearson's + chi-squared statistic [2]_. `lambda_` allows a statistic from the + Cressie-Read power divergence family [3]_ to be used instead. See + `scipy.stats.power_divergence` for details. + + Returns + ------- + res : Chi2ContingencyResult + An object containing attributes: + + statistic : float + The test statistic. 
+ pvalue : float + The p-value of the test. + dof : int + The degrees of freedom. + expected_freq : ndarray, same shape as `observed` + The expected frequencies, based on the marginal sums of the table. + + See Also + -------- + scipy.stats.contingency.expected_freq + scipy.stats.fisher_exact + scipy.stats.chisquare + scipy.stats.power_divergence + scipy.stats.barnard_exact + scipy.stats.boschloo_exact + + Notes + ----- + An often quoted guideline for the validity of this calculation is that + the test should be used only if the observed and expected frequencies + in each cell are at least 5. + + This is a test for the independence of different categories of a + population. The test is only meaningful when the dimension of + `observed` is two or more. Applying the test to a one-dimensional + table will always result in `expected` equal to `observed` and a + chi-square statistic equal to 0. + + This function does not handle masked arrays, because the calculation + does not make sense with missing values. + + Like `scipy.stats.chisquare`, this function computes a chi-square + statistic; the convenience this function provides is to figure out the + expected frequencies and degrees of freedom from the given contingency + table. If these were already known, and if the Yates' correction was not + required, one could use `scipy.stats.chisquare`. That is, if one calls:: + + res = chi2_contingency(obs, correction=False) + + then the following is true:: + + (res.statistic, res.pvalue) == stats.chisquare(obs.ravel(), + f_exp=ex.ravel(), + ddof=obs.size - 1 - dof) + + The `lambda_` argument was added in version 0.13.0 of scipy. + + References + ---------- + .. [1] "Contingency table", + https://en.wikipedia.org/wiki/Contingency_table + .. [2] "Pearson's chi-squared test", + https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test + .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit + Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984), + pp. 440-464. 
+ .. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of + Cardiovascular Events in Women and Men: A Sex-Specific + Meta-analysis of Randomized Controlled Trials." + JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006. + + Examples + -------- + In [4]_, the use of aspirin to prevent cardiovascular events in women + and men was investigated. The study notably concluded: + + ...aspirin therapy reduced the risk of a composite of + cardiovascular events due to its effect on reducing the risk of + ischemic stroke in women [...] + + The article lists studies of various cardiovascular events. Let's + focus on the ischemic stoke in women. + + The following table summarizes the results of the experiment in which + participants took aspirin or a placebo on a regular basis for several + years. Cases of ischemic stroke were recorded:: + + Aspirin Control/Placebo + Ischemic stroke 176 230 + No stroke 21035 21018 + + Is there evidence that the aspirin reduces the risk of ischemic stroke? + We begin by formulating a null hypothesis :math:`H_0`: + + The effect of aspirin is equivalent to that of placebo. + + Let's assess the plausibility of this hypothesis with + a chi-square test. + + >>> import numpy as np + >>> from scipy.stats import chi2_contingency + >>> table = np.array([[176, 230], [21035, 21018]]) + >>> res = chi2_contingency(table) + >>> res.statistic + 6.892569132546561 + >>> res.pvalue + 0.008655478161175739 + + Using a significance level of 5%, we would reject the null hypothesis in + favor of the alternative hypothesis: "the effect of aspirin + is not equivalent to the effect of placebo". + Because `scipy.stats.contingency.chi2_contingency` performs a two-sided + test, the alternative hypothesis does not indicate the direction of the + effect. We can use `stats.contingency.odds_ratio` to support the + conclusion that aspirin *reduces* the risk of ischemic stroke. 
+ + Below are further examples showing how larger contingency tables can be + tested. + + A two-way example (2 x 3): + + >>> obs = np.array([[10, 10, 20], [20, 20, 20]]) + >>> res = chi2_contingency(obs) + >>> res.statistic + 2.7777777777777777 + >>> res.pvalue + 0.24935220877729619 + >>> res.dof + 2 + >>> res.expected_freq + array([[ 12., 12., 16.], + [ 18., 18., 24.]]) + + Perform the test using the log-likelihood ratio (i.e. the "G-test") + instead of Pearson's chi-squared statistic. + + >>> res = chi2_contingency(obs, lambda_="log-likelihood") + >>> res.statistic + 2.7688587616781319 + >>> res.pvalue + 0.25046668010954165 + + A four-way example (2 x 2 x 2 x 2): + + >>> obs = np.array( + ... [[[[12, 17], + ... [11, 16]], + ... [[11, 12], + ... [15, 16]]], + ... [[[23, 15], + ... [30, 22]], + ... [[14, 17], + ... [15, 16]]]]) + >>> res = chi2_contingency(obs) + >>> res.statistic + 8.7584514426741897 + >>> res.pvalue + 0.64417725029295503 + """ + observed = np.asarray(observed) + if np.any(observed < 0): + raise ValueError("All values in `observed` must be nonnegative.") + if observed.size == 0: + raise ValueError("No data; `observed` has size 0.") + + expected = expected_freq(observed) + if np.any(expected == 0): + # Include one of the positions where expected is zero in + # the exception message. + zeropos = list(zip(*np.nonzero(expected == 0)))[0] + raise ValueError("The internally computed table of expected " + f"frequencies has a zero element at {zeropos}.") + + # The degrees of freedom + dof = expected.size - sum(expected.shape) + expected.ndim - 1 + + if dof == 0: + # Degenerate case; this occurs when `observed` is 1D (or, more + # generally, when it has only one nontrivial dimension). In this + # case, we also have observed == expected, so chi2 is 0. + chi2 = 0.0 + p = 1.0 + else: + if dof == 1 and correction: + # Adjust `observed` according to Yates' correction for continuity. 
+ # Magnitude of correction no bigger than difference; see gh-13875 + diff = expected - observed + direction = np.sign(diff) + magnitude = np.minimum(0.5, np.abs(diff)) + observed = observed + magnitude * direction + + chi2, p = power_divergence(observed, expected, + ddof=observed.size - 1 - dof, axis=None, + lambda_=lambda_) + + return Chi2ContingencyResult(chi2, p, dof, expected) + + +def association(observed, method="cramer", correction=False, lambda_=None): + """Calculates degree of association between two nominal variables. + + The function provides the option for computing one of three measures of + association between two nominal variables from the data given in a 2d + contingency table: Tschuprow's T, Pearson's Contingency Coefficient + and Cramer's V. + + Parameters + ---------- + observed : array-like + The array of observed values + method : {"cramer", "tschuprow", "pearson"} (default = "cramer") + The association test statistic. + correction : bool, optional + Inherited from `scipy.stats.contingency.chi2_contingency()` + lambda_ : float or str, optional + Inherited from `scipy.stats.contingency.chi2_contingency()` + + Returns + ------- + statistic : float + Value of the test statistic + + Notes + ----- + Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient, all + measure the degree to which two nominal or ordinal variables are related, + or the level of their association. This differs from correlation, although + many often mistakenly consider them equivalent. Correlation measures in + what way two variables are related, whereas, association measures how + related the variables are. As such, association does not subsume + independent variables, and is rather a test of independence. A value of + 1.0 indicates perfect association, and 0.0 means the variables have no + association. + + Both the Cramer's V and Tschuprow's T are extensions of the phi + coefficient. 
Moreover, due to the close relationship between the + Cramer's V and Tschuprow's T the returned values can often be similar + or even equivalent. They are likely to diverge more as the array shape + diverges from a 2x2. + + References + ---------- + .. [1] "Tschuprow's T", + https://en.wikipedia.org/wiki/Tschuprow's_T + .. [2] Tschuprow, A. A. (1939) + Principles of the Mathematical Theory of Correlation; + translated by M. Kantorowitsch. W. Hodge & Co. + .. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V + .. [4] "Nominal Association: Phi and Cramer's V", + http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html + .. [5] Gingrich, Paul, "Association Between Variables", + http://uregina.ca/~gingrich/ch11a.pdf + + Examples + -------- + An example with a 4x2 contingency table: + + >>> import numpy as np + >>> from scipy.stats.contingency import association + >>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]]) + + Pearson's contingency coefficient + + >>> association(obs4x2, method="pearson") + 0.18303298140595667 + + Cramer's V + + >>> association(obs4x2, method="cramer") + 0.18617813077483678 + + Tschuprow's T + + >>> association(obs4x2, method="tschuprow") + 0.14146478765062995 + """ + arr = np.asarray(observed) + if not np.issubdtype(arr.dtype, np.integer): + raise ValueError("`observed` must be an integer array.") + + if len(arr.shape) != 2: + raise ValueError("method only accepts 2d arrays") + + chi2_stat = chi2_contingency(arr, correction=correction, + lambda_=lambda_) + + phi2 = chi2_stat.statistic / arr.sum() + n_rows, n_cols = arr.shape + if method == "cramer": + value = phi2 / min(n_cols - 1, n_rows - 1) + elif method == "tschuprow": + value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1)) + elif method == 'pearson': + value = phi2 / (1 + phi2) + else: + raise ValueError("Invalid argument value: 'method' argument must " + "be 'cramer', 'tschuprow', or 'pearson'") + + return math.sqrt(value) diff --git 
a/llava_next/lib/python3.10/site-packages/scipy/stats/distributions.py b/llava_next/lib/python3.10/site-packages/scipy/stats/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9c37aa98c9545b2616c8d32e8f676d8d49289e --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/distributions.py @@ -0,0 +1,24 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +# NOTE: To look at history using `git blame`, use `git blame -M -C -C` +# instead of `git blame -Lxxx,+x`. +# +from ._distn_infrastructure import (rv_discrete, rv_continuous, rv_frozen) # noqa: F401 + +from . import _continuous_distns +from . import _discrete_distns + +from ._continuous_distns import * # noqa: F403 +from ._levy_stable import levy_stable +from ._discrete_distns import * # noqa: F403 +from ._entropy import entropy + +# For backwards compatibility e.g. pymc expects distributions.__all__. +__all__ = ['rv_discrete', 'rv_continuous', 'rv_histogram', 'entropy'] # noqa: F405 + +# Add only the distribution names, not the *_gen names. +__all__ += _continuous_distns._distn_names +__all__ += ['levy_stable'] +__all__ += _discrete_distns._distn_names diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/mstats.py b/llava_next/lib/python3.10/site-packages/scipy/stats/mstats.py new file mode 100644 index 0000000000000000000000000000000000000000..88016af71803dc5c4ebadba168f22cdcd8273dbb --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/mstats.py @@ -0,0 +1,140 @@ +""" +=================================================================== +Statistical functions for masked arrays (:mod:`scipy.stats.mstats`) +=================================================================== + +.. currentmodule:: scipy.stats.mstats + +This module contains a large number of statistical functions that can +be used with masked arrays. 
+ +Most of these functions are similar to those in `scipy.stats` but might +have small differences in the API or in the algorithm used. Since this +is a relatively new package, some API changes are still possible. + +Summary statistics +================== + +.. autosummary:: + :toctree: generated/ + + describe + gmean + hmean + kurtosis + mode + mquantiles + hdmedian + hdquantiles + hdquantiles_sd + idealfourths + plotting_positions + meppf + moment + skew + tmean + tvar + tmin + tmax + tsem + variation + find_repeats + sem + trimmed_mean + trimmed_mean_ci + trimmed_std + trimmed_var + +Frequency statistics +==================== + +.. autosummary:: + :toctree: generated/ + + scoreatpercentile + +Correlation functions +===================== + +.. autosummary:: + :toctree: generated/ + + f_oneway + pearsonr + spearmanr + pointbiserialr + kendalltau + kendalltau_seasonal + linregress + siegelslopes + theilslopes + sen_seasonal_slopes + +Statistical tests +================= + +.. autosummary:: + :toctree: generated/ + + ttest_1samp + ttest_onesamp + ttest_ind + ttest_rel + chisquare + kstest + ks_2samp + ks_1samp + ks_twosamp + mannwhitneyu + rankdata + kruskal + kruskalwallis + friedmanchisquare + brunnermunzel + skewtest + kurtosistest + normaltest + +Transformations +=============== + +.. autosummary:: + :toctree: generated/ + + obrientransform + trim + trima + trimmed_stde + trimr + trimtail + trimboth + winsorize + zmap + zscore + +Other +===== + +.. autosummary:: + :toctree: generated/ + + argstoarray + count_tied_groups + msign + compare_medians_ms + median_cihs + mjci + mquantiles_cimj + rsh + +""" +from . import _mstats_basic +from . 
import _mstats_extras +from ._mstats_basic import * # noqa: F403 +from ._mstats_extras import * # noqa: F403 +# Functions that support masked array input in stats but need to be kept in the +# mstats namespace for backwards compatibility: +from scipy.stats import gmean, hmean, zmap, zscore, chisquare + +__all__ = _mstats_basic.__all__ + _mstats_extras.__all__ +__all__ += ['gmean', 'hmean', 'zmap', 'zscore', 'chisquare'] diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/mstats_extras.py b/llava_next/lib/python3.10/site-packages/scipy/stats/mstats_extras.py new file mode 100644 index 0000000000000000000000000000000000000000..fec695329cf2c2d58a4918cc99e209c0650c3ea6 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/scipy/stats/mstats_extras.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'compare_medians_ms', + 'hdquantiles', 'hdmedian', 'hdquantiles_sd', + 'idealfourths', + 'median_cihs','mjci','mquantiles_cimj', + 'rsh', + 'trimmed_mean_ci', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="mstats_extras", + private_modules=["_mstats_extras"], all=__all__, + attribute=name, correct_module="mstats") diff --git a/parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/RECORD b/parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..d9e461d06ff155fb8d95cb950737445d3b836837 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/RECORD @@ -0,0 +1,99 @@ +bitsandbytes-0.41.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 
+bitsandbytes-0.41.0.dist-info/LICENSE,sha256=UkEte8fOQVfqYou6rLiCngqcs8WPV_mRdhJryM8r_IU,1086 +bitsandbytes-0.41.0.dist-info/METADATA,sha256=z88wKooZxLJ9Z5T3i4YEWBIzRKR9o3DZIes663fhUu4,9810 +bitsandbytes-0.41.0.dist-info/NOTICE.md,sha256=_4zDL2L8BqUwtmvoznR_wqhQmsP2QwdXHrAHnBMzAl8,265 +bitsandbytes-0.41.0.dist-info/RECORD,, +bitsandbytes-0.41.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bitsandbytes-0.41.0.dist-info/WHEEL,sha256=AtBG6SXL3KF_v0NxLf0ehyVOh0cold-JbJYXNGorC6Q,92 +bitsandbytes-0.41.0.dist-info/top_level.txt,sha256=bK-Zzu-JyIIh4njm8jTYcbuqX-Z80XTcDal4lXCG0-M,13 +bitsandbytes/__init__.py,sha256=mQQknbw8xSpKDtEJgVEiyCemE4HaB-FtAddxY2-Uyhc,670 +bitsandbytes/__main__.py,sha256=rWjs6LsifG_Vglj3WM4brY2IOCjwKpAjuBP3OIzYFPU,4014 +bitsandbytes/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/__pycache__/__main__.cpython-310.pyc,, +bitsandbytes/__pycache__/cextension.cpython-310.pyc,, +bitsandbytes/__pycache__/functional.cpython-310.pyc,, +bitsandbytes/__pycache__/utils.cpython-310.pyc,, +bitsandbytes/autograd/__init__.py,sha256=Ltb59FJrcWYVsTfGW6SscEZtiDhHZe7EFrYnIhnASug,67 +bitsandbytes/autograd/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/autograd/__pycache__/_functions.cpython-310.pyc,, +bitsandbytes/autograd/_functions.py,sha256=ER9xwzolX9T32Xu0VFbvpoRdDiCas1neEaKOZARI2Kw,22361 +bitsandbytes/cextension.py,sha256=klJwL-8ZPylUOETDTW-fvUbZ_Bt_rdB6wRDND1fB_wk,1635 +bitsandbytes/cuda_setup/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bitsandbytes/cuda_setup/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/cuda_setup/__pycache__/env_vars.cpython-310.pyc,, +bitsandbytes/cuda_setup/__pycache__/main.cpython-310.pyc,, +bitsandbytes/cuda_setup/env_vars.py,sha256=4T8i0LKAbE6tyDceGbJxdW1o4Nm4_vDLY6br39VwCxc,1614 +bitsandbytes/cuda_setup/main.py,sha256=o9YcJj87_t1yADdrMWY0c_XQRyX_8t3XGjwiERKtaVk,17946 +bitsandbytes/functional.py,sha256=vw-RE4CfEirCvM-O8rsiGKvAGIM5cKWNM0Ekbr8-xXc,79598 
+bitsandbytes/libbitsandbytes_cpu.so,sha256=nejNfivapxN6MN_bJxFfR423YImIeqNVhXdts2BcDR8,41608 +bitsandbytes/libbitsandbytes_cuda110.so,sha256=1NM_-9xHfCz2djWods0YXQcDKITkX3KSJfklrUESkKw,5938904 +bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so,sha256=q_1Zn2FlCd6LaXYwjkDrE_rq0lFuNwDjGBJlWM_Nufg,11110784 +bitsandbytes/libbitsandbytes_cuda111.so,sha256=JBLZ6wBWB5x1DasFqxcog59xxks5XHzLAdQFGZjCiDY,8974040 +bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so,sha256=1qsndcAVNcCz-LcXytWYx81hPJgifIgNDw1MSx81ays,20244864 +bitsandbytes/libbitsandbytes_cuda114.so,sha256=kh0dVhz5EoSIcpFoRt9vB9rtMSYayFrT1uQmDAP_nCI,9313912 +bitsandbytes/libbitsandbytes_cuda114_nocublaslt.so,sha256=7BfmpKsEYpxamIB7a9WhjhXN7FC1o0FpyqO8IXu1Ep4,20973856 +bitsandbytes/libbitsandbytes_cuda115.so,sha256=ncH3CjlEB0fyXvvj9my_SkUyfGwj_FVo4D-adRX63Gs,9310152 +bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so,sha256=1vB8bV-E6pXTKZzOmfxFWiz3l7LrtQuSAh9n33oY1hM,20925040 +bitsandbytes/libbitsandbytes_cuda117.so,sha256=bEkYZLxEKQZvsu3Agy-aDcIC2ZqQ8B6JDBHL2n1Osq0,9117944 +bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so,sha256=jqc_QsosEBzjd7cNFNA-6QG5e1GGG1cLfEoh7d23zxA,20741032 +bitsandbytes/libbitsandbytes_cuda118.so,sha256=B2MQaG_5NLc8iVHawOSu3V-ABcpbos6QdpSLTQ0IDXY,14918184 +bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so,sha256=GaYqo8N7cNkxbAhI-dizyyBbuOqbEbNRR0nyh8LIWW4,26516696 +bitsandbytes/libbitsandbytes_cuda120.so,sha256=1olVGrA_Frm3ZzYaUxDKRyeWXbJlTTWhlPjO1a0il_o,14504296 +bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so,sha256=VUXyIHZb4V6-SOGPVPWVHyeKafG9xQPLEQIelTh69Oo,25709592 +bitsandbytes/libbitsandbytes_cuda121.so,sha256=XRKDct-9s0poQp0sNFSgdvrGUMed2lRror6aVBU3hGM,14512488 +bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so,sha256=YeYH36m5h2N7tULUoZ8Gt-CAfb8szLDPW5m9OLAQFAE,25721880 +bitsandbytes/libbitsandbytes_cuda122.so,sha256=FrhXhmfraDbGt5I6OzUI1igJ5OkUKWdKDDq5fPYMU0k,14561032 
+bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so,sha256=WPSiBD_ozuUsk_aRdoJd5XVTcnpannmEmR6yok2mZTA,25803272 +bitsandbytes/nn/__init__.py,sha256=i-gJR2uQrRvn8zZCZcS1KC0SbsUqCKTta4aV7HXZTT4,446 +bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/nn/__pycache__/modules.cpython-310.pyc,, +bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc,, +bitsandbytes/nn/modules.py,sha256=sIwAAAtMnk9s95HHTOC10rKERMvAl5gw03dCPL12oBY,20528 +bitsandbytes/nn/triton_based_modules.py,sha256=eMEldLd7GX0Dc3dzX0XZpfgzofBPRAi-z1NXf84wCPs,9843 +bitsandbytes/optim/__init__.py,sha256=TSl80yMFkwGBl8N0FBFcfBLt2vt4cZn-hbkuwHGuCUE,794 +bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/adagrad.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/adam.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/adamw.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/lamb.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/lars.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/lion.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/optimizer.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/rmsprop.cpython-310.pyc,, +bitsandbytes/optim/__pycache__/sgd.cpython-310.pyc,, +bitsandbytes/optim/adagrad.py,sha256=E4KsNJKOB2VfgkyKEoeYwFFXnedsxHZItdfzwc5_cdE,3719 +bitsandbytes/optim/adam.py,sha256=nHHvXoeiAuosn4a9VWI3Z7_XmvYC6bOHb8en6mxiwkA,12776 +bitsandbytes/optim/adamw.py,sha256=byibv4xoBM7FUK8FScRTx2KbI4-2Mi0yB8WJCb2x3wE,2699 +bitsandbytes/optim/lamb.py,sha256=hfH4H9eVAHcbjL04DAI_lcPD1OPAmcY4_myow-o21aw,2313 +bitsandbytes/optim/lars.py,sha256=PeUB8RlfaRtHEa-ZZZkrKDdmkHa7XEEfU81irU-mKsY,5653 +bitsandbytes/optim/lion.py,sha256=jANwqVZSAxNZnoqi_OQ9XG8hKa6e84mkwJ9CchtpLHs,2304 +bitsandbytes/optim/optimizer.py,sha256=219zPzx9dpeY0VndzlXt6jn2yV9sEiSXkrxe26wXjIo,25167 +bitsandbytes/optim/rmsprop.py,sha256=1zGT9JIZh214fbBZ-CTirVKk1rQxSZe-BRJzhRtYL2U,2785 +bitsandbytes/optim/sgd.py,sha256=YHVUeEkwxgYx_0GhH0Et6fCpk7rfhboDR2F06jRWz4E,2340 
+bitsandbytes/research/__init__.py,sha256=_MilJdwSRWObRfzzy14WD6HsJa6okT4d5YxH4aB9zg4,119 +bitsandbytes/research/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/research/autograd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc,, +bitsandbytes/research/autograd/_functions.py,sha256=k72rcf4hT3M5GOpGoijWkpTAqjRNoecGlOHmTTn3n80,15874 +bitsandbytes/research/nn/__init__.py,sha256=j5XA_2ZA6efMtcbuUCyegfCLkDDQuL3ix5xS4yKZayY,53 +bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc,, +bitsandbytes/research/nn/modules.py,sha256=EnI2qVTosAMkH4G1fQleA0zvm8dZR9G-GJ4pFDo8V9M,2357 +bitsandbytes/triton/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc,, +bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc,, +bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequanitze.cpython-310.pyc,, +bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc,, +bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc,, +bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc,, +bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc,, +bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc,, +bitsandbytes/triton/dequantize_rowwise.py,sha256=qdh3f4O53faM6SFT_aYvrytWF_FQW3q2bhBll6Uwfc4,2193 +bitsandbytes/triton/int8_matmul_mixed_dequanitze.py,sha256=QJ_hrZ94ZthnoPD0TCp5ZCPAMkxNNQQY-UNg50TWwHo,8256 +bitsandbytes/triton/int8_matmul_rowwise_dequantize.py,sha256=EMiY3nfx0LIvYEGUqtzcfUonQxwoDcppYli9Qd6kViw,8240 +bitsandbytes/triton/quantize_columnwise_and_transpose.py,sha256=K2fFegPtSsi2tgKxb5goO8YpUmQ6wgTvsXabgTRAFNI,2749 
+bitsandbytes/triton/quantize_global.py,sha256=5in9Plx1Kgf6Nx5B1RBXCiJnb0G4qwraGADNiq1LtVc,3957 +bitsandbytes/triton/quantize_rowwise.py,sha256=sraX6TMubZQGiG9Gyh0UFzK823e_TkXZk9R1BILJdPU,2331 +bitsandbytes/triton/triton_utils.py,sha256=f7CP_3lvUoTQJ-xSp4wAfiU8uX_trtGdUsoLzlcsHQY,103 +bitsandbytes/utils.py,sha256=XASxdyR11sKKtY9DIwthe-zLU6v0vXwZzQvIVasjH7o,7499 diff --git a/parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/REQUESTED b/parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/top_level.txt b/parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..38cb1102777e6c9f61ee8e9ef4bfd8e37b59a368 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/top_level.txt @@ -0,0 +1 @@ +bitsandbytes diff --git a/parrot/lib/python3.10/site-packages/httpcore/__init__.py b/parrot/lib/python3.10/site-packages/httpcore/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..da95f8d0bb6bf7c91713dddc9615873d5bf268bc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/__init__.py @@ -0,0 +1,139 @@ +from ._api import request, stream +from ._async import ( + AsyncConnectionInterface, + AsyncConnectionPool, + AsyncHTTP2Connection, + AsyncHTTP11Connection, + AsyncHTTPConnection, + AsyncHTTPProxy, + AsyncSOCKSProxy, +) +from ._backends.base import ( + SOCKET_OPTION, + AsyncNetworkBackend, + AsyncNetworkStream, + NetworkBackend, + NetworkStream, +) +from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream +from ._backends.sync import SyncBackend +from ._exceptions import ( + ConnectError, + ConnectionNotAvailable, + ConnectTimeout, + LocalProtocolError, + NetworkError, + 
PoolTimeout, + ProtocolError, + ProxyError, + ReadError, + ReadTimeout, + RemoteProtocolError, + TimeoutException, + UnsupportedProtocol, + WriteError, + WriteTimeout, +) +from ._models import URL, Origin, Request, Response +from ._ssl import default_ssl_context +from ._sync import ( + ConnectionInterface, + ConnectionPool, + HTTP2Connection, + HTTP11Connection, + HTTPConnection, + HTTPProxy, + SOCKSProxy, +) + +# The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed. +try: + from ._backends.anyio import AnyIOBackend +except ImportError: # pragma: nocover + + class AnyIOBackend: # type: ignore + def __init__(self, *args, **kwargs): # type: ignore + msg = ( + "Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed." + ) + raise RuntimeError(msg) + + +# The 'httpcore.TrioBackend' class is conditional on 'trio' being installed. +try: + from ._backends.trio import TrioBackend +except ImportError: # pragma: nocover + + class TrioBackend: # type: ignore + def __init__(self, *args, **kwargs): # type: ignore + msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed." 
+ raise RuntimeError(msg) + + +__all__ = [ + # top-level requests + "request", + "stream", + # models + "Origin", + "URL", + "Request", + "Response", + # async + "AsyncHTTPConnection", + "AsyncConnectionPool", + "AsyncHTTPProxy", + "AsyncHTTP11Connection", + "AsyncHTTP2Connection", + "AsyncConnectionInterface", + "AsyncSOCKSProxy", + # sync + "HTTPConnection", + "ConnectionPool", + "HTTPProxy", + "HTTP11Connection", + "HTTP2Connection", + "ConnectionInterface", + "SOCKSProxy", + # network backends, implementations + "SyncBackend", + "AnyIOBackend", + "TrioBackend", + # network backends, mock implementations + "AsyncMockBackend", + "AsyncMockStream", + "MockBackend", + "MockStream", + # network backends, interface + "AsyncNetworkStream", + "AsyncNetworkBackend", + "NetworkStream", + "NetworkBackend", + # util + "default_ssl_context", + "SOCKET_OPTION", + # exceptions + "ConnectionNotAvailable", + "ProxyError", + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", + "UnsupportedProtocol", + "TimeoutException", + "PoolTimeout", + "ConnectTimeout", + "ReadTimeout", + "WriteTimeout", + "NetworkError", + "ConnectError", + "ReadError", + "WriteError", +] + +__version__ = "0.17.3" + + +__locals = locals() +for __name in __all__: + if not __name.startswith("__"): + setattr(__locals[__name], "__module__", "httpcore") # noqa diff --git a/parrot/lib/python3.10/site-packages/httpcore/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..642ac8440b5c0340c67757b8775c8770cf3f5187 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_api.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_api.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f938760e97b3bdbc4513863c1e8abc28ddece1da Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_api.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_models.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..909848d981926524d90bb7df617791fcce0e7879 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_models.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_ssl.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_ssl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..420ac57c91d47109756ea9ddaa0dea1813dff4f6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_ssl.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_synchronization.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_synchronization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f52474849101ed04b3a9a26e28b8e35baec0e50c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_synchronization.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_trace.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_trace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..281d80d94a4aee9c551ba248f9e0bdb6edb76e95 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_trace.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_utils.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54161660dffa801b692bdd2968e4d23a33a8840f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/__pycache__/_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_api.py b/parrot/lib/python3.10/site-packages/httpcore/_api.py new file mode 100644 index 0000000000000000000000000000000000000000..854235f5f6035031f0960d4a4b8834081d5df389 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_api.py @@ -0,0 +1,92 @@ +from contextlib import contextmanager +from typing import Iterator, Optional, Union + +from ._models import URL, Extensions, HeaderTypes, Response +from ._sync.connection_pool import ConnectionPool + + +def request( + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: HeaderTypes = None, + content: Union[bytes, Iterator[bytes], None] = None, + extensions: Optional[Extensions] = None, +) -> Response: + """ + Sends an HTTP request, returning the response. + + ``` + response = httpcore.request("GET", "https://www.example.com/") + ``` + + Arguments: + method: The HTTP method for the request. Typically one of `"GET"`, + `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`. + url: The URL of the HTTP request. Either as an instance of `httpcore.URL`, + or as str/bytes. + headers: The HTTP request headers. Either as a dictionary of str/bytes, + or as a list of two-tuples of str/bytes. + content: The content of the request body. Either as bytes, + or as a bytes iterator. + extensions: A dictionary of optional extra information included on the request. + Possible keys include `"timeout"`. + + Returns: + An instance of `httpcore.Response`. 
+ """ + with ConnectionPool() as pool: + return pool.request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + + +@contextmanager +def stream( + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: HeaderTypes = None, + content: Union[bytes, Iterator[bytes], None] = None, + extensions: Optional[Extensions] = None, +) -> Iterator[Response]: + """ + Sends an HTTP request, returning the response within a content manager. + + ``` + with httpcore.stream("GET", "https://www.example.com/") as response: + ... + ``` + + When using the `stream()` function, the body of the response will not be + automatically read. If you want to access the response body you should + either use `content = response.read()`, or `for chunk in response.iter_content()`. + + Arguments: + method: The HTTP method for the request. Typically one of `"GET"`, + `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`. + url: The URL of the HTTP request. Either as an instance of `httpcore.URL`, + or as str/bytes. + headers: The HTTP request headers. Either as a dictionary of str/bytes, + or as a list of two-tuples of str/bytes. + content: The content of the request body. Either as bytes, + or as a bytes iterator. + extensions: A dictionary of optional extra information included on the request. + Possible keys include `"timeout"`. + + Returns: + An instance of `httpcore.Response`. 
+ """ + with ConnectionPool() as pool: + with pool.stream( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) as response: + yield response diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/__init__.py b/parrot/lib/python3.10/site-packages/httpcore/_async/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..88dc7f01e132933728cbcf45c88ce82e85ddf65f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_async/__init__.py @@ -0,0 +1,39 @@ +from .connection import AsyncHTTPConnection +from .connection_pool import AsyncConnectionPool +from .http11 import AsyncHTTP11Connection +from .http_proxy import AsyncHTTPProxy +from .interfaces import AsyncConnectionInterface + +try: + from .http2 import AsyncHTTP2Connection +except ImportError: # pragma: nocover + + class AsyncHTTP2Connection: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use http2 support, but the `h2` package is not " + "installed. Use 'pip install httpcore[http2]'." + ) + + +try: + from .socks_proxy import AsyncSOCKSProxy +except ImportError: # pragma: nocover + + class AsyncSOCKSProxy: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use SOCKS support, but the `socksio` package is not " + "installed. Use 'pip install httpcore[socks]'." 
+ ) + + +__all__ = [ + "AsyncHTTPConnection", + "AsyncConnectionPool", + "AsyncHTTPProxy", + "AsyncHTTP11Connection", + "AsyncHTTP2Connection", + "AsyncConnectionInterface", + "AsyncSOCKSProxy", +] diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09c9348f18b8cf983ab7a689db1dec884b90e1e9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10cd320eea69e15fbf982e63e28ec34b6510f208 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de74212a648d6ebfd99825d5e116f606f111b9e4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http11.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http11.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8bdad8ce82c948160624fccd502d739ec7ebdc3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http11.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http2.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f135e2e5d8aff4919abdde4f4c3683234365cf87 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http2.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d523ff1289c25034e275419fecca4cdf0babccd1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/interfaces.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/interfaces.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a0ccf20da506ac4a18c799d8c6d0efc21e0e56a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/interfaces.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27afe1e3bd13f144e75a166f7c30c3650e123871 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/connection.py b/parrot/lib/python3.10/site-packages/httpcore/_async/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..9014ab957a2b03a9ca258ec693f15189c6d8cd77 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/httpcore/_async/connection.py @@ -0,0 +1,215 @@ +import itertools +import logging +import ssl +from types import TracebackType +from typing import Iterable, Iterator, Optional, Type + +from .._backends.auto import AutoBackend +from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream +from .._exceptions import ConnectError, ConnectionNotAvailable, ConnectTimeout +from .._models import Origin, Request, Response +from .._ssl import default_ssl_context +from .._synchronization import AsyncLock +from .._trace import Trace +from .http11 import AsyncHTTP11Connection +from .interfaces import AsyncConnectionInterface + +RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc. + + +logger = logging.getLogger("httpcore.connection") + + +def exponential_backoff(factor: float) -> Iterator[float]: + yield 0 + for n in itertools.count(2): + yield factor * (2 ** (n - 2)) + + +class AsyncHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[AsyncNetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + self._origin = origin + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._network_backend: AsyncNetworkBackend = ( + AutoBackend() if network_backend is None else network_backend + ) + self._connection: Optional[AsyncConnectionInterface] = None + self._connect_failed: bool = False + self._request_lock = AsyncLock() + self._socket_options = socket_options + + async def handle_async_request(self, request: Request) -> Response: + if not 
self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection to {self._origin}" + ) + + async with self._request_lock: + if self._connection is None: + try: + stream = await self._connect(request) + + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): + raise ConnectionNotAvailable() + + return await self._connection.handle_async_request(request) + + async def _connect(self, request: Request) -> AsyncNetworkStream: + timeouts = request.extensions.get("timeout", {}) + sni_hostname = request.extensions.get("sni_hostname", None) + timeout = timeouts.get("connect", None) + + retries_left = self._retries + delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) + + while True: + try: + if self._uds is None: + kwargs = { + "host": self._origin.host.decode("ascii"), + "port": self._origin.port, + "local_address": self._local_address, + "timeout": timeout, + "socket_options": self._socket_options, + } + async with Trace("connect_tcp", logger, request, kwargs) as trace: + stream = await self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + else: + kwargs = { + "path": self._uds, + "timeout": timeout, + "socket_options": self._socket_options, + } + async with Trace( + "connect_unix_socket", logger, request, kwargs + ) as trace: + stream = await self._network_backend.connect_unix_socket( + **kwargs + ) 
+ trace.return_value = stream + + if self._origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": sni_hostname + or self._origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace("start_tls", logger, request, kwargs) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + return stream + except (ConnectError, ConnectTimeout): + if retries_left <= 0: + raise + retries_left -= 1 + delay = next(delays) + async with Trace("retry", logger, request, kwargs) as trace: + await self._network_backend.sleep(delay) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + async def aclose(self) -> None: + if self._connection is not None: + async with Trace("close", logger, None, {}): + await self._connection.aclose() + + def is_available(self) -> bool: + if self._connection is None: + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. 
+ return ( + self._http2 + and (self._origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + async def __aenter__(self) -> "AsyncHTTPConnection": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + await self.aclose() diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/connection_pool.py b/parrot/lib/python3.10/site-packages/httpcore/_async/connection_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..ddc0510e60e7b744b177394dba49f7541c81b803 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_async/connection_pool.py @@ -0,0 +1,356 @@ +import ssl +import sys +from types import TracebackType +from typing import AsyncIterable, AsyncIterator, Iterable, List, Optional, Type + +from .._backends.auto import AutoBackend +from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend +from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol +from .._models import Origin, Request, Response +from .._synchronization 
import AsyncEvent, AsyncLock, AsyncShieldCancellation +from .connection import AsyncHTTPConnection +from .interfaces import AsyncConnectionInterface, AsyncRequestInterface + + +class RequestStatus: + def __init__(self, request: Request): + self.request = request + self.connection: Optional[AsyncConnectionInterface] = None + self._connection_acquired = AsyncEvent() + + def set_connection(self, connection: AsyncConnectionInterface) -> None: + assert self.connection is None + self.connection = connection + self._connection_acquired.set() + + def unset_connection(self) -> None: + assert self.connection is not None + self.connection = None + self._connection_acquired = AsyncEvent() + + async def wait_for_connection( + self, timeout: Optional[float] = None + ) -> AsyncConnectionInterface: + if self.connection is None: + await self._connection_acquired.wait(timeout=timeout) + assert self.connection is not None + return self.connection + + +class AsyncConnectionPool(AsyncRequestInterface): + """ + A connection pool for making HTTP requests. + """ + + def __init__( + self, + ssl_context: Optional[ssl.SSLContext] = None, + max_connections: Optional[int] = 10, + max_keepalive_connections: Optional[int] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[AsyncNetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. 
+ max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish a + connection. + local_address: Local address to connect from. Can also be used to connect + using a particular address family. Using `local_address="0.0.0.0"` + will connect using an `AF_INET` address (IPv4), while using + `local_address="::"` will connect using an `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. + socket_options: Socket options that have to be included + in the TCP socket when the connection was established. 
+ """ + self._ssl_context = ssl_context + + self._max_connections = ( + sys.maxsize if max_connections is None else max_connections + ) + self._max_keepalive_connections = ( + sys.maxsize + if max_keepalive_connections is None + else max_keepalive_connections + ) + self._max_keepalive_connections = min( + self._max_connections, self._max_keepalive_connections + ) + + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._pool: List[AsyncConnectionInterface] = [] + self._requests: List[RequestStatus] = [] + self._pool_lock = AsyncLock() + self._network_backend = ( + AutoBackend() if network_backend is None else network_backend + ) + self._socket_options = socket_options + + def create_connection(self, origin: Origin) -> AsyncConnectionInterface: + return AsyncHTTPConnection( + origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + retries=self._retries, + local_address=self._local_address, + uds=self._uds, + network_backend=self._network_backend, + socket_options=self._socket_options, + ) + + @property + def connections(self) -> List[AsyncConnectionInterface]: + """ + Return a list of the connections currently in the pool. + + For example: + + ```python + >>> pool.connections + [ + , + , + , + ] + ``` + """ + return list(self._pool) + + async def _attempt_to_acquire_connection(self, status: RequestStatus) -> bool: + """ + Attempt to provide a connection that can handle the given origin. + """ + origin = status.request.url.origin + + # If there are queued requests in front of us, then don't acquire a + # connection. We handle requests strictly in order. + waiting = [s for s in self._requests if s.connection is None] + if waiting and waiting[0] is not status: + return False + + # Reuse an existing connection if one is currently available. 
+ for idx, connection in enumerate(self._pool): + if connection.can_handle_request(origin) and connection.is_available(): + self._pool.pop(idx) + self._pool.insert(0, connection) + status.set_connection(connection) + return True + + # If the pool is currently full, attempt to close one idle connection. + if len(self._pool) >= self._max_connections: + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.is_idle(): + await connection.aclose() + self._pool.pop(idx) + break + + # If the pool is still full, then we cannot acquire a connection. + if len(self._pool) >= self._max_connections: + return False + + # Otherwise create a new connection. + connection = self.create_connection(origin) + self._pool.insert(0, connection) + status.set_connection(connection) + return True + + async def _close_expired_connections(self) -> None: + """ + Clean up the connection pool by closing off any connections that have expired. + """ + # Close any connections that have expired their keep-alive time. + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.has_expired(): + await connection.aclose() + self._pool.pop(idx) + + # If the pool size exceeds the maximum number of allowed keep-alive connections, + # then close off idle connections as required. + pool_size = len(self._pool) + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.is_idle() and pool_size > self._max_keepalive_connections: + await connection.aclose() + self._pool.pop(idx) + pool_size -= 1 + + async def handle_async_request(self, request: Request) -> Response: + """ + Send an HTTP request, and return an HTTP response. + + This is the core implementation that is called into by `.request()` or `.stream()`. + """ + scheme = request.url.scheme.decode() + if scheme == "": + raise UnsupportedProtocol( + "Request URL is missing an 'http://' or 'https://' protocol." 
+ ) + if scheme not in ("http", "https", "ws", "wss"): + raise UnsupportedProtocol( + f"Request URL has an unsupported protocol '{scheme}://'." + ) + + status = RequestStatus(request) + + async with self._pool_lock: + self._requests.append(status) + await self._close_expired_connections() + await self._attempt_to_acquire_connection(status) + + while True: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("pool", None) + try: + connection = await status.wait_for_connection(timeout=timeout) + except BaseException as exc: + # If we timeout here, or if the task is cancelled, then make + # sure to remove the request from the queue before bubbling + # up the exception. + async with self._pool_lock: + # Ensure only remove when task exists. + if status in self._requests: + self._requests.remove(status) + raise exc + + try: + response = await connection.handle_async_request(request) + except ConnectionNotAvailable: + # The ConnectionNotAvailable exception is a special case, that + # indicates we need to retry the request on a new connection. + # + # The most common case where this can occur is when multiple + # requests are queued waiting for a single connection, which + # might end up as an HTTP/2 connection, but which actually ends + # up as HTTP/1.1. + async with self._pool_lock: + # Maintain our position in the request queue, but reset the + # status so that the request becomes queued again. + status.unset_connection() + await self._attempt_to_acquire_connection(status) + except BaseException as exc: + with AsyncShieldCancellation(): + await self.response_closed(status) + raise exc + else: + break + + # When we return the response, we wrap the stream in a special class + # that handles notifying the connection pool once the response + # has been released. 
+ assert isinstance(response.stream, AsyncIterable) + return Response( + status=response.status, + headers=response.headers, + content=ConnectionPoolByteStream(response.stream, self, status), + extensions=response.extensions, + ) + + async def response_closed(self, status: RequestStatus) -> None: + """ + This method acts as a callback once the request/response cycle is complete. + + It is called into from the `ConnectionPoolByteStream.aclose()` method. + """ + assert status.connection is not None + connection = status.connection + + async with self._pool_lock: + # Update the state of the connection pool. + if status in self._requests: + self._requests.remove(status) + + if connection.is_closed() and connection in self._pool: + self._pool.remove(connection) + + # Since we've had a response closed, it's possible we'll now be able + # to service one or more requests that are currently pending. + for status in self._requests: + if status.connection is None: + acquired = await self._attempt_to_acquire_connection(status) + # If we could not acquire a connection for a queued request + # then we don't need to check anymore requests that are + # queued later behind it. + if not acquired: + break + + # Housekeeping. + await self._close_expired_connections() + + async def aclose(self) -> None: + """ + Close any connections in the pool. + """ + async with self._pool_lock: + for connection in self._pool: + await connection.aclose() + self._pool = [] + self._requests = [] + + async def __aenter__(self) -> "AsyncConnectionPool": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + await self.aclose() + + +class ConnectionPoolByteStream: + """ + A wrapper around the response byte stream, that additionally handles + notifying the connection pool when the response has been closed. 
+ """ + + def __init__( + self, + stream: AsyncIterable[bytes], + pool: AsyncConnectionPool, + status: RequestStatus, + ) -> None: + self._stream = stream + self._pool = pool + self._status = status + + async def __aiter__(self) -> AsyncIterator[bytes]: + async for part in self._stream: + yield part + + async def aclose(self) -> None: + try: + if hasattr(self._stream, "aclose"): + await self._stream.aclose() + finally: + with AsyncShieldCancellation(): + await self._pool.response_closed(self._status) diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/http11.py b/parrot/lib/python3.10/site-packages/httpcore/_async/http11.py new file mode 100644 index 0000000000000000000000000000000000000000..7ad3664205b7371ec63013953e8ec9fafad0cf60 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_async/http11.py @@ -0,0 +1,331 @@ +import enum +import logging +import time +from types import TracebackType +from typing import ( + AsyncIterable, + AsyncIterator, + List, + Optional, + Tuple, + Type, + Union, + cast, +) + +import h11 + +from .._backends.base import AsyncNetworkStream +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, + map_exceptions, +) +from .._models import Origin, Request, Response +from .._synchronization import AsyncLock, AsyncShieldCancellation +from .._trace import Trace +from .interfaces import AsyncConnectionInterface + +logger = logging.getLogger("httpcore.http11") + + +# A subset of `h11.Event` types supported by `_send_event` +H11SendEvent = Union[ + h11.Request, + h11.Data, + h11.EndOfMessage, +] + + +class HTTPConnectionState(enum.IntEnum): + NEW = 0 + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class AsyncHTTP11Connection(AsyncConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024 + + def __init__( + self, + origin: Origin, + stream: AsyncNetworkStream, + keepalive_expiry: Optional[float] = None, + ) -> None: + self._origin = origin + 
self._network_stream = stream + self._keepalive_expiry: Optional[float] = keepalive_expiry + self._expire_at: Optional[float] = None + self._state = HTTPConnectionState.NEW + self._state_lock = AsyncLock() + self._request_count = 0 + self._h11_state = h11.Connection( + our_role=h11.CLIENT, + max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE, + ) + + async def handle_async_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + async with self._state_lock: + if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE): + self._request_count += 1 + self._state = HTTPConnectionState.ACTIVE + self._expire_at = None + else: + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request} + async with Trace("send_request_headers", logger, request, kwargs) as trace: + await self._send_request_headers(**kwargs) + async with Trace("send_request_body", logger, request, kwargs) as trace: + await self._send_request_body(**kwargs) + async with Trace( + "receive_response_headers", logger, request, kwargs + ) as trace: + ( + http_version, + status, + reason_phrase, + headers, + ) = await self._receive_response_headers(**kwargs) + trace.return_value = ( + http_version, + status, + reason_phrase, + headers, + ) + + return Response( + status=status, + headers=headers, + content=HTTP11ConnectionByteStream(self, request), + extensions={ + "http_version": http_version, + "reason_phrase": reason_phrase, + "network_stream": self._network_stream, + }, + ) + except BaseException as exc: + with AsyncShieldCancellation(): + async with Trace("response_closed", logger, request) as trace: + await self._response_closed() + raise exc + + # Sending the request... 
+ + async def _send_request_headers(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + with map_exceptions({h11.LocalProtocolError: LocalProtocolError}): + event = h11.Request( + method=request.method, + target=request.url.target, + headers=request.headers, + ) + await self._send_event(event, timeout=timeout) + + async def _send_request_body(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + assert isinstance(request.stream, AsyncIterable) + async for chunk in request.stream: + event = h11.Data(data=chunk) + await self._send_event(event, timeout=timeout) + + await self._send_event(h11.EndOfMessage(), timeout=timeout) + + async def _send_event( + self, event: h11.Event, timeout: Optional[float] = None + ) -> None: + bytes_to_send = self._h11_state.send(event) + if bytes_to_send is not None: + await self._network_stream.write(bytes_to_send, timeout=timeout) + + # Receiving the response... + + async def _receive_response_headers( + self, request: Request + ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = await self._receive_event(timeout=timeout) + if isinstance(event, h11.Response): + break + if ( + isinstance(event, h11.InformationalResponse) + and event.status_code == 101 + ): + break + + http_version = b"HTTP/" + event.http_version + + # h11 version 0.11+ supports a `raw_items` interface to get the + # raw header casing, rather than the enforced lowercase headers. 
+ headers = event.headers.raw_items() + + return http_version, event.status_code, event.reason, headers + + async def _receive_response_body(self, request: Request) -> AsyncIterator[bytes]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = await self._receive_event(timeout=timeout) + if isinstance(event, h11.Data): + yield bytes(event.data) + elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)): + break + + async def _receive_event( + self, timeout: Optional[float] = None + ) -> Union[h11.Event, Type[h11.PAUSED]]: + while True: + with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}): + event = self._h11_state.next_event() + + if event is h11.NEED_DATA: + data = await self._network_stream.read( + self.READ_NUM_BYTES, timeout=timeout + ) + + # If we feed this case through h11 we'll raise an exception like: + # + # httpcore.RemoteProtocolError: can't handle event type + # ConnectionClosed when role=SERVER and state=SEND_RESPONSE + # + # Which is accurate, but not very informative from an end-user + # perspective. Instead we handle this case distinctly and treat + # it as a ConnectError. + if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE: + msg = "Server disconnected without sending a response." + raise RemoteProtocolError(msg) + + self._h11_state.receive_data(data) + else: + # mypy fails to narrow the type in the above if statement above + return cast(Union[h11.Event, Type[h11.PAUSED]], event) + + async def _response_closed(self) -> None: + async with self._state_lock: + if ( + self._h11_state.our_state is h11.DONE + and self._h11_state.their_state is h11.DONE + ): + self._state = HTTPConnectionState.IDLE + self._h11_state.start_next_cycle() + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + else: + await self.aclose() + + # Once the connection is no longer required... 
+ + async def aclose(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. + self._state = HTTPConnectionState.CLOSED + await self._network_stream.aclose() + + # The AsyncConnectionInterface methods provide information about the state of + # the connection, allowing for a connection pooling implementation to + # determine when to reuse and when to close the connection... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + # Note that HTTP/1.1 connections in the "NEW" state are not treated as + # being "available". The control flow which created the connection will + # be able to send an outgoing request, but the connection will not be + # acquired from the connection pool for any other request. + return self._state == HTTPConnectionState.IDLE + + def has_expired(self) -> bool: + now = time.monotonic() + keepalive_expired = self._expire_at is not None and now > self._expire_at + + # If the HTTP connection is idle but the socket is readable, then the + # only valid state is that the socket is about to return b"", indicating + # a server-initiated disconnect. 
+ server_disconnected = ( + self._state == HTTPConnectionState.IDLE + and self._network_stream.get_extra_info("is_readable") + ) + + return keepalive_expired or server_disconnected + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/1.1, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + async def __aenter__(self) -> "AsyncHTTP11Connection": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + await self.aclose() + + +class HTTP11ConnectionByteStream: + def __init__(self, connection: AsyncHTTP11Connection, request: Request) -> None: + self._connection = connection + self._request = request + self._closed = False + + async def __aiter__(self) -> AsyncIterator[bytes]: + kwargs = {"request": self._request} + try: + async with Trace("receive_response_body", logger, self._request, kwargs): + async for chunk in self._connection._receive_response_body(**kwargs): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
+ with AsyncShieldCancellation(): + await self.aclose() + raise exc + + async def aclose(self) -> None: + if not self._closed: + self._closed = True + async with Trace("response_closed", logger, self._request): + await self._connection._response_closed() diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/http2.py b/parrot/lib/python3.10/site-packages/httpcore/_async/http2.py new file mode 100644 index 0000000000000000000000000000000000000000..8dc776ffa004e063cc5958621dcf188359f0d47b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_async/http2.py @@ -0,0 +1,589 @@ +import enum +import logging +import time +import types +import typing + +import h2.config +import h2.connection +import h2.events +import h2.exceptions +import h2.settings + +from .._backends.base import AsyncNetworkStream +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, +) +from .._models import Origin, Request, Response +from .._synchronization import AsyncLock, AsyncSemaphore, AsyncShieldCancellation +from .._trace import Trace +from .interfaces import AsyncConnectionInterface + +logger = logging.getLogger("httpcore.http2") + + +def has_body_headers(request: Request) -> bool: + return any( + k.lower() == b"content-length" or k.lower() == b"transfer-encoding" + for k, v in request.headers + ) + + +class HTTPConnectionState(enum.IntEnum): + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class AsyncHTTP2Connection(AsyncConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) + + def __init__( + self, + origin: Origin, + stream: AsyncNetworkStream, + keepalive_expiry: typing.Optional[float] = None, + ): + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: typing.Optional[float] = keepalive_expiry + self._h2_state = h2.connection.H2Connection(config=self.CONFIG) + self._state = HTTPConnectionState.IDLE + self._expire_at: typing.Optional[float] 
= None + self._request_count = 0 + self._init_lock = AsyncLock() + self._state_lock = AsyncLock() + self._read_lock = AsyncLock() + self._write_lock = AsyncLock() + self._sent_connection_init = False + self._used_all_stream_ids = False + self._connection_error = False + + # Mapping from stream ID to response stream events. + self._events: typing.Dict[ + int, + typing.Union[ + h2.events.ResponseReceived, + h2.events.DataReceived, + h2.events.StreamEnded, + h2.events.StreamReset, + ], + ] = {} + + # Connection terminated events are stored as state since + # we need to handle them for all streams. + self._connection_terminated: typing.Optional[ + h2.events.ConnectionTerminated + ] = None + + self._read_exception: typing.Optional[Exception] = None + self._write_exception: typing.Optional[Exception] = None + + async def handle_async_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + # This cannot occur in normal operation, since the connection pool + # will only send requests on connections that handle them. + # It's in place simply for resilience as a guard against incorrect + # usage, for anyone working directly with httpcore connections. 
+ raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + async with self._state_lock: + if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): + self._request_count += 1 + self._expire_at = None + self._state = HTTPConnectionState.ACTIVE + else: + raise ConnectionNotAvailable() + + async with self._init_lock: + if not self._sent_connection_init: + try: + kwargs = {"request": request} + async with Trace("send_connection_init", logger, request, kwargs): + await self._send_connection_init(**kwargs) + except BaseException as exc: + with AsyncShieldCancellation(): + await self.aclose() + raise exc + + self._sent_connection_init = True + + # Initially start with just 1 until the remote server provides + # its max_concurrent_streams value + self._max_streams = 1 + + local_settings_max_streams = ( + self._h2_state.local_settings.max_concurrent_streams + ) + self._max_streams_semaphore = AsyncSemaphore(local_settings_max_streams) + + for _ in range(local_settings_max_streams - self._max_streams): + await self._max_streams_semaphore.acquire() + + await self._max_streams_semaphore.acquire() + + try: + stream_id = self._h2_state.get_next_available_stream_id() + self._events[stream_id] = [] + except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover + self._used_all_stream_ids = True + self._request_count -= 1 + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request, "stream_id": stream_id} + async with Trace("send_request_headers", logger, request, kwargs): + await self._send_request_headers(request=request, stream_id=stream_id) + async with Trace("send_request_body", logger, request, kwargs): + await self._send_request_body(request=request, stream_id=stream_id) + async with Trace( + "receive_response_headers", logger, request, kwargs + ) as trace: + status, headers = await self._receive_response( + request=request, stream_id=stream_id + ) + trace.return_value = (status, 
headers) + + return Response( + status=status, + headers=headers, + content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), + extensions={ + "http_version": b"HTTP/2", + "network_stream": self._network_stream, + "stream_id": stream_id, + }, + ) + except BaseException as exc: # noqa: PIE786 + with AsyncShieldCancellation(): + kwargs = {"stream_id": stream_id} + async with Trace("response_closed", logger, request, kwargs): + await self._response_closed(stream_id=stream_id) + + if isinstance(exc, h2.exceptions.ProtocolError): + # One case where h2 can raise a protocol error is when a + # closed frame has been seen by the state machine. + # + # This happens when one stream is reading, and encounters + # a GOAWAY event. Other flows of control may then raise + # a protocol error at any point they interact with the 'h2_state'. + # + # In this case we'll have stored the event, and should raise + # it as a RemoteProtocolError. + if self._connection_terminated: # pragma: nocover + raise RemoteProtocolError(self._connection_terminated) + # If h2 raises a protocol error in some other state then we + # must somehow have made a protocol violation. + raise LocalProtocolError(exc) # pragma: nocover + + raise exc + + async def _send_connection_init(self, request: Request) -> None: + """ + The HTTP/2 connection requires some initial setup before we can start + using individual request/response streams on it. + """ + # Need to set these manually here instead of manipulating via + # __setitem__() otherwise the H2Connection will emit SettingsUpdate + # frames in addition to sending the undesired defaults. + self._h2_state.local_settings = h2.settings.Settings( + client=True, + initial_values={ + # Disable PUSH_PROMISE frames from the server since we don't do anything + # with them for now. Maybe when we support caching? 
+ h2.settings.SettingCodes.ENABLE_PUSH: 0, + # These two are taken from h2 for safe defaults + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100, + h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536, + }, + ) + + # Some websites (*cough* Yahoo *cough*) balk at this setting being + # present in the initial handshake since it's not defined in the original + # RFC despite the RFC mandating ignoring settings you don't know about. + del self._h2_state.local_settings[ + h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL + ] + + self._h2_state.initiate_connection() + self._h2_state.increment_flow_control_window(2**24) + await self._write_outgoing_data(request) + + # Sending the request... + + async def _send_request_headers(self, request: Request, stream_id: int) -> None: + """ + Send the request headers to a given stream ID. + """ + end_stream = not has_body_headers(request) + + # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'. + # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require + # HTTP/1.1 style headers, and map them appropriately if we end up on + # an HTTP/2 connection. + authority = [v for k, v in request.headers if k.lower() == b"host"][0] + + headers = [ + (b":method", request.method), + (b":authority", authority), + (b":scheme", request.url.scheme), + (b":path", request.url.target), + ] + [ + (k.lower(), v) + for k, v in request.headers + if k.lower() + not in ( + b"host", + b"transfer-encoding", + ) + ] + + self._h2_state.send_headers(stream_id, headers, end_stream=end_stream) + self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id) + await self._write_outgoing_data(request) + + async def _send_request_body(self, request: Request, stream_id: int) -> None: + """ + Iterate over the request body sending it to a given stream ID. 
+ """ + if not has_body_headers(request): + return + + assert isinstance(request.stream, typing.AsyncIterable) + async for data in request.stream: + await self._send_stream_data(request, stream_id, data) + await self._send_end_stream(request, stream_id) + + async def _send_stream_data( + self, request: Request, stream_id: int, data: bytes + ) -> None: + """ + Send a single chunk of data in one or more data frames. + """ + while data: + max_flow = await self._wait_for_outgoing_flow(request, stream_id) + chunk_size = min(len(data), max_flow) + chunk, data = data[:chunk_size], data[chunk_size:] + self._h2_state.send_data(stream_id, chunk) + await self._write_outgoing_data(request) + + async def _send_end_stream(self, request: Request, stream_id: int) -> None: + """ + Send an empty data frame on on a given stream ID with the END_STREAM flag set. + """ + self._h2_state.end_stream(stream_id) + await self._write_outgoing_data(request) + + # Receiving the response... + + async def _receive_response( + self, request: Request, stream_id: int + ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]: + """ + Return the response status code and headers for a given stream ID. + """ + while True: + event = await self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.ResponseReceived): + break + + status_code = 200 + headers = [] + for k, v in event.headers: + if k == b":status": + status_code = int(v.decode("ascii", errors="ignore")) + elif not k.startswith(b":"): + headers.append((k, v)) + + return (status_code, headers) + + async def _receive_response_body( + self, request: Request, stream_id: int + ) -> typing.AsyncIterator[bytes]: + """ + Iterator that returns the bytes of the response body for a given stream ID. 
+ """ + while True: + event = await self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.DataReceived): + amount = event.flow_controlled_length + self._h2_state.acknowledge_received_data(amount, stream_id) + await self._write_outgoing_data(request) + yield event.data + elif isinstance(event, h2.events.StreamEnded): + break + + async def _receive_stream_event( + self, request: Request, stream_id: int + ) -> typing.Union[ + h2.events.ResponseReceived, h2.events.DataReceived, h2.events.StreamEnded + ]: + """ + Return the next available event for a given stream ID. + + Will read more data from the network if required. + """ + while not self._events.get(stream_id): + await self._receive_events(request, stream_id) + event = self._events[stream_id].pop(0) + if isinstance(event, h2.events.StreamReset): + raise RemoteProtocolError(event) + return event + + async def _receive_events( + self, request: Request, stream_id: typing.Optional[int] = None + ) -> None: + """ + Read some data from the network until we see one or more events + for a given stream ID. + """ + async with self._read_lock: + if self._connection_terminated is not None: + last_stream_id = self._connection_terminated.last_stream_id + if stream_id and last_stream_id and stream_id > last_stream_id: + self._request_count -= 1 + raise ConnectionNotAvailable() + raise RemoteProtocolError(self._connection_terminated) + + # This conditional is a bit icky. We don't want to block reading if we've + # actually got an event to return for a given stream. We need to do that + # check *within* the atomic read lock. Though it also need to be optional, + # because when we call it from `_wait_for_outgoing_flow` we *do* want to + # block until we've available flow control, event when we have events + # pending for the stream ID we're attempting to send on. 
+ if stream_id is None or not self._events.get(stream_id): + events = await self._read_incoming_data(request) + for event in events: + if isinstance(event, h2.events.RemoteSettingsChanged): + async with Trace( + "receive_remote_settings", logger, request + ) as trace: + await self._receive_remote_settings_change(event) + trace.return_value = event + + elif isinstance( + event, + ( + h2.events.ResponseReceived, + h2.events.DataReceived, + h2.events.StreamEnded, + h2.events.StreamReset, + ), + ): + if event.stream_id in self._events: + self._events[event.stream_id].append(event) + + elif isinstance(event, h2.events.ConnectionTerminated): + self._connection_terminated = event + + await self._write_outgoing_data(request) + + async def _receive_remote_settings_change(self, event: h2.events.Event) -> None: + max_concurrent_streams = event.changed_settings.get( + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS + ) + if max_concurrent_streams: + new_max_streams = min( + max_concurrent_streams.new_value, + self._h2_state.local_settings.max_concurrent_streams, + ) + if new_max_streams and new_max_streams != self._max_streams: + while new_max_streams > self._max_streams: + await self._max_streams_semaphore.release() + self._max_streams += 1 + while new_max_streams < self._max_streams: + await self._max_streams_semaphore.acquire() + self._max_streams -= 1 + + async def _response_closed(self, stream_id: int) -> None: + await self._max_streams_semaphore.release() + del self._events[stream_id] + async with self._state_lock: + if self._connection_terminated and not self._events: + await self.aclose() + + elif self._state == HTTPConnectionState.ACTIVE and not self._events: + self._state = HTTPConnectionState.IDLE + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + if self._used_all_stream_ids: # pragma: nocover + await self.aclose() + + async def aclose(self) -> None: + # Note that this method unilaterally closes the 
connection, and does + # not have any kind of locking in place around it. + self._h2_state.close_connection() + self._state = HTTPConnectionState.CLOSED + await self._network_stream.aclose() + + # Wrappers around network read/write operations... + + async def _read_incoming_data( + self, request: Request + ) -> typing.List[h2.events.Event]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + if self._read_exception is not None: + raise self._read_exception # pragma: nocover + + try: + data = await self._network_stream.read(self.READ_NUM_BYTES, timeout) + if data == b"": + raise RemoteProtocolError("Server disconnected") + except Exception as exc: + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future reads. + # (For example, this means that a single read timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._read_exception = exc + self._connection_error = True + raise exc + + events: typing.List[h2.events.Event] = self._h2_state.receive_data(data) + + return events + + async def _write_outgoing_data(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + async with self._write_lock: + data_to_send = self._h2_state.data_to_send() + + if self._write_exception is not None: + raise self._write_exception # pragma: nocover + + try: + await self._network_stream.write(data_to_send, timeout) + except Exception as exc: # pragma: nocover + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future write. + # (For example, this means that a single write timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. 
Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._write_exception = exc + self._connection_error = True + raise exc + + # Flow control... + + async def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: + """ + Returns the maximum allowable outgoing flow for a given stream. + + If the allowable flow is zero, then waits on the network until + WindowUpdated frames have increased the flow rate. + https://tools.ietf.org/html/rfc7540#section-6.9 + """ + local_flow: int = self._h2_state.local_flow_control_window(stream_id) + max_frame_size: int = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + while flow == 0: + await self._receive_events(request) + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + return flow + + # Interface for connection pooling... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + return ( + self._state != HTTPConnectionState.CLOSED + and not self._connection_error + and not self._used_all_stream_ids + and not ( + self._h2_state.state_machine.state + == h2.connection.ConnectionState.CLOSED + ) + ) + + def has_expired(self) -> bool: + now = time.monotonic() + return self._expire_at is not None and now > self._expire_at + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/2, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context 
managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + async def __aenter__(self) -> "AsyncHTTP2Connection": + return self + + async def __aexit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[types.TracebackType] = None, + ) -> None: + await self.aclose() + + +class HTTP2ConnectionByteStream: + def __init__( + self, connection: AsyncHTTP2Connection, request: Request, stream_id: int + ) -> None: + self._connection = connection + self._request = request + self._stream_id = stream_id + self._closed = False + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + kwargs = {"request": self._request, "stream_id": self._stream_id} + try: + async with Trace("receive_response_body", logger, self._request, kwargs): + async for chunk in self._connection._receive_response_body( + request=self._request, stream_id=self._stream_id + ): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
+ with AsyncShieldCancellation(): + await self.aclose() + raise exc + + async def aclose(self) -> None: + if not self._closed: + self._closed = True + kwargs = {"stream_id": self._stream_id} + async with Trace("response_closed", logger, self._request, kwargs): + await self._connection._response_closed(stream_id=self._stream_id) diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/http_proxy.py b/parrot/lib/python3.10/site-packages/httpcore/_async/http_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..62f510978f990209d334fd473ee22c1741052049 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_async/http_proxy.py @@ -0,0 +1,350 @@ +import logging +import ssl +from base64 import b64encode +from typing import Iterable, List, Mapping, Optional, Sequence, Tuple, Union + +from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend +from .._exceptions import ProxyError +from .._models import ( + URL, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, +) +from .._ssl import default_ssl_context +from .._synchronization import AsyncLock +from .._trace import Trace +from .connection import AsyncHTTPConnection +from .connection_pool import AsyncConnectionPool +from .http11 import AsyncHTTP11Connection +from .interfaces import AsyncConnectionInterface + +HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]] +HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]] + + +logger = logging.getLogger("httpcore.proxy") + + +def merge_headers( + default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, +) -> List[Tuple[bytes, bytes]]: + """ + Append default_headers and override_headers, de-duplicating if a key exists + in both cases. 
+ """ + default_headers = [] if default_headers is None else list(default_headers) + override_headers = [] if override_headers is None else list(override_headers) + has_override = set(key.lower() for key, value in override_headers) + default_headers = [ + (key, value) + for key, value in default_headers + if key.lower() not in has_override + ] + return default_headers + override_headers + + +def build_auth_header(username: bytes, password: bytes) -> bytes: + userpass = username + b":" + password + return b"Basic " + b64encode(userpass) + + +class AsyncHTTPProxy(AsyncConnectionPool): + """ + A connection pool that sends requests via an HTTP proxy. + """ + + def __init__( + self, + proxy_url: Union[URL, bytes, str], + proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + ssl_context: Optional[ssl.SSLContext] = None, + max_connections: Optional[int] = 10, + max_keepalive_connections: Optional[int] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[AsyncNetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + proxy_url: The URL to use when connecting to the proxy server. + For example `"http://127.0.0.1:8080/"`. + proxy_auth: Any proxy authentication as a two-tuple of + (username, password). May be either bytes or ascii-only str. + proxy_headers: Any HTTP headers to use for the proxy requests. + For example `{"Proxy-Authorization": "Basic :"}`. + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. 
Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. 
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + local_address=local_address, + uds=uds, + socket_options=socket_options, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + if proxy_auth is not None: + username = enforce_bytes(proxy_auth[0], name="proxy_auth") + password = enforce_bytes(proxy_auth[1], name="proxy_auth") + authorization = build_auth_header(username, password) + self._proxy_headers = [ + (b"Proxy-Authorization", authorization) + ] + self._proxy_headers + + def create_connection(self, origin: Origin) -> AsyncConnectionInterface: + if origin.scheme == b"http": + return AsyncForwardHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + keepalive_expiry=self._keepalive_expiry, + network_backend=self._network_backend, + ) + return AsyncTunnelHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class AsyncForwardHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + keepalive_expiry: Optional[float] = None, + network_backend: Optional[AsyncNetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + self._connection = AsyncHTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + 
socket_options=socket_options, + ) + self._proxy_origin = proxy_origin + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._remote_origin = remote_origin + + async def handle_async_request(self, request: Request) -> Response: + headers = merge_headers(self._proxy_headers, request.headers) + url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=bytes(request.url), + ) + proxy_request = Request( + method=request.method, + url=url, + headers=headers, + content=request.stream, + extensions=request.extensions, + ) + return await self._connection.handle_async_request(proxy_request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + async def aclose(self) -> None: + await self._connection.aclose() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + +class AsyncTunnelHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: Optional[AsyncNetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + self._connection: AsyncConnectionInterface = AsyncHTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ) + self._proxy_origin = proxy_origin + 
self._remote_origin = remote_origin + self._ssl_context = ssl_context + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._connect_lock = AsyncLock() + self._connected = False + + async def handle_async_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + async with self._connect_lock: + if not self._connected: + target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) + + connect_url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=target, + ) + connect_headers = merge_headers( + [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers + ) + connect_request = Request( + method=b"CONNECT", + url=connect_url, + headers=connect_headers, + extensions=request.extensions, + ) + connect_response = await self._connection.handle_async_request( + connect_request + ) + + if connect_response.status < 200 or connect_response.status > 299: + reason_bytes = connect_response.extensions.get("reason_phrase", b"") + reason_str = reason_bytes.decode("ascii", errors="ignore") + msg = "%d %s" % (connect_response.status, reason_str) + await self._connection.aclose() + raise ProxyError(msg) + + stream = connect_response.extensions["network_stream"] + + # Upgrade the stream to SSL + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace("start_tls", logger, request, kwargs) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should 
be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + + self._connected = True + return await self._connection.handle_async_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + async def aclose(self) -> None: + await self._connection.aclose() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/interfaces.py b/parrot/lib/python3.10/site-packages/httpcore/_async/interfaces.py new file mode 100644 index 0000000000000000000000000000000000000000..c998dd2763262443d67d42c4f0ceb6058688f8f5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_async/interfaces.py @@ -0,0 +1,135 @@ +from contextlib import asynccontextmanager +from typing import AsyncIterator, Optional, Union + +from .._models import ( + URL, + Extensions, + HeaderTypes, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, + include_request_headers, +) + + +class AsyncRequestInterface: + async def request( + 
self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: HeaderTypes = None, + content: Union[bytes, AsyncIterator[bytes], None] = None, + extensions: Optional[Extensions] = None, + ) -> Response: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = await self.handle_async_request(request) + try: + await response.aread() + finally: + await response.aclose() + return response + + @asynccontextmanager + async def stream( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: HeaderTypes = None, + content: Union[bytes, AsyncIterator[bytes], None] = None, + extensions: Optional[Extensions] = None, + ) -> AsyncIterator[Response]: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. 
+ headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = await self.handle_async_request(request) + try: + yield response + finally: + await response.aclose() + + async def handle_async_request(self, request: Request) -> Response: + raise NotImplementedError() # pragma: nocover + + +class AsyncConnectionInterface(AsyncRequestInterface): + async def aclose(self) -> None: + raise NotImplementedError() # pragma: nocover + + def info(self) -> str: + raise NotImplementedError() # pragma: nocover + + def can_handle_request(self, origin: Origin) -> bool: + raise NotImplementedError() # pragma: nocover + + def is_available(self) -> bool: + """ + Return `True` if the connection is currently able to accept an + outgoing request. + + An HTTP/1.1 connection will only be available if it is currently idle. + + An HTTP/2 connection will be available so long as the stream ID space is + not yet exhausted, and the connection is not in an error state. + + While the connection is being established we may not yet know if it is going + to result in an HTTP/1.1 or HTTP/2 connection. The connection should be + treated as being available, but might ultimately raise `NewConnectionRequired` + required exceptions if multiple requests are attempted over a connection + that ends up being established as HTTP/1.1. + """ + raise NotImplementedError() # pragma: nocover + + def has_expired(self) -> bool: + """ + Return `True` if the connection is in a state where it should be closed. + + This either means that the connection is idle and it has passed the + expiry time on its keep-alive, or that server has sent an EOF. + """ + raise NotImplementedError() # pragma: nocover + + def is_idle(self) -> bool: + """ + Return `True` if the connection is currently idle. 
+ """ + raise NotImplementedError() # pragma: nocover + + def is_closed(self) -> bool: + """ + Return `True` if the connection has been closed. + + Used when a response is closed to determine if the connection may be + returned to the connection pool or not. + """ + raise NotImplementedError() # pragma: nocover diff --git a/parrot/lib/python3.10/site-packages/httpcore/_async/socks_proxy.py b/parrot/lib/python3.10/site-packages/httpcore/_async/socks_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..f12cb373f858b60c3b5987f75ca2795e67d81759 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_async/socks_proxy.py @@ -0,0 +1,340 @@ +import logging +import ssl +import typing + +from socksio import socks5 + +from .._backends.auto import AutoBackend +from .._backends.base import AsyncNetworkBackend, AsyncNetworkStream +from .._exceptions import ConnectionNotAvailable, ProxyError +from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url +from .._ssl import default_ssl_context +from .._synchronization import AsyncLock +from .._trace import Trace +from .connection_pool import AsyncConnectionPool +from .http11 import AsyncHTTP11Connection +from .interfaces import AsyncConnectionInterface + +logger = logging.getLogger("httpcore.socks") + + +AUTH_METHODS = { + b"\x00": "NO AUTHENTICATION REQUIRED", + b"\x01": "GSSAPI", + b"\x02": "USERNAME/PASSWORD", + b"\xff": "NO ACCEPTABLE METHODS", +} + +REPLY_CODES = { + b"\x00": "Succeeded", + b"\x01": "General SOCKS server failure", + b"\x02": "Connection not allowed by ruleset", + b"\x03": "Network unreachable", + b"\x04": "Host unreachable", + b"\x05": "Connection refused", + b"\x06": "TTL expired", + b"\x07": "Command not supported", + b"\x08": "Address type not supported", +} + + +async def _init_socks5_connection( + stream: AsyncNetworkStream, + *, + host: bytes, + port: int, + auth: typing.Optional[typing.Tuple[bytes, bytes]] = None, +) -> None: + conn = 
socks5.SOCKS5Connection() + + # Auth method request + auth_method = ( + socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED + if auth is None + else socks5.SOCKS5AuthMethod.USERNAME_PASSWORD + ) + conn.send(socks5.SOCKS5AuthMethodsRequest([auth_method])) + outgoing_bytes = conn.data_to_send() + await stream.write(outgoing_bytes) + + # Auth method response + incoming_bytes = await stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5AuthReply) + if response.method != auth_method: + requested = AUTH_METHODS.get(auth_method, "UNKNOWN") + responded = AUTH_METHODS.get(response.method, "UNKNOWN") + raise ProxyError( + f"Requested {requested} from proxy server, but got {responded}." + ) + + if response.method == socks5.SOCKS5AuthMethod.USERNAME_PASSWORD: + # Username/password request + assert auth is not None + username, password = auth + conn.send(socks5.SOCKS5UsernamePasswordRequest(username, password)) + outgoing_bytes = conn.data_to_send() + await stream.write(outgoing_bytes) + + # Username/password response + incoming_bytes = await stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5UsernamePasswordReply) + if not response.success: + raise ProxyError("Invalid username/password") + + # Connect request + conn.send( + socks5.SOCKS5CommandRequest.from_address( + socks5.SOCKS5Command.CONNECT, (host, port) + ) + ) + outgoing_bytes = conn.data_to_send() + await stream.write(outgoing_bytes) + + # Connect response + incoming_bytes = await stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5Reply) + if response.reply_code != socks5.SOCKS5ReplyCode.SUCCEEDED: + reply_code = REPLY_CODES.get(response.reply_code, "UNKOWN") + raise ProxyError(f"Proxy Server could not connect: {reply_code}.") + + +class AsyncSOCKSProxy(AsyncConnectionPool): + """ + A connection pool that sends requests via an HTTP 
proxy. + """ + + def __init__( + self, + proxy_url: typing.Union[URL, bytes, str], + proxy_auth: typing.Optional[ + typing.Tuple[typing.Union[bytes, str], typing.Union[bytes, str]] + ] = None, + ssl_context: typing.Optional[ssl.SSLContext] = None, + max_connections: typing.Optional[int] = 10, + max_keepalive_connections: typing.Optional[int] = None, + keepalive_expiry: typing.Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + network_backend: typing.Optional[AsyncNetworkBackend] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + proxy_url: The URL to use when connecting to the proxy server. + For example `"http://127.0.0.1:8080/"`. + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. 
+ network_backend: A backend instance to use for handling network I/O. + """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + if proxy_auth is not None: + username, password = proxy_auth + username_bytes = enforce_bytes(username, name="proxy_auth") + password_bytes = enforce_bytes(password, name="proxy_auth") + self._proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = ( + username_bytes, + password_bytes, + ) + else: + self._proxy_auth = None + + def create_connection(self, origin: Origin) -> AsyncConnectionInterface: + return AsyncSocks5Connection( + proxy_origin=self._proxy_url.origin, + remote_origin=origin, + proxy_auth=self._proxy_auth, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class AsyncSocks5Connection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = None, + ssl_context: typing.Optional[ssl.SSLContext] = None, + keepalive_expiry: typing.Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: typing.Optional[AsyncNetworkBackend] = None, + ) -> None: + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._proxy_auth = proxy_auth + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + + self._network_backend: AsyncNetworkBackend = ( + AutoBackend() if network_backend is None else network_backend + ) + self._connect_lock = AsyncLock() + self._connection: typing.Optional[AsyncConnectionInterface] 
= None + self._connect_failed = False + + async def handle_async_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + async with self._connect_lock: + if self._connection is None: + try: + # Connect to the proxy + kwargs = { + "host": self._proxy_origin.host.decode("ascii"), + "port": self._proxy_origin.port, + "timeout": timeout, + } + with Trace("connect_tcp", logger, request, kwargs) as trace: + stream = await self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + + # Connect to the remote host using socks5 + kwargs = { + "stream": stream, + "host": self._remote_origin.host.decode("ascii"), + "port": self._remote_origin.port, + "auth": self._proxy_auth, + } + with Trace( + "setup_socks5_connection", logger, request, kwargs + ) as trace: + await _init_socks5_connection(**kwargs) + trace.return_value = stream + + # Upgrade the stream to SSL + if self._remote_origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ( + ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ) + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace("start_tls", logger, request, kwargs) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or ( + self._http2 and not self._http1 + ): # pragma: nocover + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._remote_origin, + stream=stream, + 
keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): # pragma: nocover + raise ConnectionNotAvailable() + + return await self._connection.handle_async_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + async def aclose(self) -> None: + if self._connection is not None: + await self._connection.aclose() + + def is_available(self) -> bool: + if self._connection is None: # pragma: nocover + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._remote_origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: # pragma: nocover + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/auto.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/auto.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c29077e1572cb9ff8fa9f25ff72e2471cc092dfb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/auto.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/mock.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/mock.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..469fc1325e6799dd46468dc8c493b05aeb8498e8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/mock.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/sync.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/sync.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c1f409fea05e052966a5d0e4be932d6667f71fd Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/sync.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/trio.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/trio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daae5de881152595b40b35314485ccfa32805eec Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_backends/__pycache__/trio.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_backends/sync.py b/parrot/lib/python3.10/site-packages/httpcore/_backends/sync.py new file mode 100644 index 0000000000000000000000000000000000000000..a4c85f0449af4e1150b21f36606597ddb00775d9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_backends/sync.py @@ -0,0 +1,133 @@ +import socket +import ssl +import sys +import typing + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ExceptionMapping, + ReadError, + 
ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .._utils import is_socket_readable +from .base import SOCKET_OPTION, NetworkBackend, NetworkStream + + +class SyncStream(NetworkStream): + def __init__(self, sock: socket.socket) -> None: + self._sock = sock + + def read(self, max_bytes: int, timeout: typing.Optional[float] = None) -> bytes: + exc_map: ExceptionMapping = {socket.timeout: ReadTimeout, OSError: ReadError} + with map_exceptions(exc_map): + self._sock.settimeout(timeout) + return self._sock.recv(max_bytes) + + def write(self, buffer: bytes, timeout: typing.Optional[float] = None) -> None: + if not buffer: + return + + exc_map: ExceptionMapping = {socket.timeout: WriteTimeout, OSError: WriteError} + with map_exceptions(exc_map): + while buffer: + self._sock.settimeout(timeout) + n = self._sock.send(buffer) + buffer = buffer[n:] + + def close(self) -> None: + self._sock.close() + + def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + ) -> NetworkStream: + exc_map: ExceptionMapping = { + socket.timeout: ConnectTimeout, + OSError: ConnectError, + } + with map_exceptions(exc_map): + try: + self._sock.settimeout(timeout) + sock = ssl_context.wrap_socket( + self._sock, server_hostname=server_hostname + ) + except Exception as exc: # pragma: nocover + self.close() + raise exc + return SyncStream(sock) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object" and isinstance(self._sock, ssl.SSLSocket): + return self._sock._sslobj # type: ignore + if info == "client_addr": + return self._sock.getsockname() + if info == "server_addr": + return self._sock.getpeername() + if info == "socket": + return self._sock + if info == "is_readable": + return is_socket_readable(self._sock) + return None + + +class SyncBackend(NetworkBackend): + def connect_tcp( + self, + host: str, + port: int, + timeout: typing.Optional[float] = None, + 
local_address: typing.Optional[str] = None, + socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, + ) -> NetworkStream: + # Note that we automatically include `TCP_NODELAY` + # in addition to any other custom socket options. + if socket_options is None: + socket_options = [] # pragma: no cover + address = (host, port) + source_address = None if local_address is None else (local_address, 0) + exc_map: ExceptionMapping = { + socket.timeout: ConnectTimeout, + OSError: ConnectError, + } + + with map_exceptions(exc_map): + sock = socket.create_connection( + address, + timeout, + source_address=source_address, + ) + for option in socket_options: + sock.setsockopt(*option) # pragma: no cover + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + return SyncStream(sock) + + def connect_unix_socket( + self, + path: str, + timeout: typing.Optional[float] = None, + socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, + ) -> NetworkStream: # pragma: nocover + if sys.platform == "win32": + raise RuntimeError( + "Attempted to connect to a UNIX socket on a Windows system." 
+ ) + if socket_options is None: + socket_options = [] + + exc_map: ExceptionMapping = { + socket.timeout: ConnectTimeout, + OSError: ConnectError, + } + with map_exceptions(exc_map): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + for option in socket_options: + sock.setsockopt(*option) + sock.settimeout(timeout) + sock.connect(path) + return SyncStream(sock) diff --git a/parrot/lib/python3.10/site-packages/httpcore/_backends/trio.py b/parrot/lib/python3.10/site-packages/httpcore/_backends/trio.py new file mode 100644 index 0000000000000000000000000000000000000000..b1626d28e2ded284a65d48a32309ee201d953c5e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_backends/trio.py @@ -0,0 +1,161 @@ +import ssl +import typing + +import trio + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ExceptionMapping, + ReadError, + ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream + + +class TrioStream(AsyncNetworkStream): + def __init__(self, stream: trio.abc.Stream) -> None: + self._stream = stream + + async def read( + self, max_bytes: int, timeout: typing.Optional[float] = None + ) -> bytes: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ReadTimeout, + trio.BrokenResourceError: ReadError, + trio.ClosedResourceError: ReadError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + data: bytes = await self._stream.receive_some(max_bytes=max_bytes) + return data + + async def write( + self, buffer: bytes, timeout: typing.Optional[float] = None + ) -> None: + if not buffer: + return + + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: WriteTimeout, + trio.BrokenResourceError: WriteError, + trio.ClosedResourceError: WriteError, + } + with map_exceptions(exc_map): + with 
trio.fail_after(timeout_or_inf): + await self._stream.send_all(data=buffer) + + async def aclose(self) -> None: + await self._stream.aclose() + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + ) -> AsyncNetworkStream: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + } + ssl_stream = trio.SSLStream( + self._stream, + ssl_context=ssl_context, + server_hostname=server_hostname, + https_compatible=True, + server_side=False, + ) + with map_exceptions(exc_map): + try: + with trio.fail_after(timeout_or_inf): + await ssl_stream.do_handshake() + except Exception as exc: # pragma: nocover + await self.aclose() + raise exc + return TrioStream(ssl_stream) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object" and isinstance(self._stream, trio.SSLStream): + # Type checkers cannot see `_ssl_object` attribute because trio._ssl.SSLStream uses __getattr__/__setattr__. 
+ # Tracked at https://github.com/python-trio/trio/issues/542 + return self._stream._ssl_object # type: ignore[attr-defined] + if info == "client_addr": + return self._get_socket_stream().socket.getsockname() + if info == "server_addr": + return self._get_socket_stream().socket.getpeername() + if info == "socket": + stream = self._stream + while isinstance(stream, trio.SSLStream): + stream = stream.transport_stream + assert isinstance(stream, trio.SocketStream) + return stream.socket + if info == "is_readable": + socket = self.get_extra_info("socket") + return socket.is_readable() + return None + + def _get_socket_stream(self) -> trio.SocketStream: + stream = self._stream + while isinstance(stream, trio.SSLStream): + stream = stream.transport_stream + assert isinstance(stream, trio.SocketStream) + return stream + + +class TrioBackend(AsyncNetworkBackend): + async def connect_tcp( + self, + host: str, + port: int, + timeout: typing.Optional[float] = None, + local_address: typing.Optional[str] = None, + socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, + ) -> AsyncNetworkStream: + # By default for TCP sockets, trio enables TCP_NODELAY. 
+ # https://trio.readthedocs.io/en/stable/reference-io.html#trio.SocketStream + if socket_options is None: + socket_options = [] # pragma: no cover + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + OSError: ConnectError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + stream: trio.abc.Stream = await trio.open_tcp_stream( + host=host, port=port, local_address=local_address + ) + for option in socket_options: + stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover + return TrioStream(stream) + + async def connect_unix_socket( + self, + path: str, + timeout: typing.Optional[float] = None, + socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, + ) -> AsyncNetworkStream: # pragma: nocover + if socket_options is None: + socket_options = [] + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + OSError: ConnectError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + stream: trio.abc.Stream = await trio.open_unix_socket(path) + for option in socket_options: + stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover + return TrioStream(stream) + + async def sleep(self, seconds: float) -> None: + await trio.sleep(seconds) # pragma: nocover diff --git a/parrot/lib/python3.10/site-packages/httpcore/_exceptions.py b/parrot/lib/python3.10/site-packages/httpcore/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..81e7fc61ddfe258296d4d08b436fa8627f335dc9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_exceptions.py @@ -0,0 +1,81 @@ +import contextlib +from typing import Iterator, Mapping, Type + +ExceptionMapping = Mapping[Type[Exception], Type[Exception]] + + 
+@contextlib.contextmanager +def map_exceptions(map: ExceptionMapping) -> Iterator[None]: + try: + yield + except Exception as exc: # noqa: PIE786 + for from_exc, to_exc in map.items(): + if isinstance(exc, from_exc): + raise to_exc(exc) from exc + raise # pragma: nocover + + +class ConnectionNotAvailable(Exception): + pass + + +class ProxyError(Exception): + pass + + +class UnsupportedProtocol(Exception): + pass + + +class ProtocolError(Exception): + pass + + +class RemoteProtocolError(ProtocolError): + pass + + +class LocalProtocolError(ProtocolError): + pass + + +# Timeout errors + + +class TimeoutException(Exception): + pass + + +class PoolTimeout(TimeoutException): + pass + + +class ConnectTimeout(TimeoutException): + pass + + +class ReadTimeout(TimeoutException): + pass + + +class WriteTimeout(TimeoutException): + pass + + +# Network errors + + +class NetworkError(Exception): + pass + + +class ConnectError(NetworkError): + pass + + +class ReadError(NetworkError): + pass + + +class WriteError(NetworkError): + pass diff --git a/parrot/lib/python3.10/site-packages/httpcore/_models.py b/parrot/lib/python3.10/site-packages/httpcore/_models.py new file mode 100644 index 0000000000000000000000000000000000000000..e15305eec4ee7037f3eaaa1b2c8aca21fcfdc0d6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_models.py @@ -0,0 +1,483 @@ +from typing import ( + Any, + AsyncIterable, + AsyncIterator, + Iterable, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) +from urllib.parse import urlparse + +# Functions for typechecking... 
+ + +HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]] +HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]] +HeaderTypes = Union[HeadersAsSequence, HeadersAsMapping, None] + +Extensions = Mapping[str, Any] + + +def enforce_bytes(value: Union[bytes, str], *, name: str) -> bytes: + """ + Any arguments that are ultimately represented as bytes can be specified + either as bytes or as strings. + + However we enforce that any string arguments must only contain characters in + the plain ASCII range. chr(0)...chr(127). If you need to use characters + outside that range then be precise, and use a byte-wise argument. + """ + if isinstance(value, str): + try: + return value.encode("ascii") + except UnicodeEncodeError: + raise TypeError(f"{name} strings may not include unicode characters.") + elif isinstance(value, bytes): + return value + + seen_type = type(value).__name__ + raise TypeError(f"{name} must be bytes or str, but got {seen_type}.") + + +def enforce_url(value: Union["URL", bytes, str], *, name: str) -> "URL": + """ + Type check for URL parameters. + """ + if isinstance(value, (bytes, str)): + return URL(value) + elif isinstance(value, URL): + return value + + seen_type = type(value).__name__ + raise TypeError(f"{name} must be a URL, bytes, or str, but got {seen_type}.") + + +def enforce_headers( + value: Union[HeadersAsMapping, HeadersAsSequence, None] = None, *, name: str +) -> List[Tuple[bytes, bytes]]: + """ + Convienence function that ensure all items in request or response headers + are either bytes or strings in the plain ASCII range. 
+ """ + if value is None: + return [] + elif isinstance(value, Mapping): + return [ + ( + enforce_bytes(k, name="header name"), + enforce_bytes(v, name="header value"), + ) + for k, v in value.items() + ] + elif isinstance(value, Sequence): + return [ + ( + enforce_bytes(k, name="header name"), + enforce_bytes(v, name="header value"), + ) + for k, v in value + ] + + seen_type = type(value).__name__ + raise TypeError( + f"{name} must be a mapping or sequence of two-tuples, but got {seen_type}." + ) + + +def enforce_stream( + value: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None], *, name: str +) -> Union[Iterable[bytes], AsyncIterable[bytes]]: + if value is None: + return ByteStream(b"") + elif isinstance(value, bytes): + return ByteStream(value) + return value + + +# * https://tools.ietf.org/html/rfc3986#section-3.2.3 +# * https://url.spec.whatwg.org/#url-miscellaneous +# * https://url.spec.whatwg.org/#scheme-state +DEFAULT_PORTS = { + b"ftp": 21, + b"http": 80, + b"https": 443, + b"ws": 80, + b"wss": 443, +} + + +def include_request_headers( + headers: List[Tuple[bytes, bytes]], + *, + url: "URL", + content: Union[None, bytes, Iterable[bytes], AsyncIterable[bytes]], +) -> List[Tuple[bytes, bytes]]: + headers_set = set(k.lower() for k, v in headers) + + if b"host" not in headers_set: + default_port = DEFAULT_PORTS.get(url.scheme) + if url.port is None or url.port == default_port: + header_value = url.host + else: + header_value = b"%b:%d" % (url.host, url.port) + headers = [(b"Host", header_value)] + headers + + if ( + content is not None + and b"content-length" not in headers_set + and b"transfer-encoding" not in headers_set + ): + if isinstance(content, bytes): + content_length = str(len(content)).encode("ascii") + headers += [(b"Content-Length", content_length)] + else: + headers += [(b"Transfer-Encoding", b"chunked")] # pragma: nocover + + return headers + + +# Interfaces for byte streams... 
+ + +class ByteStream: + """ + A container for non-streaming content, and that supports both sync and async + stream iteration. + """ + + def __init__(self, content: bytes) -> None: + self._content = content + + def __iter__(self) -> Iterator[bytes]: + yield self._content + + async def __aiter__(self) -> AsyncIterator[bytes]: + yield self._content + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{len(self._content)} bytes]>" + + +class Origin: + def __init__(self, scheme: bytes, host: bytes, port: int) -> None: + self.scheme = scheme + self.host = host + self.port = port + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, Origin) + and self.scheme == other.scheme + and self.host == other.host + and self.port == other.port + ) + + def __str__(self) -> str: + scheme = self.scheme.decode("ascii") + host = self.host.decode("ascii") + port = str(self.port) + return f"{scheme}://{host}:{port}" + + +class URL: + """ + Represents the URL against which an HTTP request may be made. + + The URL may either be specified as a plain string, for convienence: + + ```python + url = httpcore.URL("https://www.example.com/") + ``` + + Or be constructed with explicitily pre-parsed components: + + ```python + url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/') + ``` + + Using this second more explicit style allows integrations that are using + `httpcore` to pass through URLs that have already been parsed in order to use + libraries such as `rfc-3986` rather than relying on the stdlib. It also ensures + that URL parsing is treated identically at both the networking level and at any + higher layers of abstraction. + + The four components are important here, as they allow the URL to be precisely + specified in a pre-parsed format. They also allow certain types of request to + be created that could not otherwise be expressed. 
+ + For example, an HTTP request to `http://www.example.com/` forwarded via a proxy + at `http://localhost:8080`... + + ```python + # Constructs an HTTP request with a complete URL as the target: + # GET https://www.example.com/ HTTP/1.1 + url = httpcore.URL( + scheme=b'http', + host=b'localhost', + port=8080, + target=b'https://www.example.com/' + ) + request = httpcore.Request( + method="GET", + url=url + ) + ``` + + Another example is constructing an `OPTIONS *` request... + + ```python + # Constructs an 'OPTIONS *' HTTP request: + # OPTIONS * HTTP/1.1 + url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*') + request = httpcore.Request(method="OPTIONS", url=url) + ``` + + This kind of request is not possible to formulate with a URL string, + because the `/` delimiter is always used to demark the target from the + host/port portion of the URL. + + For convenience, string-like arguments may be specified either as strings or + as bytes. However, once a request is being issue over-the-wire, the URL + components are always ultimately required to be a bytewise representation. + + In order to avoid any ambiguity over character encodings, when strings are used + as arguments, they must be strictly limited to the ASCII range `chr(0)`-`chr(127)`. + If you require a bytewise representation that is outside this range you must + handle the character encoding directly, and pass a bytes instance. + """ + + def __init__( + self, + url: Union[bytes, str] = "", + *, + scheme: Union[bytes, str] = b"", + host: Union[bytes, str] = b"", + port: Optional[int] = None, + target: Union[bytes, str] = b"", + ) -> None: + """ + Parameters: + url: The complete URL as a string or bytes. + scheme: The URL scheme as a string or bytes. + Typically either `"http"` or `"https"`. + host: The URL host as a string or bytes. Such as `"www.example.com"`. + port: The port to connect to. Either an integer or `None`. + target: The target of the HTTP request. 
Such as `"/items?search=red"`. + """ + if url: + parsed = urlparse(enforce_bytes(url, name="url")) + self.scheme = parsed.scheme + self.host = parsed.hostname or b"" + self.port = parsed.port + self.target = (parsed.path or b"/") + ( + b"?" + parsed.query if parsed.query else b"" + ) + else: + self.scheme = enforce_bytes(scheme, name="scheme") + self.host = enforce_bytes(host, name="host") + self.port = port + self.target = enforce_bytes(target, name="target") + + @property + def origin(self) -> Origin: + default_port = { + b"http": 80, + b"https": 443, + b"ws": 80, + b"wss": 443, + b"socks5": 1080, + }[self.scheme] + return Origin( + scheme=self.scheme, host=self.host, port=self.port or default_port + ) + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, URL) + and other.scheme == self.scheme + and other.host == self.host + and other.port == self.port + and other.target == self.target + ) + + def __bytes__(self) -> bytes: + if self.port is None: + return b"%b://%b%b" % (self.scheme, self.host, self.target) + return b"%b://%b:%d%b" % (self.scheme, self.host, self.port, self.target) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(scheme={self.scheme!r}, " + f"host={self.host!r}, port={self.port!r}, target={self.target!r})" + ) + + +class Request: + """ + An HTTP request. + """ + + def __init__( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: HeaderTypes = None, + content: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None] = None, + extensions: Optional[Extensions] = None, + ) -> None: + """ + Parameters: + method: The HTTP request method, either as a string or bytes. + For example: `GET`. + url: The request URL, either as a `URL` instance, or as a string or bytes. + For example: `"https://www.example.com".` + headers: The HTTP request headers. + content: The content of the response body. + extensions: A dictionary of optional extra information included on + the request. 
Possible keys include `"timeout"`, and `"trace"`. + """ + self.method: bytes = enforce_bytes(method, name="method") + self.url: URL = enforce_url(url, name="url") + self.headers: List[Tuple[bytes, bytes]] = enforce_headers( + headers, name="headers" + ) + self.stream: Union[Iterable[bytes], AsyncIterable[bytes]] = enforce_stream( + content, name="content" + ) + self.extensions = {} if extensions is None else extensions + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.method!r}]>" + + +class Response: + """ + An HTTP response. + """ + + def __init__( + self, + status: int, + *, + headers: HeaderTypes = None, + content: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None] = None, + extensions: Optional[Extensions] = None, + ) -> None: + """ + Parameters: + status: The HTTP status code of the response. For example `200`. + headers: The HTTP response headers. + content: The content of the response body. + extensions: A dictionary of optional extra information included on + the responseself.Possible keys include `"http_version"`, + `"reason_phrase"`, and `"network_stream"`. + """ + self.status: int = status + self.headers: List[Tuple[bytes, bytes]] = enforce_headers( + headers, name="headers" + ) + self.stream: Union[Iterable[bytes], AsyncIterable[bytes]] = enforce_stream( + content, name="content" + ) + self.extensions = {} if extensions is None else extensions + + self._stream_consumed = False + + @property + def content(self) -> bytes: + if not hasattr(self, "_content"): + if isinstance(self.stream, Iterable): + raise RuntimeError( + "Attempted to access 'response.content' on a streaming response. " + "Call 'response.read()' first." + ) + else: + raise RuntimeError( + "Attempted to access 'response.content' on a streaming response. " + "Call 'await response.aread()' first." + ) + return self._content + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.status}]>" + + # Sync interface... 
+ + def read(self) -> bytes: + if not isinstance(self.stream, Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to read an asynchronous response using 'response.read()'. " + "You should use 'await response.aread()' instead." + ) + if not hasattr(self, "_content"): + self._content = b"".join([part for part in self.iter_stream()]) + return self._content + + def iter_stream(self) -> Iterator[bytes]: + if not isinstance(self.stream, Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to stream an asynchronous response using 'for ... in " + "response.iter_stream()'. " + "You should use 'async for ... in response.aiter_stream()' instead." + ) + if self._stream_consumed: + raise RuntimeError( + "Attempted to call 'for ... in response.iter_stream()' more than once." + ) + self._stream_consumed = True + for chunk in self.stream: + yield chunk + + def close(self) -> None: + if not isinstance(self.stream, Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to close an asynchronous response using 'response.close()'. " + "You should use 'await response.aclose()' instead." + ) + if hasattr(self.stream, "close"): + self.stream.close() + + # Async interface... + + async def aread(self) -> bytes: + if not isinstance(self.stream, AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to read an synchronous response using " + "'await response.aread()'. " + "You should use 'response.read()' instead." + ) + if not hasattr(self, "_content"): + self._content = b"".join([part async for part in self.aiter_stream()]) + return self._content + + async def aiter_stream(self) -> AsyncIterator[bytes]: + if not isinstance(self.stream, AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to stream an synchronous response using 'async for ... in " + "response.aiter_stream()'. " + "You should use 'for ... in response.iter_stream()' instead." + ) + if self._stream_consumed: + raise RuntimeError( + "Attempted to call 'async for ... 
in response.aiter_stream()' " + "more than once." + ) + self._stream_consumed = True + async for chunk in self.stream: + yield chunk + + async def aclose(self) -> None: + if not isinstance(self.stream, AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to close a synchronous response using " + "'await response.aclose()'. " + "You should use 'response.close()' instead." + ) + if hasattr(self.stream, "aclose"): + await self.stream.aclose() diff --git a/parrot/lib/python3.10/site-packages/httpcore/_ssl.py b/parrot/lib/python3.10/site-packages/httpcore/_ssl.py new file mode 100644 index 0000000000000000000000000000000000000000..c99c5a67945b8a3a3544d481e979c791ab45fe23 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_ssl.py @@ -0,0 +1,9 @@ +import ssl + +import certifi + + +def default_ssl_context() -> ssl.SSLContext: + context = ssl.create_default_context() + context.load_verify_locations(certifi.where()) + return context diff --git a/parrot/lib/python3.10/site-packages/httpcore/_sync/__init__.py b/parrot/lib/python3.10/site-packages/httpcore/_sync/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b476d76d9a7ff45de8d18ec22d33d6af2982f92e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_sync/__init__.py @@ -0,0 +1,39 @@ +from .connection import HTTPConnection +from .connection_pool import ConnectionPool +from .http11 import HTTP11Connection +from .http_proxy import HTTPProxy +from .interfaces import ConnectionInterface + +try: + from .http2 import HTTP2Connection +except ImportError: # pragma: nocover + + class HTTP2Connection: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use http2 support, but the `h2` package is not " + "installed. Use 'pip install httpcore[http2]'." 
+ ) + + +try: + from .socks_proxy import SOCKSProxy +except ImportError: # pragma: nocover + + class SOCKSProxy: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use SOCKS support, but the `socksio` package is not " + "installed. Use 'pip install httpcore[socks]'." + ) + + +__all__ = [ + "HTTPConnection", + "ConnectionPool", + "HTTPProxy", + "HTTP11Connection", + "HTTP2Connection", + "ConnectionInterface", + "SOCKSProxy", +] diff --git a/parrot/lib/python3.10/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69a795cd55c15fd9a724b95993eb31bef4666b36 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/httpcore/_sync/connection_pool.py b/parrot/lib/python3.10/site-packages/httpcore/_sync/connection_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..dbcaff1fcf1b1cbb404b3e7367b037942f4e9d03 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_sync/connection_pool.py @@ -0,0 +1,356 @@ +import ssl +import sys +from types import TracebackType +from typing import Iterable, Iterator, Iterable, List, Optional, Type + +from .._backends.sync import SyncBackend +from .._backends.base import SOCKET_OPTION, NetworkBackend +from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol +from .._models import Origin, Request, Response +from .._synchronization import Event, Lock, ShieldCancellation +from .connection import HTTPConnection +from .interfaces import ConnectionInterface, RequestInterface + + +class RequestStatus: + def __init__(self, request: Request): + self.request = request + self.connection: Optional[ConnectionInterface] = None + self._connection_acquired = 
Event() + + def set_connection(self, connection: ConnectionInterface) -> None: + assert self.connection is None + self.connection = connection + self._connection_acquired.set() + + def unset_connection(self) -> None: + assert self.connection is not None + self.connection = None + self._connection_acquired = Event() + + def wait_for_connection( + self, timeout: Optional[float] = None + ) -> ConnectionInterface: + if self.connection is None: + self._connection_acquired.wait(timeout=timeout) + assert self.connection is not None + return self.connection + + +class ConnectionPool(RequestInterface): + """ + A connection pool for making HTTP requests. + """ + + def __init__( + self, + ssl_context: Optional[ssl.SSLContext] = None, + max_connections: Optional[int] = 10, + max_keepalive_connections: Optional[int] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[NetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. 
+ http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish a + connection. + local_address: Local address to connect from. Can also be used to connect + using a particular address family. Using `local_address="0.0.0.0"` + will connect using an `AF_INET` address (IPv4), while using + `local_address="::"` will connect using an `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. + socket_options: Socket options that have to be included + in the TCP socket when the connection was established. + """ + self._ssl_context = ssl_context + + self._max_connections = ( + sys.maxsize if max_connections is None else max_connections + ) + self._max_keepalive_connections = ( + sys.maxsize + if max_keepalive_connections is None + else max_keepalive_connections + ) + self._max_keepalive_connections = min( + self._max_connections, self._max_keepalive_connections + ) + + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._pool: List[ConnectionInterface] = [] + self._requests: List[RequestStatus] = [] + self._pool_lock = Lock() + self._network_backend = ( + SyncBackend() if network_backend is None else network_backend + ) + self._socket_options = socket_options + + def create_connection(self, origin: Origin) -> ConnectionInterface: + return HTTPConnection( + origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + retries=self._retries, + local_address=self._local_address, + uds=self._uds, + network_backend=self._network_backend, + socket_options=self._socket_options, + ) + + @property + def connections(self) -> List[ConnectionInterface]: + """ + Return a 
list of the connections currently in the pool. + + For example: + + ```python + >>> pool.connections + [ + , + , + , + ] + ``` + """ + return list(self._pool) + + def _attempt_to_acquire_connection(self, status: RequestStatus) -> bool: + """ + Attempt to provide a connection that can handle the given origin. + """ + origin = status.request.url.origin + + # If there are queued requests in front of us, then don't acquire a + # connection. We handle requests strictly in order. + waiting = [s for s in self._requests if s.connection is None] + if waiting and waiting[0] is not status: + return False + + # Reuse an existing connection if one is currently available. + for idx, connection in enumerate(self._pool): + if connection.can_handle_request(origin) and connection.is_available(): + self._pool.pop(idx) + self._pool.insert(0, connection) + status.set_connection(connection) + return True + + # If the pool is currently full, attempt to close one idle connection. + if len(self._pool) >= self._max_connections: + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.is_idle(): + connection.close() + self._pool.pop(idx) + break + + # If the pool is still full, then we cannot acquire a connection. + if len(self._pool) >= self._max_connections: + return False + + # Otherwise create a new connection. + connection = self.create_connection(origin) + self._pool.insert(0, connection) + status.set_connection(connection) + return True + + def _close_expired_connections(self) -> None: + """ + Clean up the connection pool by closing off any connections that have expired. + """ + # Close any connections that have expired their keep-alive time. + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.has_expired(): + connection.close() + self._pool.pop(idx) + + # If the pool size exceeds the maximum number of allowed keep-alive connections, + # then close off idle connections as required. 
+ pool_size = len(self._pool) + for idx, connection in reversed(list(enumerate(self._pool))): + if connection.is_idle() and pool_size > self._max_keepalive_connections: + connection.close() + self._pool.pop(idx) + pool_size -= 1 + + def handle_request(self, request: Request) -> Response: + """ + Send an HTTP request, and return an HTTP response. + + This is the core implementation that is called into by `.request()` or `.stream()`. + """ + scheme = request.url.scheme.decode() + if scheme == "": + raise UnsupportedProtocol( + "Request URL is missing an 'http://' or 'https://' protocol." + ) + if scheme not in ("http", "https", "ws", "wss"): + raise UnsupportedProtocol( + f"Request URL has an unsupported protocol '{scheme}://'." + ) + + status = RequestStatus(request) + + with self._pool_lock: + self._requests.append(status) + self._close_expired_connections() + self._attempt_to_acquire_connection(status) + + while True: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("pool", None) + try: + connection = status.wait_for_connection(timeout=timeout) + except BaseException as exc: + # If we timeout here, or if the task is cancelled, then make + # sure to remove the request from the queue before bubbling + # up the exception. + with self._pool_lock: + # Ensure only remove when task exists. + if status in self._requests: + self._requests.remove(status) + raise exc + + try: + response = connection.handle_request(request) + except ConnectionNotAvailable: + # The ConnectionNotAvailable exception is a special case, that + # indicates we need to retry the request on a new connection. + # + # The most common case where this can occur is when multiple + # requests are queued waiting for a single connection, which + # might end up as an HTTP/2 connection, but which actually ends + # up as HTTP/1.1. + with self._pool_lock: + # Maintain our position in the request queue, but reset the + # status so that the request becomes queued again. 
+ status.unset_connection() + self._attempt_to_acquire_connection(status) + except BaseException as exc: + with ShieldCancellation(): + self.response_closed(status) + raise exc + else: + break + + # When we return the response, we wrap the stream in a special class + # that handles notifying the connection pool once the response + # has been released. + assert isinstance(response.stream, Iterable) + return Response( + status=response.status, + headers=response.headers, + content=ConnectionPoolByteStream(response.stream, self, status), + extensions=response.extensions, + ) + + def response_closed(self, status: RequestStatus) -> None: + """ + This method acts as a callback once the request/response cycle is complete. + + It is called into from the `ConnectionPoolByteStream.close()` method. + """ + assert status.connection is not None + connection = status.connection + + with self._pool_lock: + # Update the state of the connection pool. + if status in self._requests: + self._requests.remove(status) + + if connection.is_closed() and connection in self._pool: + self._pool.remove(connection) + + # Since we've had a response closed, it's possible we'll now be able + # to service one or more requests that are currently pending. + for status in self._requests: + if status.connection is None: + acquired = self._attempt_to_acquire_connection(status) + # If we could not acquire a connection for a queued request + # then we don't need to check anymore requests that are + # queued later behind it. + if not acquired: + break + + # Housekeeping. + self._close_expired_connections() + + def close(self) -> None: + """ + Close any connections in the pool. 
+ """ + with self._pool_lock: + for connection in self._pool: + connection.close() + self._pool = [] + self._requests = [] + + def __enter__(self) -> "ConnectionPool": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self.close() + + +class ConnectionPoolByteStream: + """ + A wrapper around the response byte stream, that additionally handles + notifying the connection pool when the response has been closed. + """ + + def __init__( + self, + stream: Iterable[bytes], + pool: ConnectionPool, + status: RequestStatus, + ) -> None: + self._stream = stream + self._pool = pool + self._status = status + + def __iter__(self) -> Iterator[bytes]: + for part in self._stream: + yield part + + def close(self) -> None: + try: + if hasattr(self._stream, "close"): + self._stream.close() + finally: + with ShieldCancellation(): + self._pool.response_closed(self._status) diff --git a/parrot/lib/python3.10/site-packages/httpcore/_sync/http2.py b/parrot/lib/python3.10/site-packages/httpcore/_sync/http2.py new file mode 100644 index 0000000000000000000000000000000000000000..d141d459a59d134beac3b2dffb17d17f29abcea4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_sync/http2.py @@ -0,0 +1,589 @@ +import enum +import logging +import time +import types +import typing + +import h2.config +import h2.connection +import h2.events +import h2.exceptions +import h2.settings + +from .._backends.base import NetworkStream +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, +) +from .._models import Origin, Request, Response +from .._synchronization import Lock, Semaphore, ShieldCancellation +from .._trace import Trace +from .interfaces import ConnectionInterface + +logger = logging.getLogger("httpcore.http2") + + +def has_body_headers(request: Request) -> bool: + return any( + k.lower() == 
b"content-length" or k.lower() == b"transfer-encoding" + for k, v in request.headers + ) + + +class HTTPConnectionState(enum.IntEnum): + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class HTTP2Connection(ConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) + + def __init__( + self, + origin: Origin, + stream: NetworkStream, + keepalive_expiry: typing.Optional[float] = None, + ): + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: typing.Optional[float] = keepalive_expiry + self._h2_state = h2.connection.H2Connection(config=self.CONFIG) + self._state = HTTPConnectionState.IDLE + self._expire_at: typing.Optional[float] = None + self._request_count = 0 + self._init_lock = Lock() + self._state_lock = Lock() + self._read_lock = Lock() + self._write_lock = Lock() + self._sent_connection_init = False + self._used_all_stream_ids = False + self._connection_error = False + + # Mapping from stream ID to response stream events. + self._events: typing.Dict[ + int, + typing.Union[ + h2.events.ResponseReceived, + h2.events.DataReceived, + h2.events.StreamEnded, + h2.events.StreamReset, + ], + ] = {} + + # Connection terminated events are stored as state since + # we need to handle them for all streams. + self._connection_terminated: typing.Optional[ + h2.events.ConnectionTerminated + ] = None + + self._read_exception: typing.Optional[Exception] = None + self._write_exception: typing.Optional[Exception] = None + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + # This cannot occur in normal operation, since the connection pool + # will only send requests on connections that handle them. + # It's in place simply for resilience as a guard against incorrect + # usage, for anyone working directly with httpcore connections. 
+ raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + with self._state_lock: + if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): + self._request_count += 1 + self._expire_at = None + self._state = HTTPConnectionState.ACTIVE + else: + raise ConnectionNotAvailable() + + with self._init_lock: + if not self._sent_connection_init: + try: + kwargs = {"request": request} + with Trace("send_connection_init", logger, request, kwargs): + self._send_connection_init(**kwargs) + except BaseException as exc: + with ShieldCancellation(): + self.close() + raise exc + + self._sent_connection_init = True + + # Initially start with just 1 until the remote server provides + # its max_concurrent_streams value + self._max_streams = 1 + + local_settings_max_streams = ( + self._h2_state.local_settings.max_concurrent_streams + ) + self._max_streams_semaphore = Semaphore(local_settings_max_streams) + + for _ in range(local_settings_max_streams - self._max_streams): + self._max_streams_semaphore.acquire() + + self._max_streams_semaphore.acquire() + + try: + stream_id = self._h2_state.get_next_available_stream_id() + self._events[stream_id] = [] + except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover + self._used_all_stream_ids = True + self._request_count -= 1 + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request, "stream_id": stream_id} + with Trace("send_request_headers", logger, request, kwargs): + self._send_request_headers(request=request, stream_id=stream_id) + with Trace("send_request_body", logger, request, kwargs): + self._send_request_body(request=request, stream_id=stream_id) + with Trace( + "receive_response_headers", logger, request, kwargs + ) as trace: + status, headers = self._receive_response( + request=request, stream_id=stream_id + ) + trace.return_value = (status, headers) + + return Response( + status=status, + headers=headers, + 
content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), + extensions={ + "http_version": b"HTTP/2", + "network_stream": self._network_stream, + "stream_id": stream_id, + }, + ) + except BaseException as exc: # noqa: PIE786 + with ShieldCancellation(): + kwargs = {"stream_id": stream_id} + with Trace("response_closed", logger, request, kwargs): + self._response_closed(stream_id=stream_id) + + if isinstance(exc, h2.exceptions.ProtocolError): + # One case where h2 can raise a protocol error is when a + # closed frame has been seen by the state machine. + # + # This happens when one stream is reading, and encounters + # a GOAWAY event. Other flows of control may then raise + # a protocol error at any point they interact with the 'h2_state'. + # + # In this case we'll have stored the event, and should raise + # it as a RemoteProtocolError. + if self._connection_terminated: # pragma: nocover + raise RemoteProtocolError(self._connection_terminated) + # If h2 raises a protocol error in some other state then we + # must somehow have made a protocol violation. + raise LocalProtocolError(exc) # pragma: nocover + + raise exc + + def _send_connection_init(self, request: Request) -> None: + """ + The HTTP/2 connection requires some initial setup before we can start + using individual request/response streams on it. + """ + # Need to set these manually here instead of manipulating via + # __setitem__() otherwise the H2Connection will emit SettingsUpdate + # frames in addition to sending the undesired defaults. + self._h2_state.local_settings = h2.settings.Settings( + client=True, + initial_values={ + # Disable PUSH_PROMISE frames from the server since we don't do anything + # with them for now. Maybe when we support caching? 
+ h2.settings.SettingCodes.ENABLE_PUSH: 0, + # These two are taken from h2 for safe defaults + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100, + h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536, + }, + ) + + # Some websites (*cough* Yahoo *cough*) balk at this setting being + # present in the initial handshake since it's not defined in the original + # RFC despite the RFC mandating ignoring settings you don't know about. + del self._h2_state.local_settings[ + h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL + ] + + self._h2_state.initiate_connection() + self._h2_state.increment_flow_control_window(2**24) + self._write_outgoing_data(request) + + # Sending the request... + + def _send_request_headers(self, request: Request, stream_id: int) -> None: + """ + Send the request headers to a given stream ID. + """ + end_stream = not has_body_headers(request) + + # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'. + # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require + # HTTP/1.1 style headers, and map them appropriately if we end up on + # an HTTP/2 connection. + authority = [v for k, v in request.headers if k.lower() == b"host"][0] + + headers = [ + (b":method", request.method), + (b":authority", authority), + (b":scheme", request.url.scheme), + (b":path", request.url.target), + ] + [ + (k.lower(), v) + for k, v in request.headers + if k.lower() + not in ( + b"host", + b"transfer-encoding", + ) + ] + + self._h2_state.send_headers(stream_id, headers, end_stream=end_stream) + self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id) + self._write_outgoing_data(request) + + def _send_request_body(self, request: Request, stream_id: int) -> None: + """ + Iterate over the request body sending it to a given stream ID. 
+ """ + if not has_body_headers(request): + return + + assert isinstance(request.stream, typing.Iterable) + for data in request.stream: + self._send_stream_data(request, stream_id, data) + self._send_end_stream(request, stream_id) + + def _send_stream_data( + self, request: Request, stream_id: int, data: bytes + ) -> None: + """ + Send a single chunk of data in one or more data frames. + """ + while data: + max_flow = self._wait_for_outgoing_flow(request, stream_id) + chunk_size = min(len(data), max_flow) + chunk, data = data[:chunk_size], data[chunk_size:] + self._h2_state.send_data(stream_id, chunk) + self._write_outgoing_data(request) + + def _send_end_stream(self, request: Request, stream_id: int) -> None: + """ + Send an empty data frame on on a given stream ID with the END_STREAM flag set. + """ + self._h2_state.end_stream(stream_id) + self._write_outgoing_data(request) + + # Receiving the response... + + def _receive_response( + self, request: Request, stream_id: int + ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]: + """ + Return the response status code and headers for a given stream ID. + """ + while True: + event = self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.ResponseReceived): + break + + status_code = 200 + headers = [] + for k, v in event.headers: + if k == b":status": + status_code = int(v.decode("ascii", errors="ignore")) + elif not k.startswith(b":"): + headers.append((k, v)) + + return (status_code, headers) + + def _receive_response_body( + self, request: Request, stream_id: int + ) -> typing.Iterator[bytes]: + """ + Iterator that returns the bytes of the response body for a given stream ID. 
+ """ + while True: + event = self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.DataReceived): + amount = event.flow_controlled_length + self._h2_state.acknowledge_received_data(amount, stream_id) + self._write_outgoing_data(request) + yield event.data + elif isinstance(event, h2.events.StreamEnded): + break + + def _receive_stream_event( + self, request: Request, stream_id: int + ) -> typing.Union[ + h2.events.ResponseReceived, h2.events.DataReceived, h2.events.StreamEnded + ]: + """ + Return the next available event for a given stream ID. + + Will read more data from the network if required. + """ + while not self._events.get(stream_id): + self._receive_events(request, stream_id) + event = self._events[stream_id].pop(0) + if isinstance(event, h2.events.StreamReset): + raise RemoteProtocolError(event) + return event + + def _receive_events( + self, request: Request, stream_id: typing.Optional[int] = None + ) -> None: + """ + Read some data from the network until we see one or more events + for a given stream ID. + """ + with self._read_lock: + if self._connection_terminated is not None: + last_stream_id = self._connection_terminated.last_stream_id + if stream_id and last_stream_id and stream_id > last_stream_id: + self._request_count -= 1 + raise ConnectionNotAvailable() + raise RemoteProtocolError(self._connection_terminated) + + # This conditional is a bit icky. We don't want to block reading if we've + # actually got an event to return for a given stream. We need to do that + # check *within* the atomic read lock. Though it also need to be optional, + # because when we call it from `_wait_for_outgoing_flow` we *do* want to + # block until we've available flow control, event when we have events + # pending for the stream ID we're attempting to send on. 
+ if stream_id is None or not self._events.get(stream_id): + events = self._read_incoming_data(request) + for event in events: + if isinstance(event, h2.events.RemoteSettingsChanged): + with Trace( + "receive_remote_settings", logger, request + ) as trace: + self._receive_remote_settings_change(event) + trace.return_value = event + + elif isinstance( + event, + ( + h2.events.ResponseReceived, + h2.events.DataReceived, + h2.events.StreamEnded, + h2.events.StreamReset, + ), + ): + if event.stream_id in self._events: + self._events[event.stream_id].append(event) + + elif isinstance(event, h2.events.ConnectionTerminated): + self._connection_terminated = event + + self._write_outgoing_data(request) + + def _receive_remote_settings_change(self, event: h2.events.Event) -> None: + max_concurrent_streams = event.changed_settings.get( + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS + ) + if max_concurrent_streams: + new_max_streams = min( + max_concurrent_streams.new_value, + self._h2_state.local_settings.max_concurrent_streams, + ) + if new_max_streams and new_max_streams != self._max_streams: + while new_max_streams > self._max_streams: + self._max_streams_semaphore.release() + self._max_streams += 1 + while new_max_streams < self._max_streams: + self._max_streams_semaphore.acquire() + self._max_streams -= 1 + + def _response_closed(self, stream_id: int) -> None: + self._max_streams_semaphore.release() + del self._events[stream_id] + with self._state_lock: + if self._connection_terminated and not self._events: + self.close() + + elif self._state == HTTPConnectionState.ACTIVE and not self._events: + self._state = HTTPConnectionState.IDLE + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + if self._used_all_stream_ids: # pragma: nocover + self.close() + + def close(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. 
+ self._h2_state.close_connection() + self._state = HTTPConnectionState.CLOSED + self._network_stream.close() + + # Wrappers around network read/write operations... + + def _read_incoming_data( + self, request: Request + ) -> typing.List[h2.events.Event]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + if self._read_exception is not None: + raise self._read_exception # pragma: nocover + + try: + data = self._network_stream.read(self.READ_NUM_BYTES, timeout) + if data == b"": + raise RemoteProtocolError("Server disconnected") + except Exception as exc: + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future reads. + # (For example, this means that a single read timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._read_exception = exc + self._connection_error = True + raise exc + + events: typing.List[h2.events.Event] = self._h2_state.receive_data(data) + + return events + + def _write_outgoing_data(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + with self._write_lock: + data_to_send = self._h2_state.data_to_send() + + if self._write_exception is not None: + raise self._write_exception # pragma: nocover + + try: + self._network_stream.write(data_to_send, timeout) + except Exception as exc: # pragma: nocover + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future write. + # (For example, this means that a single write timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. 
+ self._write_exception = exc + self._connection_error = True + raise exc + + # Flow control... + + def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: + """ + Returns the maximum allowable outgoing flow for a given stream. + + If the allowable flow is zero, then waits on the network until + WindowUpdated frames have increased the flow rate. + https://tools.ietf.org/html/rfc7540#section-6.9 + """ + local_flow: int = self._h2_state.local_flow_control_window(stream_id) + max_frame_size: int = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + while flow == 0: + self._receive_events(request) + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + return flow + + # Interface for connection pooling... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + return ( + self._state != HTTPConnectionState.CLOSED + and not self._connection_error + and not self._used_all_stream_ids + and not ( + self._h2_state.state_machine.state + == h2.connection.ConnectionState.CLOSED + ) + ) + + def has_expired(self) -> bool: + now = time.monotonic() + return self._expire_at is not None and now > self._expire_at + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/2, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with 
connection instances directly. + + def __enter__(self) -> "HTTP2Connection": + return self + + def __exit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[types.TracebackType] = None, + ) -> None: + self.close() + + +class HTTP2ConnectionByteStream: + def __init__( + self, connection: HTTP2Connection, request: Request, stream_id: int + ) -> None: + self._connection = connection + self._request = request + self._stream_id = stream_id + self._closed = False + + def __iter__(self) -> typing.Iterator[bytes]: + kwargs = {"request": self._request, "stream_id": self._stream_id} + try: + with Trace("receive_response_body", logger, self._request, kwargs): + for chunk in self._connection._receive_response_body( + request=self._request, stream_id=self._stream_id + ): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
+ with ShieldCancellation(): + self.close() + raise exc + + def close(self) -> None: + if not self._closed: + self._closed = True + kwargs = {"stream_id": self._stream_id} + with Trace("response_closed", logger, self._request, kwargs): + self._connection._response_closed(stream_id=self._stream_id) diff --git a/parrot/lib/python3.10/site-packages/httpcore/_sync/http_proxy.py b/parrot/lib/python3.10/site-packages/httpcore/_sync/http_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..bb368dd42d559a6de6961c95b0cdef855b868c97 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_sync/http_proxy.py @@ -0,0 +1,350 @@ +import logging +import ssl +from base64 import b64encode +from typing import Iterable, List, Mapping, Optional, Sequence, Tuple, Union + +from .._backends.base import SOCKET_OPTION, NetworkBackend +from .._exceptions import ProxyError +from .._models import ( + URL, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, +) +from .._ssl import default_ssl_context +from .._synchronization import Lock +from .._trace import Trace +from .connection import HTTPConnection +from .connection_pool import ConnectionPool +from .http11 import HTTP11Connection +from .interfaces import ConnectionInterface + +HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]] +HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]] + + +logger = logging.getLogger("httpcore.proxy") + + +def merge_headers( + default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, +) -> List[Tuple[bytes, bytes]]: + """ + Append default_headers and override_headers, de-duplicating if a key exists + in both cases. 
+ """ + default_headers = [] if default_headers is None else list(default_headers) + override_headers = [] if override_headers is None else list(override_headers) + has_override = set(key.lower() for key, value in override_headers) + default_headers = [ + (key, value) + for key, value in default_headers + if key.lower() not in has_override + ] + return default_headers + override_headers + + +def build_auth_header(username: bytes, password: bytes) -> bytes: + userpass = username + b":" + password + return b"Basic " + b64encode(userpass) + + +class HTTPProxy(ConnectionPool): + """ + A connection pool that sends requests via an HTTP proxy. + """ + + def __init__( + self, + proxy_url: Union[URL, bytes, str], + proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + ssl_context: Optional[ssl.SSLContext] = None, + max_connections: Optional[int] = 10, + max_keepalive_connections: Optional[int] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[NetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + proxy_url: The URL to use when connecting to the proxy server. + For example `"http://127.0.0.1:8080/"`. + proxy_auth: Any proxy authentication as a two-tuple of + (username, password). May be either bytes or ascii-only str. + proxy_headers: Any HTTP headers to use for the proxy requests. + For example `{"Proxy-Authorization": "Basic :"}`. + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. 
Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. 
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + local_address=local_address, + uds=uds, + socket_options=socket_options, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + if proxy_auth is not None: + username = enforce_bytes(proxy_auth[0], name="proxy_auth") + password = enforce_bytes(proxy_auth[1], name="proxy_auth") + authorization = build_auth_header(username, password) + self._proxy_headers = [ + (b"Proxy-Authorization", authorization) + ] + self._proxy_headers + + def create_connection(self, origin: Origin) -> ConnectionInterface: + if origin.scheme == b"http": + return ForwardHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + keepalive_expiry=self._keepalive_expiry, + network_backend=self._network_backend, + ) + return TunnelHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class ForwardHTTPConnection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + keepalive_expiry: Optional[float] = None, + network_backend: Optional[NetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + self._connection = HTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ) + self._proxy_origin = 
proxy_origin + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._remote_origin = remote_origin + + def handle_request(self, request: Request) -> Response: + headers = merge_headers(self._proxy_headers, request.headers) + url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=bytes(request.url), + ) + proxy_request = Request( + method=request.method, + url=url, + headers=headers, + content=request.stream, + extensions=request.extensions, + ) + return self._connection.handle_request(proxy_request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + self._connection.close() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + +class TunnelHTTPConnection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: Optional[NetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + self._connection: ConnectionInterface = HTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ) + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._ssl_context = ssl_context + self._proxy_headers = enforce_headers(proxy_headers, 
name="proxy_headers") + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._connect_lock = Lock() + self._connected = False + + def handle_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + with self._connect_lock: + if not self._connected: + target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) + + connect_url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=target, + ) + connect_headers = merge_headers( + [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers + ) + connect_request = Request( + method=b"CONNECT", + url=connect_url, + headers=connect_headers, + extensions=request.extensions, + ) + connect_response = self._connection.handle_request( + connect_request + ) + + if connect_response.status < 200 or connect_response.status > 299: + reason_bytes = connect_response.extensions.get("reason_phrase", b"") + reason_str = reason_bytes.decode("ascii", errors="ignore") + msg = "%d %s" % (connect_response.status, reason_str) + self._connection.close() + raise ProxyError(msg) + + stream = connect_response.extensions["network_stream"] + + # Upgrade the stream to SSL + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("start_tls", logger, request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + 
) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + + self._connected = True + return self._connection.handle_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + self._connection.close() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/parrot/lib/python3.10/site-packages/httpcore/_sync/interfaces.py b/parrot/lib/python3.10/site-packages/httpcore/_sync/interfaces.py new file mode 100644 index 0000000000000000000000000000000000000000..5e95be1ec72425178245c32c33874303e0906405 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_sync/interfaces.py @@ -0,0 +1,135 @@ +from contextlib import contextmanager +from typing import Iterator, Optional, Union + +from .._models import ( + URL, + Extensions, + HeaderTypes, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, + include_request_headers, +) + + +class RequestInterface: + def request( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: HeaderTypes = None, + content: Union[bytes, Iterator[bytes], None] = None, + extensions: Optional[Extensions] = None, + ) -> Response: + # Strict type checking on 
our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = self.handle_request(request) + try: + response.read() + finally: + response.close() + return response + + @contextmanager + def stream( + self, + method: Union[bytes, str], + url: Union[URL, bytes, str], + *, + headers: HeaderTypes = None, + content: Union[bytes, Iterator[bytes], None] = None, + extensions: Optional[Extensions] = None, + ) -> Iterator[Response]: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = self.handle_request(request) + try: + yield response + finally: + response.close() + + def handle_request(self, request: Request) -> Response: + raise NotImplementedError() # pragma: nocover + + +class ConnectionInterface(RequestInterface): + def close(self) -> None: + raise NotImplementedError() # pragma: nocover + + def info(self) -> str: + raise NotImplementedError() # pragma: nocover + + def can_handle_request(self, origin: Origin) -> bool: + raise NotImplementedError() # pragma: nocover + + def is_available(self) -> bool: + """ + Return `True` if the connection is currently able to accept an + outgoing request. + + An HTTP/1.1 connection will only be available if it is currently idle. 
+ + An HTTP/2 connection will be available so long as the stream ID space is + not yet exhausted, and the connection is not in an error state. + + While the connection is being established we may not yet know if it is going + to result in an HTTP/1.1 or HTTP/2 connection. The connection should be + treated as being available, but might ultimately raise `NewConnectionRequired` + required exceptions if multiple requests are attempted over a connection + that ends up being established as HTTP/1.1. + """ + raise NotImplementedError() # pragma: nocover + + def has_expired(self) -> bool: + """ + Return `True` if the connection is in a state where it should be closed. + + This either means that the connection is idle and it has passed the + expiry time on its keep-alive, or that server has sent an EOF. + """ + raise NotImplementedError() # pragma: nocover + + def is_idle(self) -> bool: + """ + Return `True` if the connection is currently idle. + """ + raise NotImplementedError() # pragma: nocover + + def is_closed(self) -> bool: + """ + Return `True` if the connection has been closed. + + Used when a response is closed to determine if the connection may be + returned to the connection pool or not. 
+ """ + raise NotImplementedError() # pragma: nocover diff --git a/parrot/lib/python3.10/site-packages/httpcore/_sync/socks_proxy.py b/parrot/lib/python3.10/site-packages/httpcore/_sync/socks_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..407351d06b21954cad45dca7d2065bf1d24d88fd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_sync/socks_proxy.py @@ -0,0 +1,340 @@ +import logging +import ssl +import typing + +from socksio import socks5 + +from .._backends.sync import SyncBackend +from .._backends.base import NetworkBackend, NetworkStream +from .._exceptions import ConnectionNotAvailable, ProxyError +from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url +from .._ssl import default_ssl_context +from .._synchronization import Lock +from .._trace import Trace +from .connection_pool import ConnectionPool +from .http11 import HTTP11Connection +from .interfaces import ConnectionInterface + +logger = logging.getLogger("httpcore.socks") + + +AUTH_METHODS = { + b"\x00": "NO AUTHENTICATION REQUIRED", + b"\x01": "GSSAPI", + b"\x02": "USERNAME/PASSWORD", + b"\xff": "NO ACCEPTABLE METHODS", +} + +REPLY_CODES = { + b"\x00": "Succeeded", + b"\x01": "General SOCKS server failure", + b"\x02": "Connection not allowed by ruleset", + b"\x03": "Network unreachable", + b"\x04": "Host unreachable", + b"\x05": "Connection refused", + b"\x06": "TTL expired", + b"\x07": "Command not supported", + b"\x08": "Address type not supported", +} + + +def _init_socks5_connection( + stream: NetworkStream, + *, + host: bytes, + port: int, + auth: typing.Optional[typing.Tuple[bytes, bytes]] = None, +) -> None: + conn = socks5.SOCKS5Connection() + + # Auth method request + auth_method = ( + socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED + if auth is None + else socks5.SOCKS5AuthMethod.USERNAME_PASSWORD + ) + conn.send(socks5.SOCKS5AuthMethodsRequest([auth_method])) + outgoing_bytes = conn.data_to_send() + stream.write(outgoing_bytes) + 
+ # Auth method response + incoming_bytes = stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5AuthReply) + if response.method != auth_method: + requested = AUTH_METHODS.get(auth_method, "UNKNOWN") + responded = AUTH_METHODS.get(response.method, "UNKNOWN") + raise ProxyError( + f"Requested {requested} from proxy server, but got {responded}." + ) + + if response.method == socks5.SOCKS5AuthMethod.USERNAME_PASSWORD: + # Username/password request + assert auth is not None + username, password = auth + conn.send(socks5.SOCKS5UsernamePasswordRequest(username, password)) + outgoing_bytes = conn.data_to_send() + stream.write(outgoing_bytes) + + # Username/password response + incoming_bytes = stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5UsernamePasswordReply) + if not response.success: + raise ProxyError("Invalid username/password") + + # Connect request + conn.send( + socks5.SOCKS5CommandRequest.from_address( + socks5.SOCKS5Command.CONNECT, (host, port) + ) + ) + outgoing_bytes = conn.data_to_send() + stream.write(outgoing_bytes) + + # Connect response + incoming_bytes = stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socks5.SOCKS5Reply) + if response.reply_code != socks5.SOCKS5ReplyCode.SUCCEEDED: + reply_code = REPLY_CODES.get(response.reply_code, "UNKOWN") + raise ProxyError(f"Proxy Server could not connect: {reply_code}.") + + +class SOCKSProxy(ConnectionPool): + """ + A connection pool that sends requests via an HTTP proxy. 
+ """ + + def __init__( + self, + proxy_url: typing.Union[URL, bytes, str], + proxy_auth: typing.Optional[ + typing.Tuple[typing.Union[bytes, str], typing.Union[bytes, str]] + ] = None, + ssl_context: typing.Optional[ssl.SSLContext] = None, + max_connections: typing.Optional[int] = 10, + max_keepalive_connections: typing.Optional[int] = None, + keepalive_expiry: typing.Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + network_backend: typing.Optional[NetworkBackend] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + proxy_url: The URL to use when connecting to the proxy server. + For example `"http://127.0.0.1:8080/"`. + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. 
+ network_backend: A backend instance to use for handling network I/O. + """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + if proxy_auth is not None: + username, password = proxy_auth + username_bytes = enforce_bytes(username, name="proxy_auth") + password_bytes = enforce_bytes(password, name="proxy_auth") + self._proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = ( + username_bytes, + password_bytes, + ) + else: + self._proxy_auth = None + + def create_connection(self, origin: Origin) -> ConnectionInterface: + return Socks5Connection( + proxy_origin=self._proxy_url.origin, + remote_origin=origin, + proxy_auth=self._proxy_auth, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class Socks5Connection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = None, + ssl_context: typing.Optional[ssl.SSLContext] = None, + keepalive_expiry: typing.Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: typing.Optional[NetworkBackend] = None, + ) -> None: + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._proxy_auth = proxy_auth + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + + self._network_backend: NetworkBackend = ( + SyncBackend() if network_backend is None else network_backend + ) + self._connect_lock = Lock() + self._connection: typing.Optional[ConnectionInterface] = None + self._connect_failed = False + 
+ def handle_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + with self._connect_lock: + if self._connection is None: + try: + # Connect to the proxy + kwargs = { + "host": self._proxy_origin.host.decode("ascii"), + "port": self._proxy_origin.port, + "timeout": timeout, + } + with Trace("connect_tcp", logger, request, kwargs) as trace: + stream = self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + + # Connect to the remote host using socks5 + kwargs = { + "stream": stream, + "host": self._remote_origin.host.decode("ascii"), + "port": self._remote_origin.port, + "auth": self._proxy_auth, + } + with Trace( + "setup_socks5_connection", logger, request, kwargs + ) as trace: + _init_socks5_connection(**kwargs) + trace.return_value = stream + + # Upgrade the stream to SSL + if self._remote_origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ( + ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ) + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("start_tls", logger, request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or ( + self._http2 and not self._http1 + ): # pragma: nocover + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + 
origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): # pragma: nocover + raise ConnectionNotAvailable() + + return self._connection.handle_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + if self._connection is not None: + self._connection.close() + + def is_available(self) -> bool: + if self._connection is None: # pragma: nocover + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._remote_origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: # pragma: nocover + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/parrot/lib/python3.10/site-packages/httpcore/_synchronization.py b/parrot/lib/python3.10/site-packages/httpcore/_synchronization.py new file mode 100644 index 0000000000000000000000000000000000000000..bae27c1b11255891997ae21c0f1c240f547a65a5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_synchronization.py @@ -0,0 
+1,279 @@ +import threading +from types import TracebackType +from typing import Optional, Type + +import sniffio + +from ._exceptions import ExceptionMapping, PoolTimeout, map_exceptions + +# Our async synchronization primatives use either 'anyio' or 'trio' depending +# on if they're running under asyncio or trio. + +try: + import trio +except ImportError: # pragma: nocover + trio = None # type: ignore + +try: + import anyio +except ImportError: # pragma: nocover + anyio = None # type: ignore + + +class AsyncLock: + def __init__(self) -> None: + self._backend = "" + + def setup(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a lock with the correct implementation. + """ + self._backend = sniffio.current_async_library() + if self._backend == "trio": + if trio is None: # pragma: nocover + raise RuntimeError( + "Running under trio, requires the 'trio' package to be installed." + ) + self._trio_lock = trio.Lock() + else: + if anyio is None: # pragma: nocover + raise RuntimeError( + "Running under asyncio requires the 'anyio' package to be installed." + ) + self._anyio_lock = anyio.Lock() + + async def __aenter__(self) -> "AsyncLock": + if not self._backend: + self.setup() + + if self._backend == "trio": + await self._trio_lock.acquire() + else: + await self._anyio_lock.acquire() + + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + if self._backend == "trio": + self._trio_lock.release() + else: + self._anyio_lock.release() + + +class AsyncEvent: + def __init__(self) -> None: + self._backend = "" + + def setup(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a lock with the correct implementation. 
+ """ + self._backend = sniffio.current_async_library() + if self._backend == "trio": + if trio is None: # pragma: nocover + raise RuntimeError( + "Running under trio requires the 'trio' package to be installed." + ) + self._trio_event = trio.Event() + else: + if anyio is None: # pragma: nocover + raise RuntimeError( + "Running under asyncio requires the 'anyio' package to be installed." + ) + self._anyio_event = anyio.Event() + + def set(self) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + self._trio_event.set() + else: + self._anyio_event.set() + + async def wait(self, timeout: Optional[float] = None) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + if trio is None: # pragma: nocover + raise RuntimeError( + "Running under trio requires the 'trio' package to be installed." + ) + + trio_exc_map: ExceptionMapping = {trio.TooSlowError: PoolTimeout} + timeout_or_inf = float("inf") if timeout is None else timeout + with map_exceptions(trio_exc_map): + with trio.fail_after(timeout_or_inf): + await self._trio_event.wait() + else: + if anyio is None: # pragma: nocover + raise RuntimeError( + "Running under asyncio requires the 'anyio' package to be installed." + ) + + anyio_exc_map: ExceptionMapping = {TimeoutError: PoolTimeout} + with map_exceptions(anyio_exc_map): + with anyio.fail_after(timeout): + await self._anyio_event.wait() + + +class AsyncSemaphore: + def __init__(self, bound: int) -> None: + self._bound = bound + self._backend = "" + + def setup(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a semaphore with the correct implementation. + """ + self._backend = sniffio.current_async_library() + if self._backend == "trio": + if trio is None: # pragma: nocover + raise RuntimeError( + "Running under trio requires the 'trio' package to be installed." 
+ ) + + self._trio_semaphore = trio.Semaphore( + initial_value=self._bound, max_value=self._bound + ) + else: + if anyio is None: # pragma: nocover + raise RuntimeError( + "Running under asyncio requires the 'anyio' package to be installed." + ) + + self._anyio_semaphore = anyio.Semaphore( + initial_value=self._bound, max_value=self._bound + ) + + async def acquire(self) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + await self._trio_semaphore.acquire() + else: + await self._anyio_semaphore.acquire() + + async def release(self) -> None: + if self._backend == "trio": + self._trio_semaphore.release() + else: + self._anyio_semaphore.release() + + +class AsyncShieldCancellation: + # For certain portions of our codebase where we're dealing with + # closing connections during exception handling we want to shield + # the operation from being cancelled. + # + # with AsyncShieldCancellation(): + # ... # clean-up operations, shielded from cancellation. + + def __init__(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a shielded scope with the correct implementation. + """ + self._backend = sniffio.current_async_library() + + if self._backend == "trio": + if trio is None: # pragma: nocover + raise RuntimeError( + "Running under trio requires the 'trio' package to be installed." + ) + + self._trio_shield = trio.CancelScope(shield=True) + else: + if anyio is None: # pragma: nocover + raise RuntimeError( + "Running under asyncio requires the 'anyio' package to be installed." 
+ ) + + self._anyio_shield = anyio.CancelScope(shield=True) + + def __enter__(self) -> "AsyncShieldCancellation": + if self._backend == "trio": + self._trio_shield.__enter__() + else: + self._anyio_shield.__enter__() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + if self._backend == "trio": + self._trio_shield.__exit__(exc_type, exc_value, traceback) + else: + self._anyio_shield.__exit__(exc_type, exc_value, traceback) + + +# Our thread-based synchronization primitives... + + +class Lock: + def __init__(self) -> None: + self._lock = threading.Lock() + + def __enter__(self) -> "Lock": + self._lock.acquire() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self._lock.release() + + +class Event: + def __init__(self) -> None: + self._event = threading.Event() + + def set(self) -> None: + self._event.set() + + def wait(self, timeout: Optional[float] = None) -> None: + if not self._event.wait(timeout=timeout): + raise PoolTimeout() # pragma: nocover + + +class Semaphore: + def __init__(self, bound: int) -> None: + self._semaphore = threading.Semaphore(value=bound) + + def acquire(self) -> None: + self._semaphore.acquire() + + def release(self) -> None: + self._semaphore.release() + + +class ShieldCancellation: + # Thread-synchronous codebases don't support cancellation semantics. + # We have this class because we need to mirror the async and sync + # cases within our package, but it's just a no-op. 
+ def __enter__(self) -> "ShieldCancellation": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + pass diff --git a/parrot/lib/python3.10/site-packages/httpcore/_trace.py b/parrot/lib/python3.10/site-packages/httpcore/_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..b122a53e88f17e1e450f63b05ede3e28e8a7992a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_trace.py @@ -0,0 +1,105 @@ +import inspect +import logging +from types import TracebackType +from typing import Any, Dict, Optional, Type + +from ._models import Request + + +class Trace: + def __init__( + self, + name: str, + logger: logging.Logger, + request: Optional[Request] = None, + kwargs: Optional[Dict[str, Any]] = None, + ) -> None: + self.name = name + self.logger = logger + self.trace_extension = ( + None if request is None else request.extensions.get("trace") + ) + self.debug = self.logger.isEnabledFor(logging.DEBUG) + self.kwargs = kwargs or {} + self.return_value: Any = None + self.should_trace = self.debug or self.trace_extension is not None + self.prefix = self.logger.name.split(".")[-1] + + def trace(self, name: str, info: Dict[str, Any]) -> None: + if self.trace_extension is not None: + prefix_and_name = f"{self.prefix}.{name}" + ret = self.trace_extension(prefix_and_name, info) + if inspect.iscoroutine(ret): # pragma: no cover + raise TypeError( + "If you are using a synchronous interface, " + "the callback of the `trace` extension should " + "be a normal function instead of an asynchronous function." 
+ ) + + if self.debug: + if not info or "return_value" in info and info["return_value"] is None: + message = name + else: + args = " ".join([f"{key}={value!r}" for key, value in info.items()]) + message = f"{name} {args}" + self.logger.debug(message) + + def __enter__(self) -> "Trace": + if self.should_trace: + info = self.kwargs + self.trace(f"{self.name}.started", info) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + if self.should_trace: + if exc_value is None: + info = {"return_value": self.return_value} + self.trace(f"{self.name}.complete", info) + else: + info = {"exception": exc_value} + self.trace(f"{self.name}.failed", info) + + async def atrace(self, name: str, info: Dict[str, Any]) -> None: + if self.trace_extension is not None: + prefix_and_name = f"{self.prefix}.{name}" + coro = self.trace_extension(prefix_and_name, info) + if not inspect.iscoroutine(coro): # pragma: no cover + raise TypeError( + "If you're using an asynchronous interface, " + "the callback of the `trace` extension should " + "be an asynchronous function rather than a normal function." 
+ ) + await coro + + if self.debug: + if not info or "return_value" in info and info["return_value"] is None: + message = name + else: + args = " ".join([f"{key}={value!r}" for key, value in info.items()]) + message = f"{name} {args}" + self.logger.debug(message) + + async def __aenter__(self) -> "Trace": + if self.should_trace: + info = self.kwargs + await self.atrace(f"{self.name}.started", info) + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + if self.should_trace: + if exc_value is None: + info = {"return_value": self.return_value} + await self.atrace(f"{self.name}.complete", info) + else: + info = {"exception": exc_value} + await self.atrace(f"{self.name}.failed", info) diff --git a/parrot/lib/python3.10/site-packages/httpcore/_utils.py b/parrot/lib/python3.10/site-packages/httpcore/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..df5dea8fe472697afea4156d2916389e2f70d684 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/httpcore/_utils.py @@ -0,0 +1,36 @@ +import select +import socket +import sys +import typing + + +def is_socket_readable(sock: typing.Optional[socket.socket]) -> bool: + """ + Return whether a socket, as identifed by its file descriptor, is readable. + "A socket is readable" means that the read buffer isn't empty, i.e. that calling + .recv() on it would immediately return some data. + """ + # NOTE: we want check for readability without actually attempting to read, because + # we don't want to block forever if it's not readable. + + # In the case that the socket no longer exists, or cannot return a file + # descriptor, we treat it as being readable, as if it the next read operation + # on it is ready to return the terminating `b""`. 
+ sock_fd = None if sock is None else sock.fileno() + if sock_fd is None or sock_fd < 0: # pragma: nocover + return True + + # The implementation below was stolen from: + # https://github.com/python-trio/trio/blob/20ee2b1b7376db637435d80e266212a35837ddcc/trio/_socket.py#L471-L478 + # See also: https://github.com/encode/httpcore/pull/193#issuecomment-703129316 + + # Use select.select on Windows, and when poll is unavailable and select.poll + # everywhere else. (E.g. When eventlet is in use. See #327) + if ( + sys.platform == "win32" or getattr(select, "poll", None) is None + ): # pragma: nocover + rready, _, _ = select.select([sock_fd], [], [], 0) + return bool(rready) + p = select.poll() + p.register(sock_fd, select.POLLIN) + return bool(p.poll(0)) diff --git a/parrot/lib/python3.10/site-packages/httpcore/py.typed b/parrot/lib/python3.10/site-packages/httpcore/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/METADATA b/parrot/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..ebb7d35931ef8c9fb39b52894a5c81f903a8aa7b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/METADATA @@ -0,0 +1,41 @@ +Metadata-Version: 2.1 +Name: imageio-ffmpeg +Version: 0.5.1 +Summary: FFMPEG wrapper for Python +Home-page: https://github.com/imageio/imageio-ffmpeg +Download-URL: http://pypi.python.org/pypi/imageio-ffmpeg +Author: imageio contributors +Author-email: almar.klein@gmail.com +License: BSD-2-Clause +Keywords: video ffmpeg +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System 
:: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Provides: imageio_ffmpeg +Requires-Python: >=3.5 +License-File: LICENSE +Requires-Dist: setuptools + +FFMPEG wrapper for Python. + +Note that the platform-specific wheels contain the binary executable +of ffmpeg, which makes this package around 60 MiB in size. +I guess that's the cost for being able to read/write video files. + +For Linux users: the above is not the case when installing via your +Linux package manager (if that is possible), because this package would +simply depend on ffmpeg in that case. diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__init__.py b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b79d867e2e6508a97ea4d27d8481afd561b1dd69 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__init__.py @@ -0,0 +1,8 @@ +""" imageio_ffmpeg, FFMPEG wrapper for Python. 
+""" + +# flake8: noqa + +from ._definitions import __version__ +from ._io import count_frames_and_secs, read_frames, write_frames +from ._utils import get_ffmpeg_exe, get_ffmpeg_version diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_parsing.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_parsing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2144640c5b87cea209c0f5fbdbe9439fcc9b732d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_parsing.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_definitions.py b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_definitions.py new file mode 100644 index 0000000000000000000000000000000000000000..9f6e39c460cbe3917e2642dcfc4d6a046e70bc99 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_definitions.py @@ -0,0 +1,54 @@ +import platform +import struct +import sys + +__version__ = "0.5.1" + + +def get_platform(): + bits = struct.calcsize("P") * 8 + if sys.platform.startswith("linux"): + architecture = platform.machine() + if architecture == "aarch64": + return "linuxaarch64" + return "linux{}".format(bits) + elif sys.platform.startswith("freebsd"): + return "freebsd{}".format(bits) + elif sys.platform.startswith("win"): + return "win{}".format(bits) + elif sys.platform.startswith("cygwin"): + return "win{}".format(bits) + elif sys.platform.startswith("darwin"): + return "osx{}".format(bits) + else: # pragma: no cover + return None + + +# The Linux static builds (https://johnvansickle.com/ffmpeg/) are build +# for Linux kernels 2.6.32 and up (at the time of writing, ffmpeg v4.1). +# This corresponds to CentOS 6. This means we should use manylinux2010 and not +# manylinux1. 
+# manylinux1: https://www.python.org/dev/peps/pep-0513 +# manylinux2010: https://www.python.org/dev/peps/pep-0571 + + +# Platform string -> ffmpeg filename +FNAME_PER_PLATFORM = { + "osx64": "ffmpeg-osx64-v4.2.2", # 10.10+ + "win32": "ffmpeg-win32-v4.2.2.exe", # Windows 7+ + "win64": "ffmpeg-win64-v4.2.2.exe", + # "linux32": "ffmpeg-linux32-v4.2.2", + "linux64": "ffmpeg-linux64-v4.2.2", # Kernel 3.2.0+ + "linuxaarch64": "ffmpeg-linuxaarch64-v4.2.2", +} + +osxplats = "macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64" + +# Wheel tag -> platform string +WHEEL_BUILDS = { + "py3-none-manylinux2010_x86_64": "linux64", + "py3-none-manylinux2014_aarch64": "linuxaarch64", + "py3-none-" + osxplats: "osx64", + "py3-none-win32": "win32", + "py3-none-win_amd64": "win64", +} diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_io.py b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_io.py new file mode 100644 index 0000000000000000000000000000000000000000..faf1ee89ea5bca973e946114a18ef5701a82f42f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_io.py @@ -0,0 +1,693 @@ +import pathlib +import subprocess +import sys +import time +from collections import defaultdict +from functools import lru_cache + +from ._parsing import LogCatcher, cvsecs, parse_ffmpeg_header +from ._utils import _popen_kwargs, get_ffmpeg_exe, logger + +ISWIN = sys.platform.startswith("win") + +h264_encoder_preference = defaultdict(lambda: -1) +# The libx264 was the default encoder for a longe time with imageio +h264_encoder_preference["libx264"] = 100 + +# Encoder with the nvidia graphics card dedicated hardware +h264_encoder_preference["h264_nvenc"] = 90 +# Deprecated names for the same encoder +h264_encoder_preference["nvenc_h264"] = 90 +h264_encoder_preference["nvenc"] = 90 + +# vaapi provides hardware encoding with intel integrated graphics chipsets +h264_encoder_preference["h264_vaapi"] = 80 + +# openh264 is cisco's open source encoder 
+h264_encoder_preference["libopenh264"] = 70 + +h264_encoder_preference["libx264rgb"] = 50 + + +def ffmpeg_test_encoder(encoder): + # Use the null streams to validate if we can encode anything + # https://trac.ffmpeg.org/wiki/Null + cmd = [ + get_ffmpeg_exe(), + "-hide_banner", + "-f", + "lavfi", + "-i", + "nullsrc=s=256x256:d=8", + "-vcodec", + encoder, + "-f", + "null", + "-", + ] + p = subprocess.run( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + return p.returncode == 0 + + +def get_compiled_h264_encoders(): + cmd = [get_ffmpeg_exe(), "-hide_banner", "-encoders"] + p = subprocess.run( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout = p.stdout.decode().replace("\r", "") + # 2022/04/08: hmaarrfk + # I couldn't find a good way to get the list of available encoders from + # the ffmpeg command + # The ffmpeg command return a table that looks like + # Notice the leading space at the very beginning + # On ubuntu with libffmpeg-nvenc-dev we get + # $ ffmpeg -hide_banner -encoders | grep -i h.264 + # + # Encoders: + # V..... = Video + # A..... = Audio + # S..... = Subtitle + # .F.... = Frame-level multithreading + # ..S... = Slice-level multithreading + # ...X.. = Codec is experimental + # ....B. = Supports draw_horiz_band + # .....D = Supports direct rendering method 1 + # ------ + # V..... libx264 libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (codec h264) + # V..... libx264rgb libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 RGB (codec h264) + # V....D h264_nvenc NVIDIA NVENC H.264 encoder (codec h264) + # V..... h264_omx OpenMAX IL H.264 video encoder (codec h264) + # V..... h264_qsv H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (Intel Quick Sync Video acceleration) (codec h264) + # V..... h264_v4l2m2m V4L2 mem2mem H.264 encoder wrapper (codec h264) + # V....D h264_vaapi H.264/AVC (VAAPI) (codec h264) + # V..... nvenc NVIDIA NVENC H.264 encoder (codec h264) + # V..... 
nvenc_h264 NVIDIA NVENC H.264 encoder (codec h264) + # + # However, just because ffmpeg was compiled with the options enabled + # it doesn't mean that it will be successful + header_footer = stdout.split("------") + footer = header_footer[1].strip("\n") + encoders = [] + for line in footer.split("\n"): + # Strip to remove any leading spaces + line = line.strip() + encoder = line.split(" ")[1] + + if encoder in h264_encoder_preference: + # These encoders are known to support H.264 + # We forcibly include them in case their description changes to + # not include the string "H.264" + encoders.append(encoder) + elif (line[0] == "V") and ("H.264" in line): + encoders.append(encoder) + + encoders.sort(reverse=True, key=lambda x: h264_encoder_preference[x]) + if "h264_nvenc" in encoders: + # Remove deprecated names for the same encoder + for encoder in ["nvenc", "nvenc_h264"]: + if encoder in encoders: + encoders.remove(encoder) + # Return an immutable tuple to avoid users corrupting the lru_cache + return tuple(encoders) + + +@lru_cache() +def get_first_available_h264_encoder(): + compiled_encoders = get_compiled_h264_encoders() + for encoder in compiled_encoders: + if ffmpeg_test_encoder(encoder): + return encoder + else: + raise RuntimeError( + "No valid H.264 encoder was found with the ffmpeg installation" + ) + + +def count_frames_and_secs(path): + """ + Get the number of frames and number of seconds for the given video + file. Note that this operation can be quite slow for large files. + + Disclaimer: I've seen this produce different results from actually reading + the frames with older versions of ffmpeg (2.x). Therefore I cannot say + with 100% certainty that the returned values are always exact. 
+ """ + # https://stackoverflow.com/questions/2017843/fetch-frame-count-with-ffmpeg + + if isinstance(path, pathlib.PurePath): + path = str(path) + if not isinstance(path, str): + raise TypeError("Video path must be a string or pathlib.Path.") + + cmd = [ + get_ffmpeg_exe(), + "-i", + path, + "-map", + "0:v:0", + "-vf", + "null", + "-f", + "null", + "-", + ] + try: + out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, **_popen_kwargs()) + except subprocess.CalledProcessError as err: + out = err.output.decode(errors="ignore") + raise RuntimeError( + "FFMPEG call failed with {}:\n{}".format(err.returncode, out) + ) + + # Note that other than with the subprocess calls below, ffmpeg wont hang here. + # Worst case Python will stop/crash and ffmpeg will continue running until done. + + nframes = nsecs = None + for line in reversed(out.splitlines()): + if line.startswith(b"frame="): + line = line.decode(errors="ignore") + i = line.find("frame=") + if i >= 0: + s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip() + nframes = int(s) + i = line.find("time=") + if i >= 0: + s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip() + nsecs = cvsecs(*s.split(":")) + return nframes, nsecs + + raise RuntimeError("Could not get number of frames") # pragma: no cover + + +def read_frames( + path, + pix_fmt="rgb24", + bpp=None, + input_params=None, + output_params=None, + bits_per_pixel=None, +): + """ + Create a generator to iterate over the frames in a video file. + + It first yields a small metadata dictionary that contains: + + * ffmpeg_version: the ffmpeg version in use (as a string). + * codec: a hint about the codec used to encode the video, e.g. "h264". + * source_size: the width and height of the encoded video frames. + * size: the width and height of the frames that will be produced. + * fps: the frames per second. Can be zero if it could not be detected. + * duration: duration in seconds. Can be zero if it could not be detected. 
+ + After that, it yields frames until the end of the video is reached. Each + frame is a bytes object. + + This function makes no assumptions about the number of frames in + the data. For one because this is hard to predict exactly, but also + because it may depend on the provided output_params. If you want + to know the number of frames in a video file, use count_frames_and_secs(). + It is also possible to estimate the number of frames from the fps and + duration, but note that even if both numbers are present, the resulting + value is not always correct. + + Example: + + gen = read_frames(path) + meta = gen.__next__() + for frame in gen: + print(len(frame)) + + Parameters: + path (str): the filename of the file to read from. + pix_fmt (str): the pixel format of the frames to be read. + The default is "rgb24" (frames are uint8 RGB images). + input_params (list): Additional ffmpeg input command line parameters. + output_params (list): Additional ffmpeg output command line parameters. + bits_per_pixel (int): The number of bits per pixel in the output frames. + This depends on the given pix_fmt. Default is 24 (RGB) + bpp (int): DEPRECATED, USE bits_per_pixel INSTEAD. The number of bytes per pixel in the output frames. + This depends on the given pix_fmt. Some pixel formats like yuv420p have 12 bits per pixel + and cannot be set in bytes as integer. For this reason the bpp argument is deprecated. + """ + + # ----- Input args + + if isinstance(path, pathlib.PurePath): + path = str(path) + if not isinstance(path, str): + raise TypeError("Video path must be a string or pathlib.Path.") + # Note: Dont check whether it exists. The source could be e.g. a camera. 
+ + pix_fmt = pix_fmt or "rgb24" + bpp = bpp or 3 + bits_per_pixel = bits_per_pixel or bpp * 8 + input_params = input_params or [] + output_params = output_params or [] + + assert isinstance(pix_fmt, str), "pix_fmt must be a string" + assert isinstance(bits_per_pixel, int), "bpp and bits_per_pixel must be an int" + assert isinstance(input_params, list), "input_params must be a list" + assert isinstance(output_params, list), "output_params must be a list" + + # ----- Prepare + + pre_output_params = ["-pix_fmt", pix_fmt, "-vcodec", "rawvideo", "-f", "image2pipe"] + + cmd = [get_ffmpeg_exe()] + cmd += input_params + ["-i", path] + cmd += pre_output_params + output_params + ["-"] + + process = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + **_popen_kwargs(prevent_sigint=True) + ) + + log_catcher = LogCatcher(process.stderr) + + # Init policy by which to terminate ffmpeg. May be set to "kill" later. + stop_policy = "timeout" # not wait; ffmpeg should be able to quit quickly + + # Enter try block directly after opening the process. + # We terminate ffmpeg in the finally clause. + # Generators are automatically closed when they get deleted, + # so the finally block is guaranteed to run. + try: + # ----- Load meta data + + # Wait for the log catcher to get the meta information + etime = time.time() + 10.0 + while log_catcher.is_alive() and not log_catcher.header and time.time() < etime: + time.sleep(0.01) + + # Check whether we have the information + if not log_catcher.header: + err2 = log_catcher.get_text(0.2) + fmt = "Could not load meta information\n=== stderr ===\n{}" + raise IOError(fmt.format(err2)) + elif "No such file or directory" in log_catcher.header: + raise IOError("{} not found! 
Wrong path?".format(path)) + + meta = parse_ffmpeg_header(log_catcher.header) + yield meta + + # ----- Read frames + + width, height = meta["size"] + framesize_bits = width * height * bits_per_pixel + framesize_bytes = framesize_bits / 8 + assert ( + framesize_bytes.is_integer() + ), "incorrect bits_per_pixel, framesize in bytes must be an int" + framesize_bytes = int(framesize_bytes) + framenr = 0 + + while True: + framenr += 1 + try: + bb = bytes() + while len(bb) < framesize_bytes: + extra_bytes = process.stdout.read(framesize_bytes - len(bb)) + if not extra_bytes: + if len(bb) == 0: + return + else: + raise RuntimeError( + "End of file reached before full frame could be read." + ) + bb += extra_bytes + yield bb + except Exception as err: + err1 = str(err) + err2 = log_catcher.get_text(0.4) + fmt = "Could not read frame {}:\n{}\n=== stderr ===\n{}" + raise RuntimeError(fmt.format(framenr, err1, err2)) + + except GeneratorExit: + # Note that GeneratorExit does not inherit from Exception but BaseException + pass + + except Exception: + # Normal exceptions fall through + raise + + except BaseException: + # Detect KeyboardInterrupt / SystemExit: don't wait for ffmpeg to quit + stop_policy = "kill" + raise + + finally: + # Stop the LogCatcher thread, which reads from stderr. + log_catcher.stop_me() + + # Make sure that ffmpeg is terminated. + if process.poll() is None: + # Ask ffmpeg to quit + try: + # I read somewhere that modern ffmpeg on Linux prefers a + # "ctrl-c", but tests so far suggests sending q is more robust. + # > p.send_signal(signal.SIGINT) + # Sending q via communicate works, but can hang (see #17) + # > p.communicate(b"q") + # So let's do similar to what communicate does, but without + # reading stdout (which may block). It looks like only closing + # stdout is enough (tried Windows+Linux), but let's play safe. + # Found that writing to stdin can cause "Invalid argument" on + # Windows # and "Broken Pipe" on Unix. 
+ # p.stdin.write(b"q") # commented out in v0.4.1 + process.stdout.close() + process.stdin.close() + # p.stderr.close() -> not here, the log_catcher closes it + except Exception as err: # pragma: no cover + logger.warning("Error while attempting stop ffmpeg (r): " + str(err)) + + if stop_policy == "timeout": + # Wait until timeout, produce a warning and kill if it still exists + try: + etime = time.time() + 1.5 + while time.time() < etime and process.poll() is None: + time.sleep(0.01) + finally: + if process.poll() is None: # pragma: no cover + logger.warning("We had to kill ffmpeg to stop it.") + process.kill() + + else: # stop_policy == "kill" + # Just kill it + process.kill() + + +def write_frames( + path, + size, + pix_fmt_in="rgb24", + pix_fmt_out="yuv420p", + fps=16, + quality=5, + bitrate=None, + codec=None, + macro_block_size=16, + ffmpeg_log_level="warning", + ffmpeg_timeout=None, + input_params=None, + output_params=None, + audio_path=None, + audio_codec=None, +): + """ + Create a generator to write frames (bytes objects) into a video file. + + The frames are written by using the generator's `send()` method. Frames + can be anything that can be written to a file. Typically these are + bytes objects, but c-contiguous Numpy arrays also work. + + Example: + + gen = write_frames(path, size) + gen.send(None) # seed the generator + for frame in frames: + gen.send(frame) + gen.close() # don't forget this + + Parameters: + path (str): the filename to write to. + size (tuple): the width and height of the frames. + pix_fmt_in (str): the pixel format of incoming frames. + E.g. "gray", "gray8a", "rgb24", or "rgba". Default "rgb24". + pix_fmt_out (str): the pixel format to store frames. Default yuv420p". + fps (float): The frames per second. Default 16. + quality (float): A measure for quality between 0 and 10. Default 5. + Ignored if bitrate is given. + bitrate (str): The bitrate, e.g. "192k". The defaults are pretty good. + codec (str): The codec. 
Default "libx264" for .mp4 (if available from + the ffmpeg executable) or "msmpeg4" for .wmv. + macro_block_size (int): You probably want to align the size of frames + to this value to avoid image resizing. Default 16. Can be set + to 1 to avoid block alignment, though this is not recommended. + ffmpeg_log_level (str): The ffmpeg logging level. Default "warning". + ffmpeg_timeout (float): Timeout in seconds to wait for ffmpeg process + to finish. Value of 0 or None will wait forever (default). The time that + ffmpeg needs depends on CPU speed, compression, and frame size. + input_params (list): Additional ffmpeg input command line parameters. + output_params (list): Additional ffmpeg output command line parameters. + audio_path (str): A input file path for encoding with an audio stream. + Default None, no audio. + audio_codec (str): The audio codec to use if audio_path is provided. + "copy" will try to use audio_path's audio codec without re-encoding. + Default None, but some formats must have certain codecs specified. + """ + + # ----- Input args + + if isinstance(path, pathlib.PurePath): + path = str(path) + if not isinstance(path, str): + raise TypeError("Video path must be a string or pathlib.Path.") + + # The pix_fmt_out yuv420p is the best for the outpur to work in + # QuickTime and most other players. These players only support + # the YUV planar color space with 4:2:0 chroma subsampling for + # H.264 video. Otherwise, depending on the source, ffmpeg may + # output to a pixel format that may be incompatible with these + # players. See https://trac.ffmpeg.org/wiki/Encode/H.264#Encodingfordumbplayers + + pix_fmt_in = pix_fmt_in or "rgb24" + pix_fmt_out = pix_fmt_out or "yuv420p" + fps = fps or 16 + # bitrate, codec, macro_block_size can all be None or ... 
+ macro_block_size = macro_block_size or 16 + ffmpeg_log_level = ffmpeg_log_level or "warning" + input_params = input_params or [] + output_params = output_params or [] + ffmpeg_timeout = ffmpeg_timeout or 0 + + floatish = float, int + if isinstance(size, (tuple, list)): + assert len(size) == 2, "size must be a 2-tuple" + assert isinstance(size[0], int) and isinstance( + size[1], int + ), "size must be ints" + sizestr = "{:d}x{:d}".format(*size) + # elif isinstance(size, str): + # assert "x" in size, "size as string must have format NxM" + # sizestr = size + else: + assert False, "size must be str or tuple" + assert isinstance(pix_fmt_in, str), "pix_fmt_in must be str" + assert isinstance(pix_fmt_out, str), "pix_fmt_out must be str" + assert isinstance(fps, floatish), "fps must be float" + if quality is not None: + assert isinstance(quality, floatish), "quality must be float" + assert 1 <= quality <= 10, "quality must be between 1 and 10 inclusive" + assert isinstance(macro_block_size, int), "macro_block_size must be int" + assert isinstance(ffmpeg_log_level, str), "ffmpeg_log_level must be str" + assert isinstance(ffmpeg_timeout, floatish), "ffmpeg_timeout must be float" + assert isinstance(input_params, list), "input_params must be a list" + assert isinstance(output_params, list), "output_params must be a list" + + # ----- Prepare + + # Get parameters + if not codec: + if path.lower().endswith(".wmv"): + # This is a safer default codec on windows to get videos that + # will play in powerpoint and other apps. H264 is not always + # available on windows. 
+ codec = "msmpeg4" + else: + codec = get_first_available_h264_encoder() + + audio_params = ["-an"] + if audio_path is not None and not path.lower().endswith(".gif"): + audio_params = ["-i", audio_path] + if audio_codec is not None: + output_params += ["-acodec", audio_codec] + output_params += ["-map", "0:v:0", "-map", "1:a:0"] + + # Get command + cmd = [ + get_ffmpeg_exe(), + "-y", + "-f", + "rawvideo", + "-vcodec", + "rawvideo", + "-s", + sizestr, + ] + cmd += ["-pix_fmt", pix_fmt_in, "-r", "{:.02f}".format(fps)] + input_params + cmd += ["-i", "-"] + audio_params + cmd += ["-vcodec", codec, "-pix_fmt", pix_fmt_out] + + # Add fixed bitrate or variable bitrate compression flags + if bitrate is not None: + cmd += ["-b:v", str(bitrate)] + elif quality is not None: # If None, then we don't add anything + quality = 1 - quality / 10.0 + if codec == "libx264": + # crf ranges 0 to 51, 51 being worst. + quality = int(quality * 51) + cmd += ["-crf", str(quality)] # for h264 + else: # Many codecs accept q:v + # q:v range can vary, 1-31, 31 being worst + # But q:v does not always have the same range. + # May need a way to find range for any codec. + quality = int(quality * 30) + 1 + cmd += ["-qscale:v", str(quality)] # for others + + # Note, for most codecs, the image dimensions must be divisible by + # 16 the default for the macro_block_size is 16. Check if image is + # divisible, if not have ffmpeg upsize to nearest size and warn + # user they should correct input image if this is not desired. 
+ if macro_block_size > 1: + if size[0] % macro_block_size > 0 or size[1] % macro_block_size > 0: + out_w = size[0] + out_h = size[1] + if size[0] % macro_block_size > 0: + out_w += macro_block_size - (size[0] % macro_block_size) + if size[1] % macro_block_size > 0: + out_h += macro_block_size - (size[1] % macro_block_size) + cmd += ["-vf", "scale={}:{}".format(out_w, out_h)] + logger.warning( + "IMAGEIO FFMPEG_WRITER WARNING: input image is not" + " divisible by macro_block_size={}, resizing from {} " + "to {} to ensure video compatibility with most codecs " + "and players. To prevent resizing, make your input " + "image divisible by the macro_block_size or set the " + "macro_block_size to 1 (risking incompatibility).".format( + macro_block_size, size[:2], (out_w, out_h) + ) + ) + + # Rather than redirect stderr to a pipe, just set minimal + # output from ffmpeg by default. That way if there are warnings + # the user will see them. + cmd += ["-v", ffmpeg_log_level] + cmd += output_params + cmd.append(path) + cmd_str = " ".join(cmd) + if any( + [level in ffmpeg_log_level for level in ("info", "verbose", "debug", "trace")] + ): + logger.info("RUNNING FFMPEG COMMAND: " + cmd_str) + + # Launch process + p = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=None, + **_popen_kwargs(prevent_sigint=True) + ) + + # Note that directing stderr to a pipe on windows will cause ffmpeg + # to hang if the buffer is not periodically cleared using + # StreamCatcher or other means. + # Setting bufsize to 0 or a small value does not seem to have much effect + # (tried on Windows and Linux). I suspect that ffmpeg buffers + # multiple frames (before encoding in a batch). + + # Init policy by which to terminate ffmpeg. May be set to "kill" later. + stop_policy = "timeout" + if not ffmpeg_timeout: + stop_policy = "wait" + + # ----- Write frames + + # Enter try block directly after opening the process. + # We terminate ffmpeg in the finally clause. 
+ # Generators are automatically closed when they get deleted, + # so the finally block is guaranteed to run. + try: + # Just keep going until the generator.close() is called (raises GeneratorExit). + # This could also happen when the generator is deleted somehow. + nframes = 0 + while True: + # Get frame + bb = yield + + # framesize = size[0] * size[1] * depth * bpp + # assert isinstance(bb, bytes), "Frame must be send as bytes" + # assert len(bb) == framesize, "Frame must have width*height*depth*bpp bytes" + # Actually, we accept anything that can be written to file. + # This e.g. allows writing numpy arrays without having to make a copy ... + + # Write + try: + p.stdin.write(bb) + except Exception as err: + # Show the command and stderr from pipe + msg = ( + "{0:}\n\nFFMPEG COMMAND:\n{1:}\n\nFFMPEG STDERR " + "OUTPUT:\n".format(err, cmd_str) + ) + raise IOError(msg) + + nframes += 1 + + except GeneratorExit: + # Note that GeneratorExit does not inherit from Exception but BaseException + # Detect premature closing + if nframes == 0: + logger.warning("No frames have been written; the written video is invalid.") + + except Exception: + # Normal exceptions fall through + raise + + except BaseException: + # Detect KeyboardInterrupt / SystemExit: don't wait for ffmpeg to quit + stop_policy = "kill" + raise + + finally: + # Make sure that ffmpeg is terminated. + if p.poll() is None: + # Tell ffmpeg that we're done + try: + p.stdin.close() + except Exception as err: # pragma: no cover + logger.warning("Error while attempting stop ffmpeg (w): " + str(err)) + + if stop_policy == "timeout": + # Wait until timeout, produce a warning and kill if it still exists + try: + etime = time.time() + ffmpeg_timeout + while (time.time() < etime) and p.poll() is None: + time.sleep(0.01) + finally: + if p.poll() is None: # pragma: no cover + logger.warning( + "We had to kill ffmpeg to stop it. " + + "Consider increasing ffmpeg_timeout, " + + "or setting it to zero (no timeout)." 
+ ) + p.kill() + + elif stop_policy == "wait": + # Wait forever, kill if it if we're interrupted + try: + while p.poll() is None: + time.sleep(0.01) + finally: # the above can raise e.g. by ctrl-c or systemexit + if p.poll() is None: # pragma: no cover + p.kill() + + else: # stop_policy == "kill": + # Just kill it + p.kill() + # Just to be safe, wrap in try/except + try: + p.stdout.close() + except Exception: + pass diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_parsing.py b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..8dd71b82e73dc54c3f1b575f403d632f85e8aa52 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_parsing.py @@ -0,0 +1,208 @@ +import re +import threading +import time + +from ._utils import logger + + +class LogCatcher(threading.Thread): + """Thread to keep reading from stderr so that the buffer does not + fill up and stalls the ffmpeg process. On stderr a message is send + on every few frames with some meta information. We only keep the + last ones. + """ + + def __init__(self, file): + self._file = file + self._header = "" + self._lines = [] + self._remainder = b"" + threading.Thread.__init__(self) + self.daemon = True # do not let this thread hold up Python shutdown + self._should_stop = False + self.start() + + def stop_me(self): + self._should_stop = True + + @property + def header(self): + """Get header text. Empty string if the header is not yet parsed.""" + return self._header + + def get_text(self, timeout=0): + """Get the whole text written to stderr so far. To preserve + memory, only the last 50 to 100 frames are kept. + + If a timeout is given, wait for this thread to finish. When + something goes wrong, we stop ffmpeg and want a full report of + stderr, but this thread might need a tiny bit more time. + """ + + # Wait? 
+ if timeout > 0: + etime = time.time() + timeout + while self.is_alive() and time.time() < etime: # pragma: no cover + time.sleep(0.01) + # Return str + lines = b"\n".join(self._lines) + return self._header + "\n" + lines.decode("utf-8", "ignore") + + def run(self): + # Create ref here so it still exists even if Py is shutting down + limit_lines_local = limit_lines + + while not self._should_stop: + time.sleep(0) + # Read one line. Detect when closed, and exit + try: + line = self._file.read(20) + except ValueError: # pragma: no cover + break + if not line: + break + # Process to divide in lines + line = line.replace(b"\r", b"\n").replace(b"\n\n", b"\n") + lines = line.split(b"\n") + lines[0] = self._remainder + lines[0] + self._remainder = lines.pop(-1) + # Process each line + self._lines.extend(lines) + if not self._header: + if get_output_video_line(self._lines): + header = b"\n".join(self._lines) + self._header += header.decode("utf-8", "ignore") + elif self._lines: + self._lines = limit_lines_local(self._lines) + + # Close the file when we're done + # See #61 and #69 + try: + self._file.close() + except Exception: + pass + + +def get_output_video_line(lines): + """Get the line that defines the video stream that ffmpeg outputs, + and which we read. + """ + in_output = False + for line in lines: + sline = line.lstrip() + if sline.startswith(b"Output "): + in_output = True + elif in_output: + if sline.startswith(b"Stream ") and b" Video:" in sline: + return line + + +def limit_lines(lines, N=32): + """When number of lines > 2*N, reduce to N.""" + if len(lines) > 2 * N: + lines = [b"... showing only last few lines ..."] + lines[-N:] + return lines + + +def cvsecs(*args): + """converts a time to second. Either cvsecs(min, secs) or + cvsecs(hours, mins, secs). 
+ """ + if len(args) == 1: + return float(args[0]) + elif len(args) == 2: + return 60 * float(args[0]) + float(args[1]) + elif len(args) == 3: + return 3600 * float(args[0]) + 60 * float(args[1]) + float(args[2]) + + +def parse_ffmpeg_header(text): + lines = text.splitlines() + meta = {} + + # meta["header"] = text # Can enable this for debugging + + # Get version + ver = lines[0].split("version", 1)[-1].split("Copyright")[0] + meta["ffmpeg_version"] = ver.strip() + " " + lines[1].strip() + + # get the output line that speaks about video + videolines = [ + l for l in lines if l.lstrip().startswith("Stream ") and " Video: " in l + ] + + # Codec and pix_fmt hint + line = videolines[0] + meta["codec"] = line.split("Video: ", 1)[-1].lstrip().split(" ", 1)[0].strip() + meta["pix_fmt"] = re.split( + # use a negative lookahead regexp to ignore commas that are contained + # within a parenthesis + # this helps consider a pix_fmt of the kind + # yuv420p(tv, progressive) + # as what it is, instead of erroneously reporting as + # yuv420p(tv + r",\s*(?![^()]*\))", + line.split("Video: ", 1)[-1], + )[1].strip() + + # get the output line that speaks about audio + audiolines = [ + l for l in lines if l.lstrip().startswith("Stream ") and " Audio: " in l + ] + + if len(audiolines) > 0: + audio_line = audiolines[0] + meta["audio_codec"] = ( + audio_line.split("Audio: ", 1)[-1].lstrip().split(" ", 1)[0].strip() + ) + + # get the frame rate. 
+ # matches can be empty, see #171, assume nframes = inf + # the regexp omits values of "1k tbr" which seems a specific edge-case #262 + # it seems that tbr is generally to be preferred #262 + fps = 0 + for line in [videolines[0]]: + matches = re.findall(r" ([0-9]+\.?[0-9]*) (fps)", line) + if matches: + fps = float(matches[0][0].strip()) + meta["fps"] = fps + + # get the size of the original stream, of the form 460x320 (w x h) + line = videolines[0] + match = re.search(" [0-9]*x[0-9]*(,| )", line) + parts = line[match.start() : match.end() - 1].split("x") + meta["source_size"] = tuple(map(int, parts)) + + # get the size of what we receive, of the form 460x320 (w x h) + line = videolines[-1] # Pipe output + match = re.search(" [0-9]*x[0-9]*(,| )", line) + parts = line[match.start() : match.end() - 1].split("x") + meta["size"] = tuple(map(int, parts)) + + # Check the two sizes + if meta["source_size"] != meta["size"]: + logger.warning( + "The frame size for reading {} is " + "different from the source frame size {}.".format( + meta["size"], meta["source_size"] + ) + ) + + # get the rotate metadata + reo_rotate = re.compile(r"rotate\s+:\s([0-9]+)") + match = reo_rotate.search(text) + rotate = 0 + if match is not None: + rotate = match.groups()[0] + meta["rotate"] = int(rotate) + + # get duration (in seconds) + line = [l for l in lines if "Duration: " in l][0] + match = re.search(" [0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]", line) + duration = 0 + if match is not None: + hms = line[match.start() + 1 : match.end()].split(":") + duration = cvsecs(*hms) + meta["duration"] = duration + + return meta diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_utils.py b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..be6f916ab5739aec1e2ebf25a82a1fe993011d85 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/_utils.py @@ -0,0 +1,127 @@ +import logging +import os 
import importlib.resources
import logging
import os
import subprocess
import sys
from functools import lru_cache

from ._definitions import FNAME_PER_PLATFORM, get_platform

logger = logging.getLogger("imageio_ffmpeg")


def get_ffmpeg_exe():
    """
    Get the ffmpeg executable file. This can be the binary defined by
    the IMAGEIO_FFMPEG_EXE environment variable, the binary distributed
    with imageio-ffmpeg, an ffmpeg binary installed with conda, or the
    system ffmpeg (in that order). A RuntimeError is raised if no valid
    ffmpeg could be found.

    Returns
    -------
    str
        Path to (or bare command name of) a working ffmpeg executable.

    Raises
    ------
    RuntimeError
        If no ffmpeg executable could be located.
    """
    # 1. Try environment variable. - Dont test it: the user is explicit here!
    exe = os.getenv("IMAGEIO_FFMPEG_EXE", None)
    if exe:
        return exe

    # Auto-detect (result is cached, see _get_ffmpeg_exe)
    exe = _get_ffmpeg_exe()
    if exe:
        return exe

    # Nothing was found
    raise RuntimeError(
        "No ffmpeg exe could be found. Install ffmpeg on your system, "
        "or set the IMAGEIO_FFMPEG_EXE environment variable."
    )


@lru_cache()
def _get_ffmpeg_exe():
    """Auto-detect a validated ffmpeg executable, or return None.

    Candidates are probed in order: the binary bundled with this
    package, a conda-installed binary under sys.prefix, then the
    system ``ffmpeg`` command. Each candidate is verified by actually
    running ``<exe> -version`` (see _is_valid_exe). The result is
    cached for the lifetime of the process.
    """
    plat = get_platform()

    # 2. Try from here (the binary shipped inside this package)
    exe = os.path.join(_get_bin_dir(), FNAME_PER_PLATFORM.get(plat, ""))
    if exe and os.path.isfile(exe) and _is_valid_exe(exe):
        return exe

    # 3. Try binary from conda package
    # (installed e.g. via `conda install ffmpeg -c conda-forge`)
    if plat.startswith("win"):
        exe = os.path.join(sys.prefix, "Library", "bin", "ffmpeg.exe")
    else:
        exe = os.path.join(sys.prefix, "bin", "ffmpeg")
    if exe and os.path.isfile(exe) and _is_valid_exe(exe):
        return exe

    # 4. Try system ffmpeg command (resolved via PATH)
    exe = "ffmpeg"
    if _is_valid_exe(exe):
        return exe

    return None


def _get_bin_dir():
    """Return the directory holding the bundled ffmpeg binaries.

    Uses the importlib.resources API appropriate for the running
    Python version (``path`` pre-3.9, ``files``/``as_file`` from 3.9 on).
    """
    if sys.version_info < (3, 9):
        context = importlib.resources.path("imageio_ffmpeg.binaries", "__init__.py")
    else:
        ref = importlib.resources.files("imageio_ffmpeg.binaries") / "__init__.py"
        context = importlib.resources.as_file(ref)
    # Enter and immediately exit the context: we only need the resolved
    # filesystem path. We assume that the data files are on a normal dir
    # on the fs, so the path stays valid after the context exits.
    with context as path:
        pass
    # Return the dir containing the package's __init__.py.
    return str(path.parent)


def _popen_kwargs(prevent_sigint=False):
    """Build platform-appropriate keyword arguments for subprocess.Popen.

    Parameters
    ----------
    prevent_sigint : bool
        If True, detach the child from the parent's signal/process
        group so that Ctrl-C in the parent does not propagate to
        ffmpeg (see imageio-ffmpeg issue #4).

    Returns
    -------
    dict
        Keys ``startupinfo``, ``creationflags`` and ``preexec_fn``,
        suitable for splatting into a Popen call.
    """
    startupinfo = None
    preexec_fn = None
    creationflags = 0
    if sys.platform.startswith("win"):
        # Stops executable from flashing on Windows (see #22)
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    if prevent_sigint:
        # Prevent propagation of sigint (see #4)
        # https://stackoverflow.com/questions/5045771
        if sys.platform.startswith("win"):
            # CREATE_NEW_PROCESS_GROUP
            creationflags = 0x00000200
        else:
            preexec_fn = os.setpgrp  # the _pre_exec does not seem to work

        falsy = ("", "0", "false", "no")
        if os.getenv("IMAGEIO_FFMPEG_NO_PREVENT_SIGINT", "").lower() not in falsy:
            # Unset preexec_fn to work around a strange hang on fork() (see #58)
            preexec_fn = None

    return {
        "startupinfo": startupinfo,
        "creationflags": creationflags,
        "preexec_fn": preexec_fn,
    }


def _is_valid_exe(exe):
    """Return True if ``<exe> -version`` runs and exits with status 0.

    Any failure to launch or a nonzero exit status means the candidate
    is not a usable ffmpeg.
    """
    cmd = [exe, "-version"]
    try:
        # subprocess.DEVNULL avoids manually opening os.devnull and
        # leaves no file handle to manage; stderr=STDOUT folds stderr
        # into the same (discarded) stream.
        subprocess.check_call(
            cmd, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, **_popen_kwargs()
        )
        return True
    except (OSError, ValueError, subprocess.CalledProcessError):
        return False


def get_ffmpeg_version():
    """
    Get the version of the used ffmpeg executable (as a string).
    """
    exe = get_ffmpeg_exe()
    # First line of `ffmpeg -version` looks like:
    #   ffmpeg version 4.2.2 Copyright (c) ...
    line = subprocess.check_output([exe, "-version"], **_popen_kwargs()).split(
        b"\n", 1
    )[0]
    line = line.decode(errors="ignore").strip()
    version = line.split("version", 1)[-1].lstrip().split(" ", 1)[0].strip()
    return version
diff --git a/parrot/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc b/parrot/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d22e3457916b65329eb6175e8058ee50f906003c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66408aab485cdc2a6e07bb181b58fa7bd47d110a716baaadf355cb1d5e744d41 +size 108384 diff --git a/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c014c7148ab3471fe4dbb3d9943696cdda90601 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/test_dpo_slow.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/test_dpo_slow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a08601597961dbaa2c32aaeb1b3947b6ea914b0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/test_dpo_slow.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/testing_constants.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/testing_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c74c13d47e3e78873bb7fea56cad45565ae13410 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tests/slow/__pycache__/testing_constants.cpython-310.pyc differ