diff --git a/.gitattributes b/.gitattributes index c3ef1539cd6e5610ecb173fd0448181b2512c39a..f8444a7fcb131964e7290d6eb9f797d6c59e47a7 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1695,3 +1695,4 @@ vllm/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophant parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 b/parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 new file mode 100644 index 0000000000000000000000000000000000000000..b5c8746104e2617fa4ba62bc59cc1df4f1a45edc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79b37a526b50d6ebcd2255983198276718c29c0942d1fde96306e413041e01cb +size 3075448 diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..69dd431440c7266e26056b61d7bae98be2550957 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so differ diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/_measurements.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/_measurements.py new file mode 100644 index 
0000000000000000000000000000000000000000..bcd83df42be3708231870cf5eff977a6388cc3a1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/_measurements.py @@ -0,0 +1,1680 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np +from . import _ni_support +from . import _ni_label +from . import _nd_image +from . 
import _morphology + +__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean', + 'variance', 'standard_deviation', 'minimum', 'maximum', 'median', + 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass', + 'histogram', 'watershed_ift', 'sum_labels', 'value_indices'] + + +def label(input, structure=None, output=None): + """ + Label features in an array. + + Parameters + ---------- + input : array_like + An array-like object to be labeled. Any non-zero values in `input` are + counted as features and zero values are considered the background. + structure : array_like, optional + A structuring element that defines feature connections. + `structure` must be centrosymmetric + (see Notes). + If no structuring element is provided, + one is automatically generated with a squared connectivity equal to + one. That is, for a 2-D `input` array, the default structuring element + is:: + + [[0,1,0], + [1,1,1], + [0,1,0]] + + output : (None, data-type, array_like), optional + If `output` is a data type, it specifies the type of the resulting + labeled feature array. + If `output` is an array-like object, then `output` will be updated + with the labeled features from this function. This function can + operate in-place, by passing output=input. + Note that the output must be able to store the largest label, or this + function will raise an Exception. + + Returns + ------- + label : ndarray or int + An integer ndarray where each unique feature in `input` has a unique + label in the returned array. + num_features : int + How many objects were found. + + If `output` is None, this function returns a tuple of + (`labeled_array`, `num_features`). + + If `output` is a ndarray, then it will be updated with values in + `labeled_array` and only `num_features` will be returned by this + function. 
+ + See Also + -------- + find_objects : generate a list of slices for the labeled features (or + objects); useful for finding features' position or + dimensions + + Notes + ----- + A centrosymmetric matrix is a matrix that is symmetric about the center. + See [1]_ for more information. + + The `structure` matrix must be centrosymmetric to ensure + two-way connections. + For instance, if the `structure` matrix is not centrosymmetric + and is defined as:: + + [[0,1,0], + [1,1,0], + [0,0,0]] + + and the `input` is:: + + [[1,2], + [0,3]] + + then the structure matrix would indicate the + entry 2 in the input is connected to 1, + but 1 is not connected to 2. + + References + ---------- + .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric) + matrices, their basic properties, eigenvalues, and + eigenvectors." The American Mathematical Monthly 92.10 + (1985): 711-717. + + Examples + -------- + Create an image with some features, then label it using the default + (cross-shaped) structuring element: + + >>> from scipy.ndimage import label, generate_binary_structure + >>> import numpy as np + >>> a = np.array([[0,0,1,1,0,0], + ... [0,0,0,1,0,0], + ... [1,1,0,0,1,0], + ... [0,0,0,1,0,0]]) + >>> labeled_array, num_features = label(a) + + Each of the 4 features are labeled with a different integer: + + >>> num_features + 4 + >>> labeled_array + array([[0, 0, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [2, 2, 0, 0, 3, 0], + [0, 0, 0, 4, 0, 0]]) + + Generate a structuring element that will consider features connected even + if they touch diagonally: + + >>> s = generate_binary_structure(2,2) + + or, + + >>> s = [[1,1,1], + ... [1,1,1], + ... 
[1,1,1]] + + Label the image using the new structuring element: + + >>> labeled_array, num_features = label(a, structure=s) + + Show the 2 labeled features (note that features 1, 3, and 4 from above are + now considered a single feature): + + >>> num_features + 2 + >>> labeled_array + array([[0, 0, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [2, 2, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0]]) + + """ + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + if structure is None: + structure = _morphology.generate_binary_structure(input.ndim, 1) + structure = np.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have equal rank') + for ii in structure.shape: + if ii != 3: + raise ValueError('structure dimensions must be equal to 3') + + # Use 32 bits if it's large enough for this image. + # _ni_label.label() needs two entries for background and + # foreground tracking + need_64bits = input.size >= (2**31 - 2) + + if isinstance(output, np.ndarray): + if output.shape != input.shape: + raise ValueError("output shape not correct") + caller_provided_output = True + else: + caller_provided_output = False + if output is None: + output = np.empty(input.shape, np.intp if need_64bits else np.int32) + else: + output = np.empty(input.shape, output) + + # handle scalars, 0-D arrays + if input.ndim == 0 or input.size == 0: + if input.ndim == 0: + # scalar + maxlabel = 1 if (input != 0) else 0 + output[...] = maxlabel + else: + # 0-D + maxlabel = 0 + if caller_provided_output: + return maxlabel + else: + return output, maxlabel + + try: + max_label = _ni_label._label(input, structure, output) + except _ni_label.NeedMoreBits as e: + # Make another attempt with enough bits, then try to cast to the + # new type. + tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32) + max_label = _ni_label._label(input, structure, tmp_output) + output[...] = tmp_output[...] 
+ if not np.all(output == tmp_output): + # refuse to return bad results + raise RuntimeError( + "insufficient bit-depth in requested output type" + ) from e + + if caller_provided_output: + # result was written in-place + return max_label + else: + return output, max_label + + +def find_objects(input, max_label=0): + """ + Find objects in a labeled array. + + Parameters + ---------- + input : ndarray of ints + Array containing objects defined by different labels. Labels with + value 0 are ignored. + max_label : int, optional + Maximum label to be searched for in `input`. If max_label is not + given, the positions of all objects are returned. + + Returns + ------- + object_slices : list of tuples + A list of tuples, with each tuple containing N slices (with N the + dimension of the input array). Slices correspond to the minimal + parallelepiped that contains the object. If a number is missing, + None is returned instead of a slice. The label ``l`` corresponds to + the index ``l-1`` in the returned list. + + See Also + -------- + label, center_of_mass + + Notes + ----- + This function is very useful for isolating a volume of interest inside + a 3-D array, that cannot be "seen through". 
+ + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((6,6), dtype=int) + >>> a[2:4, 2:4] = 1 + >>> a[4, 4] = 1 + >>> a[:2, :3] = 2 + >>> a[0, 5] = 3 + >>> a + array([[2, 2, 2, 0, 0, 3], + [2, 2, 2, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0]]) + >>> ndimage.find_objects(a) + [(slice(2, 5, None), slice(2, 5, None)), + (slice(0, 2, None), slice(0, 3, None)), + (slice(0, 1, None), slice(5, 6, None))] + >>> ndimage.find_objects(a, max_label=2) + [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))] + >>> ndimage.find_objects(a == 1, max_label=2) + [(slice(2, 5, None), slice(2, 5, None)), None] + + >>> loc = ndimage.find_objects(a)[0] + >>> a[loc] + array([[1, 1, 0], + [1, 1, 0], + [0, 0, 1]]) + + """ + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + + if max_label < 1: + max_label = input.max() + + return _nd_image.find_objects(input, max_label) + + +def value_indices(arr, *, ignore_value=None): + """ + Find indices of each distinct value in given array. + + Parameters + ---------- + arr : ndarray of ints + Array containing integer values. + ignore_value : int, optional + This value will be ignored in searching the `arr` array. If not + given, all values found will be included in output. Default + is None. + + Returns + ------- + indices : dictionary + A Python dictionary of array indices for each distinct value. The + dictionary is keyed by the distinct values, the entries are array + index tuples covering all occurrences of the value within the + array. + + This dictionary can occupy significant memory, usually several times + the size of the input array. 
+ + See Also + -------- + label, maximum, median, minimum_position, extrema, sum, mean, variance, + standard_deviation, numpy.where, numpy.unique + + Notes + ----- + For a small array with few distinct values, one might use + `numpy.unique()` to find all possible values, and ``(arr == val)`` to + locate each value within that array. However, for large arrays, + with many distinct values, this can become extremely inefficient, + as locating each value would require a new search through the entire + array. Using this function, there is essentially one search, with + the indices saved for all distinct values. + + This is useful when matching a categorical image (e.g. a segmentation + or classification) to an associated image of other data, allowing + any per-class statistic(s) to then be calculated. Provides a + more flexible alternative to functions like ``scipy.ndimage.mean()`` + and ``scipy.ndimage.variance()``. + + Some other closely related functionality, with different strengths and + weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and + the `scikit-image `_ function + ``skimage.measure.regionprops()``. + + Note for IDL users: this provides functionality equivalent to IDL's + REVERSE_INDICES option (as per the IDL documentation for the + `HISTOGRAM `_ + function). + + .. versionadded:: 1.10.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import ndimage + >>> a = np.zeros((6, 6), dtype=int) + >>> a[2:4, 2:4] = 1 + >>> a[4, 4] = 1 + >>> a[:2, :3] = 2 + >>> a[0, 5] = 3 + >>> a + array([[2, 2, 2, 0, 0, 3], + [2, 2, 2, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0]]) + >>> val_indices = ndimage.value_indices(a) + + The dictionary `val_indices` will have an entry for each distinct + value in the input array. 
+ + >>> val_indices.keys() + dict_keys([np.int64(0), np.int64(1), np.int64(2), np.int64(3)]) + + The entry for each value is an index tuple, locating the elements + with that value. + + >>> ndx1 = val_indices[1] + >>> ndx1 + (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4])) + + This can be used to index into the original array, or any other + array with the same shape. + + >>> a[ndx1] + array([1, 1, 1, 1, 1]) + + If the zeros were to be ignored, then the resulting dictionary + would no longer have an entry for zero. + + >>> val_indices = ndimage.value_indices(a, ignore_value=0) + >>> val_indices.keys() + dict_keys([np.int64(1), np.int64(2), np.int64(3)]) + + """ + # Cope with ignore_value being None, without too much extra complexity + # in the C code. If not None, the value is passed in as a numpy array + # with the same dtype as arr. + ignore_value_arr = np.zeros((1,), dtype=arr.dtype) + ignoreIsNone = (ignore_value is None) + if not ignoreIsNone: + ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value) + + val_indices = _nd_image.value_indices(arr, ignoreIsNone, ignore_value_arr) + return val_indices + + +def labeled_comprehension(input, labels, index, func, out_dtype, default, + pass_positions=False): + """ + Roughly equivalent to [func(input[labels == i]) for i in index]. + + Sequentially applies an arbitrary function (that works on array_like input) + to subsets of an N-D image array specified by `labels` and `index`. + The option exists to provide the function with positional parameters as the + second argument. + + Parameters + ---------- + input : array_like + Data from which to select `labels` to process. + labels : array_like or None + Labels to objects in `input`. + If not None, array must be same shape as `input`. + If None, `func` is applied to raveled `input`. + index : int, sequence of ints or None + Subset of `labels` to which to apply `func`. + If a scalar, a single value is returned. 
+ If None, `func` is applied to all non-zero values of `labels`. + func : callable + Python function to apply to `labels` from `input`. + out_dtype : dtype + Dtype to use for `result`. + default : int, float or None + Default return value when a element of `index` does not exist + in `labels`. + pass_positions : bool, optional + If True, pass linear indices to `func` as a second argument. + Default is False. + + Returns + ------- + result : ndarray + Result of applying `func` to each of `labels` to `input` in `index`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> lbl, nlbl = ndimage.label(a) + >>> lbls = np.arange(1, nlbl+1) + >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0) + array([ 2.75, 5.5 , 6. ]) + + Falling back to `default`: + + >>> lbls = np.arange(1, nlbl+2) + >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1) + array([ 2.75, 5.5 , 6. , -1. ]) + + Passing positions: + + >>> def fn(val, pos): + ... print("fn says: %s : %s" % (val, pos)) + ... return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum()) + ... 
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True) + fn says: [1 2 5 3] : [0 1 4 5] + fn says: [4 7] : [ 7 11] + fn says: [9 3] : [12 13] + array([ 11., 11., -12., 0.]) + + """ + + as_scalar = np.isscalar(index) + input = np.asarray(input) + + if pass_positions: + positions = np.arange(input.size).reshape(input.shape) + + if labels is None: + if index is not None: + raise ValueError("index without defined labels") + if not pass_positions: + return func(input.ravel()) + else: + return func(input.ravel(), positions.ravel()) + + try: + input, labels = np.broadcast_arrays(input, labels) + except ValueError as e: + raise ValueError("input and labels must have the same shape " + "(excepting dimensions with width 1)") from e + + if index is None: + if not pass_positions: + return func(input[labels > 0]) + else: + return func(input[labels > 0], positions[labels > 0]) + + index = np.atleast_1d(index) + if np.any(index.astype(labels.dtype).astype(index.dtype) != index): + raise ValueError(f"Cannot convert index values from <{index.dtype}> to " + f"<{labels.dtype}> (labels' type) without loss of precision") + + index = index.astype(labels.dtype) + + # optimization: find min/max in index, + # and select those parts of labels, input, and positions + lo = index.min() + hi = index.max() + mask = (labels >= lo) & (labels <= hi) + + # this also ravels the arrays + labels = labels[mask] + input = input[mask] + if pass_positions: + positions = positions[mask] + + # sort everything by labels + label_order = labels.argsort() + labels = labels[label_order] + input = input[label_order] + if pass_positions: + positions = positions[label_order] + + index_order = index.argsort() + sorted_index = index[index_order] + + def do_map(inputs, output): + """labels must be sorted""" + nidx = sorted_index.size + + # Find boundaries for each stretch of constant labels + # This could be faster, but we already paid N log N to sort labels. 
+ lo = np.searchsorted(labels, sorted_index, side='left') + hi = np.searchsorted(labels, sorted_index, side='right') + + for i, l, h in zip(range(nidx), lo, hi): + if l == h: + continue + output[i] = func(*[inp[l:h] for inp in inputs]) + + temp = np.empty(index.shape, out_dtype) + temp[:] = default + if not pass_positions: + do_map([input], temp) + else: + do_map([input, positions], temp) + + output = np.zeros(index.shape, out_dtype) + output[index_order] = temp + if as_scalar: + output = output[0] + + return output + + +def _safely_castable_to_int(dt): + """Test whether the NumPy data type `dt` can be safely cast to an int.""" + int_size = np.dtype(int).itemsize + safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or + (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size)) + return safe + + +def _stats(input, labels=None, index=None, centered=False): + """Count, sum, and optionally compute (sum - centre)^2 of input by label + + Parameters + ---------- + input : array_like, N-D + The input data to be analyzed. + labels : array_like (N-D), optional + The labels of the data in `input`. This array must be broadcast + compatible with `input`; typically, it is the same shape as `input`. + If `labels` is None, all nonzero values in `input` are treated as + the single labeled group. + index : label or sequence of labels, optional + These are the labels of the groups for which the stats are computed. + If `index` is None, the stats are computed for the single group where + `labels` is greater than 0. + centered : bool, optional + If True, the centered sum of squares for each labeled group is + also returned. Default is False. + + Returns + ------- + counts : int or ndarray of ints + The number of elements in each labeled group. + sums : scalar or ndarray of scalars + The sums of the values in each labeled group. + sums_c : scalar or ndarray of scalars, optional + The sums of mean-centered squares of the values in each labeled group. 
+ This is only returned if `centered` is True. + + """ + def single_group(vals): + if centered: + vals_c = vals - vals.mean() + return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum() + else: + return vals.size, vals.sum() + + if labels is None: + return single_group(input) + + # ensure input and labels match sizes + input, labels = np.broadcast_arrays(input, labels) + + if index is None: + return single_group(input[labels > 0]) + + if np.isscalar(index): + return single_group(input[labels == index]) + + def _sum_centered(labels): + # `labels` is expected to be an ndarray with the same shape as `input`. + # It must contain the label indices (which are not necessarily the labels + # themselves). + means = sums / counts + centered_input = input - means[labels] + # bincount expects 1-D inputs, so we ravel the arguments. + bc = np.bincount(labels.ravel(), + weights=(centered_input * + centered_input.conjugate()).ravel()) + return bc + + # Remap labels to unique integers if necessary, or if the largest + # label is larger than the number of values. + + if (not _safely_castable_to_int(labels.dtype) or + labels.min() < 0 or labels.max() > labels.size): + # Use np.unique to generate the label indices. `new_labels` will + # be 1-D, but it should be interpreted as the flattened N-D array of + # label indices. + unique_labels, new_labels = np.unique(labels, return_inverse=True) + new_labels = np.reshape(new_labels, (-1,)) # flatten, since it may be >1-D + counts = np.bincount(new_labels) + sums = np.bincount(new_labels, weights=input.ravel()) + if centered: + # Compute the sum of the mean-centered squares. + # We must reshape new_labels to the N-D shape of `input` before + # passing it _sum_centered. 
+ sums_c = _sum_centered(new_labels.reshape(labels.shape)) + idxs = np.searchsorted(unique_labels, index) + # make all of idxs valid + idxs[idxs >= unique_labels.size] = 0 + found = (unique_labels[idxs] == index) + else: + # labels are an integer type allowed by bincount, and there aren't too + # many, so call bincount directly. + counts = np.bincount(labels.ravel()) + sums = np.bincount(labels.ravel(), weights=input.ravel()) + if centered: + sums_c = _sum_centered(labels) + # make sure all index values are valid + idxs = np.asanyarray(index, np.int_).copy() + found = (idxs >= 0) & (idxs < counts.size) + idxs[~found] = 0 + + counts = counts[idxs] + counts[~found] = 0 + sums = sums[idxs] + sums[~found] = 0 + + if not centered: + return (counts, sums) + else: + sums_c = sums_c[idxs] + sums_c[~found] = 0 + return (counts, sums, sums_c) + + +def sum(input, labels=None, index=None): + """ + Calculate the sum of the values of the array. + + Notes + ----- + This is an alias for `ndimage.sum_labels` kept for backwards compatibility + reasons, for new code please prefer `sum_labels`. See the `sum_labels` + docstring for more details. + + """ + return sum_labels(input, labels, index) + + +def sum_labels(input, labels=None, index=None): + """ + Calculate the sum of the values of the array. + + Parameters + ---------- + input : array_like + Values of `input` inside the regions defined by `labels` + are summed together. + labels : array_like of ints, optional + Assign labels to the values of the array. Has to have the same shape as + `input`. + index : array_like, optional + A single label number or a sequence of label numbers of + the objects to be measured. + + Returns + ------- + sum : ndarray or scalar + An array of the sums of values of `input` inside the regions defined + by `labels` with the same shape as `index`. If 'index' is None or scalar, + a scalar is returned. 
+ + See Also + -------- + mean, median + + Examples + -------- + >>> from scipy import ndimage + >>> input = [0,1,2,3] + >>> labels = [1,1,2,2] + >>> ndimage.sum_labels(input, labels, index=[1,2]) + [1.0, 5.0] + >>> ndimage.sum_labels(input, labels, index=1) + 1 + >>> ndimage.sum_labels(input, labels) + 6 + + + """ + count, sum = _stats(input, labels, index) + return sum + + +def mean(input, labels=None, index=None): + """ + Calculate the mean of the values of an array at labels. + + Parameters + ---------- + input : array_like + Array on which to compute the mean of elements over distinct + regions. + labels : array_like, optional + Array of labels of same shape, or broadcastable to the same shape as + `input`. All elements sharing the same label form one region over + which the mean of the elements is computed. + index : int or sequence of ints, optional + Labels of the objects over which the mean is to be computed. + Default is None, in which case the mean for all values where label is + greater than 0 is calculated. + + Returns + ------- + out : list + Sequence of same length as `index`, with the mean of the different + regions labeled by the labels in `index`. + + See Also + -------- + variance, standard_deviation, minimum, maximum, sum, label + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.arange(25).reshape((5,5)) + >>> labels = np.zeros_like(a) + >>> labels[3:5,3:5] = 1 + >>> index = np.unique(labels) + >>> labels + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 1], + [0, 0, 0, 1, 1]]) + >>> index + array([0, 1]) + >>> ndimage.mean(a, labels=labels, index=index) + [10.285714285714286, 21.0] + + """ + + count, sum = _stats(input, labels, index) + return sum / np.asanyarray(count).astype(np.float64) + + +def variance(input, labels=None, index=None): + """ + Calculate the variance of the values of an N-D image array, optionally at + specified sub-regions. 
+ + Parameters + ---------- + input : array_like + Nd-image data to process. + labels : array_like, optional + Labels defining sub-regions in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + `labels` to include in output. If None (default), all values where + `labels` is non-zero are used. + + Returns + ------- + variance : float or ndarray + Values of variance, for each sub-region if `labels` and `index` are + specified. + + See Also + -------- + label, standard_deviation, maximum, minimum, extrema + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.variance(a) + 7.609375 + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1)) + array([ 2.1875, 2.25 , 9. ]) + + If no index is given, all non-zero `labels` are processed: + + >>> ndimage.variance(a, lbl) + 6.1875 + + """ + count, sum, sum_c_sq = _stats(input, labels, index, centered=True) + return sum_c_sq / np.asanyarray(count).astype(float) + + +def standard_deviation(input, labels=None, index=None): + """ + Calculate the standard deviation of the values of an N-D image array, + optionally at specified sub-regions. + + Parameters + ---------- + input : array_like + N-D image data to process. + labels : array_like, optional + Labels to identify sub-regions in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + `labels` to include in output. If None (default), all values where + `labels` is non-zero are used. + + Returns + ------- + standard_deviation : float or ndarray + Values of standard deviation, for each sub-region if `labels` and + `index` are specified. 
+ + See Also + -------- + label, variance, maximum, minimum, extrema + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.standard_deviation(a) + 2.7585095613392387 + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1)) + array([ 1.479, 1.5 , 3. ]) + + If no index is given, non-zero `labels` are processed: + + >>> ndimage.standard_deviation(a, lbl) + 2.4874685927665499 + + """ + return np.sqrt(variance(input, labels, index)) + + +def _select(input, labels=None, index=None, find_min=False, find_max=False, + find_min_positions=False, find_max_positions=False, + find_median=False): + """Returns min, max, or both, plus their positions (if requested), and + median.""" + + input = np.asanyarray(input) + + find_positions = find_min_positions or find_max_positions + positions = None + if find_positions: + positions = np.arange(input.size).reshape(input.shape) + + def single_group(vals, positions): + result = [] + if find_min: + result += [vals.min()] + if find_min_positions: + result += [positions[vals == vals.min()][0]] + if find_max: + result += [vals.max()] + if find_max_positions: + result += [positions[vals == vals.max()][0]] + if find_median: + result += [np.median(vals)] + return result + + if labels is None: + return single_group(input, positions) + + # ensure input and labels match sizes + input, labels = np.broadcast_arrays(input, labels) + + if index is None: + mask = (labels > 0) + masked_positions = None + if find_positions: + masked_positions = positions[mask] + return single_group(input[mask], masked_positions) + + if np.isscalar(index): + mask = (labels == index) + masked_positions = None + if find_positions: + masked_positions = positions[mask] + return single_group(input[mask], masked_positions) + + # 
remap labels to unique integers if necessary, or if the largest + # label is larger than the number of values. + if (not _safely_castable_to_int(labels.dtype) or + labels.min() < 0 or labels.max() > labels.size): + # remap labels, and indexes + unique_labels, labels = np.unique(labels, return_inverse=True) + idxs = np.searchsorted(unique_labels, index) + + # make all of idxs valid + idxs[idxs >= unique_labels.size] = 0 + found = (unique_labels[idxs] == index) + else: + # labels are an integer type, and there aren't too many + idxs = np.asanyarray(index, np.int_).copy() + found = (idxs >= 0) & (idxs <= labels.max()) + + idxs[~ found] = labels.max() + 1 + + if find_median: + order = np.lexsort((input.ravel(), labels.ravel())) + else: + order = input.ravel().argsort() + input = input.ravel()[order] + labels = labels.ravel()[order] + if find_positions: + positions = positions.ravel()[order] + + result = [] + if find_min: + mins = np.zeros(labels.max() + 2, input.dtype) + mins[labels[::-1]] = input[::-1] + result += [mins[idxs]] + if find_min_positions: + minpos = np.zeros(labels.max() + 2, int) + minpos[labels[::-1]] = positions[::-1] + result += [minpos[idxs]] + if find_max: + maxs = np.zeros(labels.max() + 2, input.dtype) + maxs[labels] = input + result += [maxs[idxs]] + if find_max_positions: + maxpos = np.zeros(labels.max() + 2, int) + maxpos[labels] = positions + result += [maxpos[idxs]] + if find_median: + locs = np.arange(len(labels)) + lo = np.zeros(labels.max() + 2, np.int_) + lo[labels[::-1]] = locs[::-1] + hi = np.zeros(labels.max() + 2, np.int_) + hi[labels] = locs + lo = lo[idxs] + hi = hi[idxs] + # lo is an index to the lowest value in input for each label, + # hi is an index to the largest value. + # move them to be either the same ((hi - lo) % 2 == 0) or next + # to each other ((hi - lo) % 2 == 1), then average. 
+ step = (hi - lo) // 2 + lo += step + hi -= step + if (np.issubdtype(input.dtype, np.integer) + or np.issubdtype(input.dtype, np.bool_)): + # avoid integer overflow or boolean addition (gh-12836) + result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0] + else: + result += [(input[lo] + input[hi]) / 2.0] + + return result + + +def minimum(input, labels=None, index=None): + """ + Calculate the minimum of the values of an array over labeled regions. + + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + minimal values of `input` over the region is computed. + labels : array_like, optional + An array_like of integers marking different regions over which the + minimum value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the minimum + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + minima. If index is None, the minimum over all elements where `labels` + is non-zero is returned. + + Returns + ------- + minimum : float or list of floats + List of minima of `input` over the regions determined by `labels` and + whose index is in `index`. If `index` or `labels` are not specified, a + float is returned: the minimal value of `input` if `labels` is None, + and the minimal value of elements where `labels` is greater than zero + if `index` is None. + + See Also + -------- + label, maximum, median, minimum_position, extrema, sum, mean, variance, + standard_deviation + + Notes + ----- + The function returns a Python list and not a NumPy array, use + `np.array` to convert the list to an array. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... 
[9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(a) + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1)) + [1.0, 4.0, 3.0] + >>> ndimage.minimum(a) + 0.0 + >>> ndimage.minimum(a, labels=labels) + 1.0 + + """ + return _select(input, labels, index, find_min=True)[0] + + +def maximum(input, labels=None, index=None): + """ + Calculate the maximum of the values of an array over labeled regions. + + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + maximal values of `input` over the region is computed. + labels : array_like, optional + An array of integers marking different regions over which the + maximum value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the maximum + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + maxima. If index is None, the maximum over all elements where `labels` + is non-zero is returned. + + Returns + ------- + output : float or list of floats + List of maxima of `input` over the regions determined by `labels` and + whose index is in `index`. If `index` or `labels` are not specified, a + float is returned: the maximal value of `input` if `labels` is None, + and the maximal value of elements where `labels` is greater than zero + if `index` is None. + + See Also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + Notes + ----- + The function returns a Python list and not a NumPy array, use + `np.array` to convert the list to an array. 
+ + Examples + -------- + >>> import numpy as np + >>> a = np.arange(16).reshape((4,4)) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> labels = np.zeros_like(a) + >>> labels[:2,:2] = 1 + >>> labels[2:, 1:3] = 2 + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 0], + [0, 2, 2, 0], + [0, 2, 2, 0]]) + >>> from scipy import ndimage + >>> ndimage.maximum(a) + 15.0 + >>> ndimage.maximum(a, labels=labels, index=[1,2]) + [5.0, 14.0] + >>> ndimage.maximum(a, labels=labels) + 14.0 + + >>> b = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(b) + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1)) + [5.0, 7.0, 9.0] + + """ + return _select(input, labels, index, find_max=True)[0] + + +def median(input, labels=None, index=None): + """ + Calculate the median of the values of an array over labeled regions. + + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + median value of `input` over the region is computed. + labels : array_like, optional + An array_like of integers marking different regions over which the + median value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the median + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + medians. If index is None, the median over all elements where `labels` + is non-zero is returned. + + Returns + ------- + median : float or list of floats + List of medians of `input` over the regions determined by `labels` and + whose index is in `index`. 
If `index` or `labels` are not specified, a + float is returned: the median value of `input` if `labels` is None, + and the median value of elements where `labels` is greater than zero + if `index` is None. + + See Also + -------- + label, minimum, maximum, extrema, sum, mean, variance, standard_deviation + + Notes + ----- + The function returns a Python list and not a NumPy array, use + `np.array` to convert the list to an array. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 1], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(a) + >>> labels + array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1)) + [2.5, 4.0, 6.0] + >>> ndimage.median(a) + 1.0 + >>> ndimage.median(a, labels=labels) + 3.0 + + """ + return _select(input, labels, index, find_median=True)[0] + + +def minimum_position(input, labels=None, index=None): + """ + Find the positions of the minimums of the values of an array at labels. + + Parameters + ---------- + input : array_like + Array_like of values. + labels : array_like, optional + An array of integers marking different regions over which the + position of the minimum value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the location of the first minimum over the whole + array is returned. + + The `labels` argument only works when `index` is specified. + index : array_like, optional + A list of region labels that are taken into account for finding the + location of the minima. If `index` is None, the ``first`` minimum + over all elements where `labels` is non-zero is returned. + + The `index` argument only works when `labels` is specified. 
+ + Returns + ------- + output : list of tuples of ints + Tuple of ints or list of tuples of ints that specify the location + of minima of `input` over the regions determined by `labels` and + whose index is in `index`. + + If `index` or `labels` are not specified, a tuple of ints is + returned specifying the location of the first minimal value of `input`. + + See Also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 20, 30], + ... [40, 80, 100], + ... [1, 100, 200]]) + >>> b = np.array([[1, 2, 0, 1], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + + >>> from scipy import ndimage + + >>> ndimage.minimum_position(a) + (2, 0) + >>> ndimage.minimum_position(b) + (0, 2) + + Features to process can be specified using `labels` and `index`: + + >>> label, pos = ndimage.label(a) + >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1)) + [(2, 0)] + + >>> label, pos = ndimage.label(b) + >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1)) + [(0, 0), (0, 3), (3, 1)] + + """ + dims = np.array(np.asarray(input).shape) + # see np.unravel_index to understand this line. + dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1] + + result = _select(input, labels, index, find_min_positions=True)[0] + + if np.isscalar(result): + return tuple((result // dim_prod) % dims) + + return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] + + +def maximum_position(input, labels=None, index=None): + """ + Find the positions of the maximums of the values of an array at labels. + + For each region specified by `labels`, the position of the maximum + value of `input` within the region is returned. + + Parameters + ---------- + input : array_like + Array_like of values. 
+ labels : array_like, optional + An array of integers marking different regions over which the + position of the maximum value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the location of the first maximum over the whole + array is returned. + + The `labels` argument only works when `index` is specified. + index : array_like, optional + A list of region labels that are taken into account for finding the + location of the maxima. If `index` is None, the first maximum + over all elements where `labels` is non-zero is returned. + + The `index` argument only works when `labels` is specified. + + Returns + ------- + output : list of tuples of ints + List of tuples of ints that specify the location of maxima of + `input` over the regions determined by `labels` and whose index + is in `index`. + + If `index` or `labels` are not specified, a tuple of ints is + returned specifying the location of the ``first`` maximal value + of `input`. + + See Also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> ndimage.maximum_position(a) + (3, 0) + + Features to process can be specified using `labels` and `index`: + + >>> lbl = np.array([[0, 1, 2, 3], + ... [0, 1, 2, 3], + ... [0, 1, 2, 3], + ... [0, 1, 2, 3]]) + >>> ndimage.maximum_position(a, lbl, 1) + (1, 1) + + If no index is given, non-zero `labels` are processed: + + >>> ndimage.maximum_position(a, lbl) + (2, 3) + + If there are no maxima, the position of the first element is returned: + + >>> ndimage.maximum_position(a, lbl, 2) + (0, 2) + + """ + dims = np.array(np.asarray(input).shape) + # see np.unravel_index to understand this line. 
+ dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1] + + result = _select(input, labels, index, find_max_positions=True)[0] + + if np.isscalar(result): + return tuple((result // dim_prod) % dims) + + return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] + + +def extrema(input, labels=None, index=None): + """ + Calculate the minimums and maximums of the values of an array + at labels, along with their positions. + + Parameters + ---------- + input : ndarray + N-D image data to process. + labels : ndarray, optional + Labels of features in input. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + Labels to include in output. If None (default), all values where + non-zero `labels` are used. + + Returns + ------- + minimums, maximums : int or ndarray + Values of minimums and maximums in each feature. + min_positions, max_positions : tuple or list of tuples + Each tuple gives the N-D coordinates of the corresponding minimum + or maximum. + + See Also + -------- + maximum, minimum, maximum_position, minimum_position, center_of_mass + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.extrema(a) + (0, 9, (0, 2), (3, 0)) + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1)) + (array([1, 4, 3]), + array([5, 7, 9]), + [(0, 0), (1, 3), (3, 1)], + [(1, 0), (2, 3), (3, 0)]) + + If no index is given, non-zero `labels` are processed: + + >>> ndimage.extrema(a, lbl) + (1, 9, (0, 0), (3, 0)) + + """ + dims = np.array(np.asarray(input).shape) + # see np.unravel_index to understand this line. 
+ dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1] + + minimums, min_positions, maximums, max_positions = _select(input, labels, + index, + find_min=True, + find_max=True, + find_min_positions=True, + find_max_positions=True) + + if np.isscalar(minimums): + return (minimums, maximums, tuple((min_positions // dim_prod) % dims), + tuple((max_positions // dim_prod) % dims)) + + min_positions = [ + tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims + ] + max_positions = [ + tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims + ] + + return minimums, maximums, min_positions, max_positions + + +def center_of_mass(input, labels=None, index=None): + """ + Calculate the center of mass of the values of an array at labels. + + Parameters + ---------- + input : ndarray + Data from which to calculate center-of-mass. The masses can either + be positive or negative. + labels : ndarray, optional + Labels for objects in `input`, as generated by `ndimage.label`. + Only used with `index`. Dimensions must be the same as `input`. + index : int or sequence of ints, optional + Labels for which to calculate centers-of-mass. If not specified, + the combined center of mass of all labels greater than zero + will be calculated. Only used with `labels`. + + Returns + ------- + center_of_mass : tuple, or list of tuples + Coordinates of centers-of-mass. + + Examples + -------- + >>> import numpy as np + >>> a = np.array(([0,0,0,0], + ... [0,1,1,0], + ... [0,1,1,0], + ... [0,1,1,0])) + >>> from scipy import ndimage + >>> ndimage.center_of_mass(a) + (2.0, 1.5) + + Calculation of multiple objects in an image + + >>> b = np.array(([0,1,1,0], + ... [0,1,0,0], + ... [0,0,0,0], + ... [0,0,1,1], + ... 
[0,0,1,1])) + >>> lbl = ndimage.label(b)[0] + >>> ndimage.center_of_mass(b, lbl, [1,2]) + [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)] + + Negative masses are also accepted, which can occur for example when + bias is removed from measured data due to random noise. + + >>> c = np.array(([-1,0,0,0], + ... [0,-1,-1,0], + ... [0,1,-1,0], + ... [0,1,1,0])) + >>> ndimage.center_of_mass(c) + (-4.0, 1.0) + + If there are division by zero issues, the function does not raise an + error but rather issues a RuntimeWarning before returning inf and/or NaN. + + >>> d = np.array([-1, 1]) + >>> ndimage.center_of_mass(d) + (inf,) + """ + normalizer = sum(input, labels, index) + grids = np.ogrid[[slice(0, i) for i in input.shape]] + + results = [sum(input * grids[dir].astype(float), labels, index) / normalizer + for dir in range(input.ndim)] + + if np.isscalar(results[0]): + return tuple(results) + + return [tuple(v) for v in np.array(results).T] + + +def histogram(input, min, max, bins, labels=None, index=None): + """ + Calculate the histogram of the values of an array, optionally at labels. + + Histogram calculates the frequency of values in an array within bins + determined by `min`, `max`, and `bins`. The `labels` and `index` + keywords can limit the scope of the histogram to specified sub-regions + within the array. + + Parameters + ---------- + input : array_like + Data for which to calculate histogram. + min, max : int + Minimum and maximum values of range of histogram bins. + bins : int + Number of bins. + labels : array_like, optional + Labels for objects in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + Label or labels for which to calculate histogram. If None, all values + where label is greater than zero are used + + Returns + ------- + hist : ndarray + Histogram counts. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ], + ... [ 0. , 0.7778, 0. , 0. ], + ... 
[ 0. , 0. , 0. , 0. ], + ... [ 0. , 0. , 0.7181, 0.2787], + ... [ 0. , 0. , 0.6573, 0.3094]]) + >>> from scipy import ndimage + >>> ndimage.histogram(a, 0, 1, 10) + array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0]) + + With labels and no indices, non-zero elements are counted: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.histogram(a, 0, 1, 10, lbl) + array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0]) + + Indices can be used to count only certain objects: + + >>> ndimage.histogram(a, 0, 1, 10, lbl, 2) + array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0]) + + """ + _bins = np.linspace(min, max, bins + 1) + + def _hist(vals): + return np.histogram(vals, _bins)[0] + + return labeled_comprehension(input, labels, index, _hist, object, None, + pass_positions=False) + + +def watershed_ift(input, markers, structure=None, output=None): + """ + Apply watershed from markers using image foresting transform algorithm. + + Parameters + ---------- + input : array_like + Input. + markers : array_like + Markers are points within each watershed that form the beginning + of the process. Negative markers are considered background markers + which are processed after the other markers. + structure : structure element, optional + A structuring element defining the connectivity of the object can be + provided. If None, an element is generated with a squared + connectivity equal to one. + output : ndarray, optional + An output array can optionally be provided. The same shape as input. + + Returns + ------- + watershed_ift : ndarray + Output. Same shape as `input`. + + References + ---------- + .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image + foresting transform: theory, algorithms, and applications", + Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004. 
+ + """ + input = np.asarray(input) + if input.dtype.type not in [np.uint8, np.uint16]: + raise TypeError('only 8 and 16 unsigned inputs are supported') + + if structure is None: + structure = _morphology.generate_binary_structure(input.ndim, 1) + structure = np.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have equal rank') + for ii in structure.shape: + if ii != 3: + raise RuntimeError('structure dimensions must be equal to 3') + + if not structure.flags.contiguous: + structure = structure.copy() + markers = np.asarray(markers) + if input.shape != markers.shape: + raise RuntimeError('input and markers must have equal shape') + + integral_types = [np.int8, + np.int16, + np.int32, + np.int64, + np.intc, + np.intp] + + if markers.dtype.type not in integral_types: + raise RuntimeError('marker should be of integer type') + + if isinstance(output, np.ndarray): + if output.dtype.type not in integral_types: + raise RuntimeError('output should be of integer type') + else: + output = markers.dtype + + output = _ni_support._get_output(output, input) + _nd_image.watershed_ift(input, markers, structure, output) + return output diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/_morphology.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/_morphology.py new file mode 100644 index 0000000000000000000000000000000000000000..22ada0b130f913021207250714ab860f483b3e1e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/_morphology.py @@ -0,0 +1,2537 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import warnings +import operator + +import numpy as np +from . import _ni_support +from . import _nd_image +from . import _filters + +__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion', + 'binary_dilation', 'binary_opening', 'binary_closing', + 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes', + 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing', + 'morphological_gradient', 'morphological_laplace', 'white_tophat', + 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt', + 'distance_transform_edt'] + + +def _center_is_true(structure, origin): + structure = np.asarray(structure) + coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, + origin)]) + return bool(structure[coor]) + + +def iterate_structure(structure, iterations, origin=None): + """ + Iterate a structure by dilating it with itself. 
+ + Parameters + ---------- + structure : array_like + Structuring element (an array of bools, for example), to be dilated with + itself. + iterations : int + number of dilations performed on the structure with itself + origin : optional + If origin is None, only the iterated structure is returned. If + not, a tuple of the iterated structure and the modified origin is + returned. + + Returns + ------- + iterate_structure : ndarray of bools + A new structuring element obtained by dilating `structure` + (`iterations` - 1) times with itself. + + See Also + -------- + generate_binary_structure + + Examples + -------- + >>> from scipy import ndimage + >>> struct = ndimage.generate_binary_structure(2, 1) + >>> struct.astype(int) + array([[0, 1, 0], + [1, 1, 1], + [0, 1, 0]]) + >>> ndimage.iterate_structure(struct, 2).astype(int) + array([[0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0]]) + >>> ndimage.iterate_structure(struct, 3).astype(int) + array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]]) + + """ + structure = np.asarray(structure) + if iterations < 2: + return structure.copy() + ni = iterations - 1 + shape = [ii + ni * (ii - 1) for ii in structure.shape] + pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] + slc = tuple(slice(pos[ii], pos[ii] + structure.shape[ii], None) + for ii in range(len(shape))) + out = np.zeros(shape, bool) + out[slc] = structure != 0 + out = binary_dilation(out, structure, iterations=ni) + if origin is None: + return out + else: + origin = _ni_support._normalize_sequence(origin, structure.ndim) + origin = [iterations * o for o in origin] + return out, origin + + +def generate_binary_structure(rank, connectivity): + """ + Generate a binary structure for binary morphological operations. 
+ + Parameters + ---------- + rank : int + Number of dimensions of the array to which the structuring element + will be applied, as returned by `np.ndim`. + connectivity : int + `connectivity` determines which elements of the output array belong + to the structure, i.e., are considered as neighbors of the central + element. Elements up to a squared distance of `connectivity` from + the center are considered neighbors. `connectivity` may range from 1 + (no diagonal elements are neighbors) to `rank` (all elements are + neighbors). + + Returns + ------- + output : ndarray of bools + Structuring element which may be used for binary morphological + operations, with `rank` dimensions and all dimensions equal to 3. + + See Also + -------- + iterate_structure, binary_dilation, binary_erosion + + Notes + ----- + `generate_binary_structure` can only create structuring elements with + dimensions equal to 3, i.e., minimal dimensions. For larger structuring + elements, that are useful e.g., for eroding large objects, one may either + use `iterate_structure`, or create directly custom arrays with + numpy functions such as `numpy.ones`. 
+ + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> struct = ndimage.generate_binary_structure(2, 1) + >>> struct + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> a = np.zeros((5,5)) + >>> a[2, 2] = 1 + >>> a + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype) + >>> b + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype) + array([[ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 1., 1., 1., 1., 1.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.]]) + >>> struct = ndimage.generate_binary_structure(2, 2) + >>> struct + array([[ True, True, True], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> struct = ndimage.generate_binary_structure(3, 1) + >>> struct # no diagonal elements + array([[[False, False, False], + [False, True, False], + [False, False, False]], + [[False, True, False], + [ True, True, True], + [False, True, False]], + [[False, False, False], + [False, True, False], + [False, False, False]]], dtype=bool) + + """ + if connectivity < 1: + connectivity = 1 + if rank < 1: + return np.array(True, dtype=bool) + output = np.fabs(np.indices([3] * rank) - 1) + output = np.add.reduce(output, 0) + return output <= connectivity + + +def _binary_erosion(input, structure, iterations, mask, output, + border_value, origin, invert, brute_force): + try: + iterations = operator.index(iterations) + except TypeError as e: + raise TypeError('iterations parameter should be an integer') from e + + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + if structure is None: + structure = generate_binary_structure(input.ndim, 1) + else: 
+ structure = np.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have same dimensionality') + if not structure.flags.contiguous: + structure = structure.copy() + if structure.size < 1: + raise RuntimeError('structure must not be empty') + if mask is not None: + mask = np.asarray(mask) + if mask.shape != input.shape: + raise RuntimeError('mask and input must have equal sizes') + origin = _ni_support._normalize_sequence(origin, input.ndim) + cit = _center_is_true(structure, origin) + if isinstance(output, np.ndarray): + if np.iscomplexobj(output): + raise TypeError('Complex output type not supported') + else: + output = bool + output = _ni_support._get_output(output, input) + temp_needed = np.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _ni_support._get_output(output.dtype, input) + if iterations == 1: + _nd_image.binary_erosion(input, structure, mask, output, + border_value, origin, invert, cit, 0) + elif cit and not brute_force: + changed, coordinate_list = _nd_image.binary_erosion( + input, structure, mask, output, + border_value, origin, invert, cit, 1) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + if mask is not None: + mask = np.asarray(mask, dtype=np.int8) + if not structure.flags.contiguous: + structure = structure.copy() + _nd_image.binary_erosion2(output, structure, mask, iterations - 1, + origin, invert, coordinate_list) + else: + tmp_in = np.empty_like(input, dtype=bool) + tmp_out = output + if iterations >= 1 and not iterations & 1: + tmp_in, tmp_out = tmp_out, tmp_in + changed = _nd_image.binary_erosion( + input, structure, mask, tmp_out, + border_value, origin, invert, cit, 0) + ii = 1 + while ii < iterations or (iterations < 1 and changed): + tmp_in, tmp_out = tmp_out, 
tmp_in + changed = _nd_image.binary_erosion( + tmp_in, structure, mask, tmp_out, + border_value, origin, invert, cit, 0) + ii += 1 + if temp_needed: + temp[...] = output + output = temp + return output + + +def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, + border_value=0, origin=0, brute_force=False): + """ + Multidimensional binary erosion with a given structuring element. + + Binary erosion is a mathematical morphology operation used for image + processing. + + Parameters + ---------- + input : array_like + Binary image to be eroded. Non-zero (True) elements form + the subset to be eroded. + structure : array_like, optional + Structuring element used for the erosion. Non-zero elements are + considered True. If no structuring element is provided, an element + is generated with a square connectivity equal to one. + iterations : int, optional + The erosion is repeated `iterations` times (one, by default). + If iterations is less than 1, the erosion is repeated until the + result does not change anymore. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated (eroded) in + the current iteration; if True all pixels are considered as candidates + for erosion, regardless of what happened in the previous iteration. + False by default. + + Returns + ------- + binary_erosion : ndarray of bools + Erosion of the input by the structuring element. 
+ + See Also + -------- + grey_erosion, binary_dilation, binary_closing, binary_opening, + generate_binary_structure + + Notes + ----- + Erosion [1]_ is a mathematical morphology operation [2]_ that uses a + structuring element for shrinking the shapes in an image. The binary + erosion of an image by a structuring element is the locus of the points + where a superimposition of the structuring element centered on the point + is entirely contained in the set of non-zero elements of the image. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((7,7), dtype=int) + >>> a[1:6, 2:5] = 1 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_erosion(a).astype(a.dtype) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> #Erosion removes objects smaller than the structure + >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + return _binary_erosion(input, structure, iterations, mask, + output, border_value, origin, 0, brute_force) + + +def binary_dilation(input, structure=None, iterations=1, mask=None, + output=None, border_value=0, origin=0, + brute_force=False): + """ + Multidimensional binary dilation with the given structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be dilated. Non-zero (True) elements form + the subset to be dilated. 
+ structure : array_like, optional + Structuring element used for the dilation. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one. + iterations : int, optional + The dilation is repeated `iterations` times (one, by default). + If iterations is less than 1, the dilation is repeated until the + result does not change anymore. Only an integer of iterations is + accepted. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated (dilated) + in the current iteration; if True all pixels are considered as + candidates for dilation, regardless of what happened in the previous + iteration. False by default. + + Returns + ------- + binary_dilation : ndarray of bools + Dilation of the input by the structuring element. + + See Also + -------- + grey_dilation, binary_erosion, binary_closing, binary_opening, + generate_binary_structure + + Notes + ----- + Dilation [1]_ is a mathematical morphology operation [2]_ that uses a + structuring element for expanding the shapes in an image. The binary + dilation of an image by a structuring element is the locus of the points + covered by the structuring element, when its center lies within the + non-zero points of the image. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29 + .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((5, 5)) + >>> a[2, 2] = 1 + >>> a + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a) + array([[False, False, False, False, False], + [False, False, True, False, False], + [False, True, True, True, False], + [False, False, True, False, False], + [False, False, False, False, False]], dtype=bool) + >>> ndimage.binary_dilation(a).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> # 3x3 structuring element with connectivity 1, used by default + >>> struct1 = ndimage.generate_binary_structure(2, 1) + >>> struct1 + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> # 3x3 structuring element with connectivity 2 + >>> struct2 = ndimage.generate_binary_structure(2, 2) + >>> struct2 + array([[ True, True, True], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a, structure=struct1,\\ + ... 
iterations=2).astype(a.dtype) + array([[ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 1., 1., 1., 1., 1.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.]]) + + """ + input = np.asarray(input) + if structure is None: + structure = generate_binary_structure(input.ndim, 1) + origin = _ni_support._normalize_sequence(origin, input.ndim) + structure = np.asarray(structure) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + + return _binary_erosion(input, structure, iterations, mask, + output, border_value, origin, 1, brute_force) + + +def binary_opening(input, structure=None, iterations=1, output=None, + origin=0, mask=None, border_value=0, brute_force=False): + """ + Multidimensional binary opening with the given structuring element. + + The *opening* of an input image by a structuring element is the + *dilation* of the *erosion* of the image by the structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be opened. Non-zero (True) elements form + the subset to be opened. + structure : array_like, optional + Structuring element used for the opening. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one (i.e., only + nearest neighbors are connected to the center, diagonally-connected + elements are not considered neighbors). + iterations : int, optional + The erosion step of the opening, then the dilation step are each + repeated `iterations` times (one, by default). If `iterations` is + less than 1, each operation is repeated until the result does + not change anymore. Only an integer of iterations is accepted. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. 
def binary_opening(input, structure=None, iterations=1, output=None,
                   origin=0, mask=None, border_value=0, brute_force=False):
    """
    Multidimensional binary opening with the given structuring element.

    The *opening* of an input image by a structuring element is the
    *dilation* of the *erosion* of the image by the structuring element.

    Parameters
    ----------
    input : array_like
        Binary array_like to be opened. Non-zero (True) elements form
        the subset to be opened.
    structure : array_like, optional
        Structuring element used for the opening. Non-zero elements are
        considered True. If no structuring element is provided an element
        is generated with a square connectivity equal to one (i.e., only
        nearest neighbors are connected to the center, diagonally-connected
        elements are not considered neighbors).
    iterations : int, optional
        The erosion step of the opening, then the dilation step are each
        repeated `iterations` times (one, by default). If `iterations` is
        less than 1, each operation is repeated until the result does
        not change anymore. Only an integer of iterations is accepted.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    mask : array_like, optional
        If a mask is given, only those elements with a True value at
        the corresponding mask element are modified at each iteration.

        .. versionadded:: 1.1.0
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.

        .. versionadded:: 1.1.0
    brute_force : boolean, optional
        Memory condition: if False, only the pixels whose value was changed
        in the last iteration are tracked as candidates to be updated in
        the current iteration; if True all pixels are considered as
        candidates for update, regardless of what happened in the previous
        iteration. False by default.

        .. versionadded:: 1.1.0

    Returns
    -------
    binary_opening : ndarray of bools
        Opening of the input by the structuring element.

    See Also
    --------
    grey_opening, binary_closing, binary_erosion, binary_dilation,
    generate_binary_structure

    Notes
    -----
    *Opening* [1]_ is a mathematical morphology operation [2]_ consisting
    of an erosion followed by a dilation with the same structuring
    element; it therefore removes objects smaller than the structuring
    element. Together with *closing* (`binary_closing`), opening can be
    used for noise removal.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    """
    input = np.asarray(input)
    if structure is None:
        # Default: square connectivity 1 (no diagonal neighbors).
        structure = generate_binary_structure(input.ndim, 1)

    eroded = binary_erosion(input, structure, iterations, mask, None,
                            border_value, origin, brute_force)
    return binary_dilation(eroded, structure, iterations, mask, output,
                           border_value, origin, brute_force)
def binary_closing(input, structure=None, iterations=1, output=None,
                   origin=0, mask=None, border_value=0, brute_force=False):
    """
    Multidimensional binary closing with the given structuring element.

    The *closing* of an input image by a structuring element is the
    *erosion* of the *dilation* of the image by the structuring element.

    Parameters
    ----------
    input : array_like
        Binary array_like to be closed. Non-zero (True) elements form
        the subset to be closed.
    structure : array_like, optional
        Structuring element used for the closing. Non-zero elements are
        considered True. If no structuring element is provided an element
        is generated with a square connectivity equal to one (i.e., only
        nearest neighbors are connected to the center, diagonally-connected
        elements are not considered neighbors).
    iterations : int, optional
        The dilation step of the closing, then the erosion step are each
        repeated `iterations` times (one, by default). If `iterations` is
        less than 1, each operation is repeated until the result does
        not change anymore. Only an integer of iterations is accepted.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    mask : array_like, optional
        If a mask is given, only those elements with a True value at
        the corresponding mask element are modified at each iteration.

        .. versionadded:: 1.1.0
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.

        .. versionadded:: 1.1.0
    brute_force : boolean, optional
        Memory condition: if False, only the pixels whose value was changed
        in the last iteration are tracked as candidates to be updated in
        the current iteration; if True all pixels are considered as
        candidates for update, regardless of what happened in the previous
        iteration. False by default.

        .. versionadded:: 1.1.0

    Returns
    -------
    binary_closing : ndarray of bools
        Closing of the input by the structuring element.

    See Also
    --------
    grey_closing, binary_opening, binary_dilation, binary_erosion,
    generate_binary_structure

    Notes
    -----
    *Closing* [1]_ is a mathematical morphology operation [2]_ consisting
    of a dilation followed by an erosion with the same structuring
    element; it therefore fills holes smaller than the structuring
    element. Together with *opening* (`binary_opening`), closing can be
    used for noise removal.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    """
    input = np.asarray(input)
    if structure is None:
        # Default: square connectivity 1 (no diagonal neighbors).
        structure = generate_binary_structure(input.ndim, 1)

    dilated = binary_dilation(input, structure, iterations, mask, None,
                              border_value, origin, brute_force)
    return binary_erosion(dilated, structure, iterations, mask, output,
                          border_value, origin, brute_force)
def binary_hit_or_miss(input, structure1=None, structure2=None,
                       output=None, origin1=0, origin2=None):
    """
    Multidimensional binary hit-or-miss transform.

    The hit-or-miss transform finds the locations of a given pattern
    inside the input image.

    Parameters
    ----------
    input : array_like (cast to booleans)
        Binary image where a pattern is to be detected.
    structure1 : array_like (cast to booleans), optional
        Part of the structuring element to be fitted to the foreground
        (non-zero elements) of `input`. If no value is provided, a
        structure of square connectivity 1 is chosen.
    structure2 : array_like (cast to booleans), optional
        Second part of the structuring element that has to miss completely
        the foreground. If no value is provided, the complementary of
        `structure1` is taken.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    origin1 : int or tuple of ints, optional
        Placement of the first part of the structuring element `structure1`,
        by default 0 for a centered structure.
    origin2 : int or tuple of ints, optional
        Placement of the second part of the structuring element
        `structure2`, by default 0 for a centered structure. If a value
        is provided for `origin1` and not for `origin2`, then `origin2`
        is set to `origin1`.

    Returns
    -------
    binary_hit_or_miss : ndarray
        Hit-or-miss transform of `input` with the given structuring
        element (`structure1`, `structure2`).

    See Also
    --------
    binary_erosion

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform

    """
    input = np.asarray(input)
    if structure1 is None:
        structure1 = generate_binary_structure(input.ndim, 1)
    if structure2 is None:
        # Default "miss" structure: the complement of the "hit" one.
        structure2 = np.logical_not(structure1)
    origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
    if origin2 is None:
        origin2 = origin1
    else:
        origin2 = _ni_support._normalize_sequence(origin2, input.ndim)

    # Points where the foreground fits structure1 ...
    hits = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
                           0, False)
    in_place = isinstance(output, np.ndarray)
    # ... intersected with points where the background fits structure2
    # (the second erosion runs in inverted mode, hence the logical_not
    # applied afterwards).
    misses = _binary_erosion(input, structure2, 1, None, output, 0,
                             origin2, 1, False)
    if in_place:
        np.logical_not(output, output)
        np.logical_and(hits, output, output)
        # Mirrors the original behavior: nothing is returned when the
        # result is written into a caller-provided output array.
    else:
        np.logical_not(misses, misses)
        return np.logical_and(hits, misses)
def binary_propagation(input, structure=None, mask=None,
                       output=None, border_value=0, origin=0):
    """
    Multidimensional binary propagation with the given structuring element.

    Parameters
    ----------
    input : array_like
        Binary image to be propagated inside `mask`.
    structure : array_like, optional
        Structuring element used in the successive dilations. The output
        may depend on the structuring element, especially if `mask` has
        several connex components. If no structuring element is
        provided, an element is generated with a squared connectivity equal
        to one.
    mask : array_like, optional
        Binary mask defining the region into which `input` is allowed to
        propagate.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.

    Returns
    -------
    binary_propagation : ndarray
        Binary propagation of `input` inside `mask`.

    Notes
    -----
    This function is functionally equivalent to calling `binary_dilation`
    with a number of iterations less than one: iterative dilation until
    the result does not change anymore.

    The succession of an erosion and propagation inside the original image
    can be used instead of an *opening* for deleting small objects while
    keeping the contours of larger objects untouched.

    References
    ----------
    .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
    .. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of
        image processing", 1998

    """
    # Geodesic dilation: iterations=-1 means "dilate until stable", and
    # `mask` confines the growth to the allowed region.
    return binary_dilation(input, structure, -1, mask, output,
                           border_value, origin)
def binary_fill_holes(input, structure=None, output=None, origin=0):
    """
    Fill the holes in binary objects.

    Parameters
    ----------
    input : array_like
        N-D binary array with holes to be filled.
    structure : array_like, optional
        Structuring element used in the computation; large-size elements
        make computations faster but may miss holes separated from the
        background by thin regions. The default element (with a square
        connectivity equal to one) yields the intuitive result where all
        holes in the input have been filled.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    origin : int, tuple of ints, optional
        Position of the structuring element.

    Returns
    -------
    out : ndarray
        Transformation of the initial image `input` where holes have been
        filled.

    See Also
    --------
    binary_dilation, binary_propagation, label

    Notes
    -----
    The algorithm consists in invading the complementary of the shapes in
    `input` from the outer boundary of the image, using binary dilations.
    Holes are not connected to the boundary and are therefore not invaded.
    The result is the complementary subset of the invaded region.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology

    """
    # Flood the background from the image border (border_value=1 seeds the
    # dilation at the boundary); anything not reached is a hole.
    background = np.logical_not(input)
    seeds = np.zeros(background.shape, bool)
    in_place = isinstance(output, np.ndarray)
    if in_place:
        binary_dilation(seeds, structure, -1, background, output, 1, origin)
        np.logical_not(output, output)
        # Mirrors the original: no return value when writing in place.
    else:
        output = binary_dilation(seeds, structure, -1, background, None, 1,
                                 origin)
        np.logical_not(output, output)
        return output
def grey_erosion(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0):
    """
    Calculate a greyscale erosion, using either a structuring element,
    or a footprint corresponding to a flat structuring element.

    Grayscale erosion is a mathematical morphology operation. For the
    simple case of a full and flat structuring element, it can be viewed
    as a minimum filter over a sliding window.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale erosion is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element used for the grayscale
        erosion. Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the grayscale erosion. Non-zero values give the set of
        neighbors of the center over which the minimum is chosen.
    structure : array of ints, optional
        Structuring element used for the grayscale erosion. `structure`
        may be a non-flat structuring element. The `structure` array applies
        a subtractive offset for each pixel in the neighborhood.
    output : array, optional
        An array used for storing the output of the erosion may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        The `origin` parameter controls the placement of the filter.
        Default 0.

    Returns
    -------
    output : ndarray
        Grayscale erosion of `input`.

    See Also
    --------
    binary_erosion, grey_dilation, grey_opening, grey_closing,
    generate_binary_structure, minimum_filter

    Notes
    -----
    The grayscale erosion of an image input by a structuring element s
    defined over a domain E is given by:

    (input+s)(x) = min {input(y) - s(x-y), for y in E}

    In particular, for structuring elements defined as
    s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
    input image inside a sliding window defined by E.

    Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.

    Raises
    ------
    ValueError
        If none of `size`, `footprint` and `structure` is given.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    """
    if all(arg is None for arg in (size, footprint, structure)):
        raise ValueError("size, footprint, or structure must be specified")

    # Trailing flag 1 selects the minimum filter (erosion) in the shared
    # min/max kernel.
    return _filters._min_or_max_filter(input, size, footprint, structure,
                                       output, mode, cval, origin, 1)
def grey_dilation(input, size=None, footprint=None, structure=None,
                  output=None, mode="reflect", cval=0.0, origin=0):
    """
    Calculate a greyscale dilation, using either a structuring element,
    or a footprint corresponding to a flat structuring element.

    Grayscale dilation is a mathematical morphology operation. For the
    simple case of a full and flat structuring element, it can be viewed
    as a maximum filter over a sliding window.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale dilation is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element used for the grayscale
        dilation. Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the grayscale dilation. Non-zero values give the set of
        neighbors of the center over which the maximum is chosen.
    structure : array of ints, optional
        Structuring element used for the grayscale dilation. `structure`
        may be a non-flat structuring element. The `structure` array applies
        an additive offset for each pixel in the neighborhood.
    output : array, optional
        An array used for storing the output of the dilation may be
        provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        The `origin` parameter controls the placement of the filter.
        Default 0.

    Returns
    -------
    grey_dilation : ndarray
        Grayscale dilation of `input`.

    See Also
    --------
    binary_dilation, grey_erosion, grey_closing, grey_opening,
    generate_binary_structure, maximum_filter

    Notes
    -----
    The grayscale dilation of an image input by a structuring element s
    defined over a domain E is given by:

    (input+s)(x) = max {input(y) + s(x-y), for y in E}

    In particular, for structuring elements defined as
    s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
    input image inside a sliding window defined by E.

    Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.

    Raises
    ------
    ValueError
        If none of `size`, `footprint` and `structure` is given.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    """
    if all(arg is None for arg in (size, footprint, structure)):
        raise ValueError("size, footprint, or structure must be specified")
    # Dilation uses the reflected structuring element / footprint so that
    # the shared min/max kernel (which is written for erosion geometry)
    # produces the dilation result.
    if structure is not None:
        structure = np.asarray(structure)
        structure = structure[tuple([slice(None, None, -1)] *
                                    structure.ndim)]
    if footprint is not None:
        footprint = np.asarray(footprint)
        footprint = footprint[tuple([slice(None, None, -1)] *
                                    footprint.ndim)]

    input = np.asarray(input)
    origin = _ni_support._normalize_sequence(origin, input.ndim)
    for axis in range(len(origin)):
        origin[axis] = -origin[axis]
        # Determine the kernel extent along this axis, whichever of
        # footprint / structure / size defines it.
        if footprint is not None:
            length = footprint.shape[axis]
        elif structure is not None:
            length = structure.shape[axis]
        elif np.isscalar(size):
            length = size
        else:
            length = size[axis]
        if not length & 1:
            # Even-sized axes need one extra shift after the reflection.
            origin[axis] -= 1

    # Trailing flag 0 selects the maximum filter (dilation).
    return _filters._min_or_max_filter(input, size, footprint, structure,
                                       output, mode, cval, origin, 0)
+ Default 0 + + Returns + ------- + grey_opening : ndarray + Result of the grayscale opening of `input` with `structure`. + + See Also + -------- + binary_opening, grey_dilation, grey_erosion, grey_closing + generate_binary_structure + + Notes + ----- + The action of a grayscale opening with a flat structuring element amounts + to smoothen high local maxima, whereas binary opening erases small objects. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.arange(36).reshape((6,6)) + >>> a[3, 3] = 50 + >>> a + array([[ 0, 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10, 11], + [12, 13, 14, 15, 16, 17], + [18, 19, 20, 50, 22, 23], + [24, 25, 26, 27, 28, 29], + [30, 31, 32, 33, 34, 35]]) + >>> ndimage.grey_opening(a, size=(3,3)) + array([[ 0, 1, 2, 3, 4, 4], + [ 6, 7, 8, 9, 10, 10], + [12, 13, 14, 15, 16, 16], + [18, 19, 20, 22, 22, 22], + [24, 25, 26, 27, 28, 28], + [24, 25, 26, 27, 28, 28]]) + >>> # Note that the local maximum a[3,3] has disappeared + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin) + return grey_dilation(tmp, size, footprint, structure, output, mode, + cval, origin) + + +def grey_closing(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multidimensional grayscale closing. + + A grayscale closing consists in the succession of a grayscale dilation, + and a grayscale erosion. + + Parameters + ---------- + input : array_like + Array over which the grayscale closing is to be computed. + size : tuple of ints + Shape of a flat and full structuring element used for the grayscale + closing. Optional if `footprint` or `structure` is provided. 
+ footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the grayscale closing. + structure : array of ints, optional + Structuring element used for the grayscale closing. `structure` + may be a non-flat structuring element. The `structure` array applies + offsets to the pixels in a neighborhood (the offset is additive during + dilation and subtractive during erosion) + output : array, optional + An array used for storing the output of the closing may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + grey_closing : ndarray + Result of the grayscale closing of `input` with `structure`. + + See Also + -------- + binary_closing, grey_dilation, grey_erosion, grey_opening, + generate_binary_structure + + Notes + ----- + The action of a grayscale closing with a flat structuring element amounts + to smoothen deep local minima, whereas binary closing fills small holes. + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.arange(36).reshape((6,6)) + >>> a[3,3] = 0 + >>> a + array([[ 0, 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10, 11], + [12, 13, 14, 15, 16, 17], + [18, 19, 20, 0, 22, 23], + [24, 25, 26, 27, 28, 29], + [30, 31, 32, 33, 34, 35]]) + >>> ndimage.grey_closing(a, size=(3,3)) + array([[ 7, 7, 8, 9, 10, 11], + [ 7, 7, 8, 9, 10, 11], + [13, 13, 14, 15, 16, 17], + [19, 19, 20, 20, 22, 23], + [25, 25, 26, 27, 28, 29], + [31, 31, 32, 33, 34, 35]]) + >>> # Note that the local minimum a[3,3] has disappeared + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + return grey_erosion(tmp, size, footprint, structure, output, mode, + cval, origin) + + +def morphological_gradient(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multidimensional morphological gradient. + + The morphological gradient is calculated as the difference between a + dilation and an erosion of the input with a given structuring element. + + Parameters + ---------- + input : array_like + Array over which to compute the morphlogical gradient. + size : tuple of ints + Shape of a flat and full structuring element used for the mathematical + morphology operations. Optional if `footprint` or `structure` is + provided. A larger `size` yields a more blurred gradient. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the morphology operations. Larger footprints + give a more blurred morphological gradient. + structure : array of ints, optional + Structuring element used for the morphology operations. `structure` may + be a non-flat structuring element. 
The `structure` array applies + offsets to the pixels in a neighborhood (the offset is additive during + dilation and subtractive during erosion) + output : array, optional + An array used for storing the output of the morphological gradient + may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + morphological_gradient : ndarray + Morphological gradient of `input`. + + See Also + -------- + grey_dilation, grey_erosion, gaussian_gradient_magnitude + + Notes + ----- + For a flat structuring element, the morphological gradient + computed at a given point corresponds to the maximal difference + between elements of the input among the elements covered by the + structuring element centered on the point. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((7,7), dtype=int) + >>> a[2:5, 2:5] = 1 + >>> ndimage.morphological_gradient(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> # The morphological gradient is computed as the difference + >>> # between a dilation and an erosion + >>> ndimage.grey_dilation(a, size=(3,3)) -\\ + ... 
ndimage.grey_erosion(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> a = np.zeros((7,7), dtype=int) + >>> a[2:5, 2:5] = 1 + >>> a[4,4] = 2; a[2,3] = 3 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 3, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.morphological_gradient(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 2, 3, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + if isinstance(output, np.ndarray): + grey_erosion(input, size, footprint, structure, output, mode, + cval, origin) + return np.subtract(tmp, output, output) + else: + return (tmp - grey_erosion(input, size, footprint, structure, + None, mode, cval, origin)) + + +def morphological_laplace(input, size=None, footprint=None, + structure=None, output=None, + mode="reflect", cval=0.0, origin=0): + """ + Multidimensional morphological laplace. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints + Shape of a flat and full structuring element used for the mathematical + morphology operations. Optional if `footprint` or `structure` is + provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the morphology operations. + structure : array of ints, optional + Structuring element used for the morphology operations. `structure` may + be a non-flat structuring element. 
The `structure` array applies + offsets to the pixels in a neighborhood (the offset is additive during + dilation and subtractive during erosion) + output : ndarray, optional + An output array can optionally be provided. + mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional + The mode parameter determines how the array borders are handled. + For 'constant' mode, values beyond borders are set to be `cval`. + Default is 'reflect'. + cval : scalar, optional + Value to fill past edges of input if mode is 'constant'. + Default is 0.0 + origin : origin, optional + The origin parameter controls the placement of the filter. + + Returns + ------- + morphological_laplace : ndarray + Output + + """ + tmp1 = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + if isinstance(output, np.ndarray): + grey_erosion(input, size, footprint, structure, output, mode, + cval, origin) + np.add(tmp1, output, output) + np.subtract(output, input, output) + return np.subtract(output, input, output) + else: + tmp2 = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin) + np.add(tmp1, tmp2, tmp2) + np.subtract(tmp2, input, tmp2) + np.subtract(tmp2, input, tmp2) + return tmp2 + + +def white_tophat(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multidimensional white tophat filter. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints + Shape of a flat and full structuring element used for the filter. + Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of elements of a flat structuring element + used for the white tophat filter. + structure : array of ints, optional + Structuring element used for the filter. `structure` may be a non-flat + structuring element. 
The `structure` array applies offsets to the + pixels in a neighborhood (the offset is additive during dilation and + subtractive during erosion) + output : array, optional + An array used for storing the output of the filter may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. + Default is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default is 0. + + Returns + ------- + output : ndarray + Result of the filter of `input` with `structure`. + + See Also + -------- + black_tophat + + Examples + -------- + Subtract gray background from a bright peak. + + >>> from scipy.ndimage import generate_binary_structure, white_tophat + >>> import numpy as np + >>> square = generate_binary_structure(rank=2, connectivity=3) + >>> bright_on_gray = np.array([[2, 3, 3, 3, 2], + ... [3, 4, 5, 4, 3], + ... [3, 5, 9, 5, 3], + ... [3, 4, 5, 4, 3], + ... 
[2, 3, 3, 3, 2]]) + >>> white_tophat(input=bright_on_gray, structure=square) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 5, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin) + tmp = grey_dilation(tmp, size, footprint, structure, output, mode, + cval, origin) + if tmp is None: + tmp = output + + if input.dtype == np.bool_ and tmp.dtype == np.bool_: + np.bitwise_xor(input, tmp, out=tmp) + else: + np.subtract(input, tmp, out=tmp) + return tmp + + +def black_tophat(input, size=None, footprint=None, + structure=None, output=None, mode="reflect", + cval=0.0, origin=0): + """ + Multidimensional black tophat filter. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints, optional + Shape of a flat and full structuring element used for the filter. + Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the black tophat filter. + structure : array of ints, optional + Structuring element used for the filter. `structure` may be a non-flat + structuring element. The `structure` array applies offsets to the + pixels in a neighborhood (the offset is additive during dilation and + subtractive during erosion) + output : array, optional + An array used for storing the output of the filter may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. 
+ origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + black_tophat : ndarray + Result of the filter of `input` with `structure`. + + See Also + -------- + white_tophat, grey_opening, grey_closing + + Examples + -------- + Change dark peak to bright peak and subtract background. + + >>> from scipy.ndimage import generate_binary_structure, black_tophat + >>> import numpy as np + >>> square = generate_binary_structure(rank=2, connectivity=3) + >>> dark_on_gray = np.array([[7, 6, 6, 6, 7], + ... [6, 5, 4, 5, 6], + ... [6, 4, 0, 4, 6], + ... [6, 5, 4, 5, 6], + ... [7, 6, 6, 6, 7]]) + >>> black_tophat(input=dark_on_gray, structure=square) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 5, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + tmp = grey_erosion(tmp, size, footprint, structure, output, mode, + cval, origin) + if tmp is None: + tmp = output + + if input.dtype == np.bool_ and tmp.dtype == np.bool_: + np.bitwise_xor(tmp, input, out=tmp) + else: + np.subtract(tmp, input, out=tmp) + return tmp + + +def distance_transform_bf(input, metric="euclidean", sampling=None, + return_distances=True, return_indices=False, + distances=None, indices=None): + """ + Distance transform function by a brute force algorithm. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element to each foreground element is returned in a separate array. 
+ + Parameters + ---------- + input : array_like + Input + metric : {'euclidean', 'taxicab', 'chessboard'}, optional + 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'. + The default is 'euclidean'. + sampling : float, or sequence of float, optional + This parameter is only used when `metric` is 'euclidean'. + Spacing of elements along each dimension. If a sequence, must be of + length equal to the input rank; if a single number, this is used for + all axes. If not specified, a grid spacing of unity is implied. + return_distances : bool, optional + Whether to calculate the distance transform. + Default is True. + return_indices : bool, optional + Whether to calculate the feature transform. + Default is False. + distances : ndarray, optional + An output array to store the calculated distance transform, instead of + returning it. + `return_distances` must be True. + It must be the same shape as `input`, and of type float64 if `metric` + is 'euclidean', uint32 otherwise. + indices : int32 ndarray, optional + An output array to store the calculated feature transform, instead of + returning it. + `return_indicies` must be True. + Its shape must be `(input.ndim,) + input.shape`. + + Returns + ------- + distances : ndarray, optional + The calculated distance transform. Returned only when + `return_distances` is True and `distances` is not supplied. + It will have the same shape as the input array. + indices : int32 ndarray, optional + The calculated feature transform. It has an input-shaped array for each + dimension of the input. See distance_transform_edt documentation for an + example. + Returned only when `return_indices` is True and `indices` is not + supplied. + + See Also + -------- + distance_transform_cdt : Faster distance transform for taxicab and + chessboard metrics + distance_transform_edt : Faster distance transform for euclidean metric + + Notes + ----- + This function employs a slow brute force algorithm. 
See also the + function `distance_transform_cdt` for more efficient taxicab [1]_ and + chessboard algorithms [2]_. + + References + ---------- + .. [1] Taxicab distance. Wikipedia, 2023. + https://en.wikipedia.org/wiki/Taxicab_geometry + .. [2] Chessboard distance. Wikipedia, 2023. + https://en.wikipedia.org/wiki/Chebyshev_distance + + Examples + -------- + Import the necessary modules. + + >>> import numpy as np + >>> from scipy.ndimage import distance_transform_bf + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1 import ImageGrid + + First, we create a toy binary image. + + >>> def add_circle(center_x, center_y, radius, image, fillvalue=1): + ... # fill circular area with 1 + ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]] + ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2 + ... circle_shape = np.sqrt(circle) < radius + ... image[circle_shape] = fillvalue + ... return image + >>> image = np.zeros((100, 100), dtype=np.uint8) + >>> image[35:65, 20:80] = 1 + >>> image = add_circle(28, 65, 10, image) + >>> image = add_circle(37, 30, 10, image) + >>> image = add_circle(70, 45, 20, image) + >>> image = add_circle(45, 80, 10, image) + + Next, we set up the figure. + + >>> fig = plt.figure(figsize=(8, 8)) # set up the figure structure + >>> grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=(0.4, 0.3), + ... label_mode="1", share_all=True, + ... cbar_location="right", cbar_mode="each", + ... cbar_size="7%", cbar_pad="2%") + >>> for ax in grid: + ... ax.axis('off') # remove axes from images + + The top left image is the original binary image. + + >>> binary_image = grid[0].imshow(image, cmap='gray') + >>> cbar_binary_image = grid.cbar_axes[0].colorbar(binary_image) + >>> cbar_binary_image.set_ticks([0, 1]) + >>> grid[0].set_title("Binary image: foreground in white") + + The distance transform calculates the distance between foreground pixels + and the image background according to a distance metric. 
Available metrics + in `distance_transform_bf` are: ``euclidean`` (default), ``taxicab`` + and ``chessboard``. The top right image contains the distance transform + based on the ``euclidean`` metric. + + >>> distance_transform_euclidean = distance_transform_bf(image) + >>> euclidean_transform = grid[1].imshow(distance_transform_euclidean, + ... cmap='gray') + >>> cbar_euclidean = grid.cbar_axes[1].colorbar(euclidean_transform) + >>> colorbar_ticks = [0, 10, 20] + >>> cbar_euclidean.set_ticks(colorbar_ticks) + >>> grid[1].set_title("Euclidean distance") + + The lower left image contains the distance transform using the ``taxicab`` + metric. + + >>> distance_transform_taxicab = distance_transform_bf(image, + ... metric='taxicab') + >>> taxicab_transformation = grid[2].imshow(distance_transform_taxicab, + ... cmap='gray') + >>> cbar_taxicab = grid.cbar_axes[2].colorbar(taxicab_transformation) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> grid[2].set_title("Taxicab distance") + + Finally, the lower right image contains the distance transform using the + ``chessboard`` metric. + + >>> distance_transform_cb = distance_transform_bf(image, + ... metric='chessboard') + >>> chessboard_transformation = grid[3].imshow(distance_transform_cb, + ... 
cmap='gray') + >>> cbar_taxicab = grid.cbar_axes[3].colorbar(chessboard_transformation) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> grid[3].set_title("Chessboard distance") + >>> plt.show() + + """ + ft_inplace = isinstance(indices, np.ndarray) + dt_inplace = isinstance(distances, np.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + + tmp1 = np.asarray(input) != 0 + struct = generate_binary_structure(tmp1.ndim, tmp1.ndim) + tmp2 = binary_dilation(tmp1, struct) + tmp2 = np.logical_xor(tmp1, tmp2) + tmp1 = tmp1.astype(np.int8) - tmp2.astype(np.int8) + metric = metric.lower() + if metric == 'euclidean': + metric = 1 + elif metric in ['taxicab', 'cityblock', 'manhattan']: + metric = 2 + elif metric == 'chessboard': + metric = 3 + else: + raise RuntimeError('distance metric not supported') + if sampling is not None: + sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim) + sampling = np.asarray(sampling, dtype=np.float64) + if not sampling.flags.contiguous: + sampling = sampling.copy() + if return_indices: + ft = np.zeros(tmp1.shape, dtype=np.int32) + else: + ft = None + if return_distances: + if distances is None: + if metric == 1: + dt = np.zeros(tmp1.shape, dtype=np.float64) + else: + dt = np.zeros(tmp1.shape, dtype=np.uint32) + else: + if distances.shape != tmp1.shape: + raise RuntimeError('distances array has wrong shape') + if metric == 1: + if distances.dtype.type != np.float64: + raise RuntimeError('distances array must be float64') + else: + if distances.dtype.type != np.uint32: + raise RuntimeError('distances array must be uint32') + dt = distances + else: + dt = None + + _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft) + if return_indices: + if isinstance(indices, np.ndarray): + if indices.dtype.type != np.int32: + raise RuntimeError('indices array must be int32') + if indices.shape != (tmp1.ndim,) + tmp1.shape: + raise RuntimeError('indices array has wrong shape') + tmp2 
= indices + else: + tmp2 = np.indices(tmp1.shape, dtype=np.int32) + ft = np.ravel(ft) + for ii in range(tmp2.shape[0]): + rtmp = np.ravel(tmp2[ii, ...])[ft] + rtmp.shape = tmp1.shape + tmp2[ii, ...] = rtmp + ft = tmp2 + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def distance_transform_cdt(input, metric='chessboard', return_distances=True, + return_indices=False, distances=None, indices=None): + """ + Distance transform for chamfer type of transforms. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element to each foreground element is returned in a separate array. + + Parameters + ---------- + input : array_like + Input. Values of 0 are treated as background. + metric : {'chessboard', 'taxicab'} or array_like, optional + The `metric` determines the type of chamfering that is done. If the + `metric` is equal to 'taxicab' a structure is generated using + `generate_binary_structure` with a squared distance equal to 1. If + the `metric` is equal to 'chessboard', a `metric` is generated + using `generate_binary_structure` with a squared distance equal to + the dimensionality of the array. These choices correspond to the + common interpretations of the 'taxicab' and the 'chessboard' + distance metrics in two dimensions. + A custom metric may be provided, in the form of a matrix where + each dimension has a length of three. + 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'. + The default is 'chessboard'. 
+ return_distances : bool, optional + Whether to calculate the distance transform. + Default is True. + return_indices : bool, optional + Whether to calculate the feature transform. + Default is False. + distances : int32 ndarray, optional + An output array to store the calculated distance transform, instead of + returning it. + `return_distances` must be True. + It must be the same shape as `input`. + indices : int32 ndarray, optional + An output array to store the calculated feature transform, instead of + returning it. + `return_indicies` must be True. + Its shape must be `(input.ndim,) + input.shape`. + + Returns + ------- + distances : int32 ndarray, optional + The calculated distance transform. Returned only when + `return_distances` is True, and `distances` is not supplied. + It will have the same shape as the input array. + indices : int32 ndarray, optional + The calculated feature transform. It has an input-shaped array for each + dimension of the input. See distance_transform_edt documentation for an + example. + Returned only when `return_indices` is True, and `indices` is not + supplied. + + See Also + -------- + distance_transform_edt : Fast distance transform for euclidean metric + distance_transform_bf : Distance transform for different metrics using + a slower brute force algorithm + + Examples + -------- + Import the necessary modules. + + >>> import numpy as np + >>> from scipy.ndimage import distance_transform_cdt + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1 import ImageGrid + + First, we create a toy binary image. + + >>> def add_circle(center_x, center_y, radius, image, fillvalue=1): + ... # fill circular area with 1 + ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]] + ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2 + ... circle_shape = np.sqrt(circle) < radius + ... image[circle_shape] = fillvalue + ... 
return image + >>> image = np.zeros((100, 100), dtype=np.uint8) + >>> image[35:65, 20:80] = 1 + >>> image = add_circle(28, 65, 10, image) + >>> image = add_circle(37, 30, 10, image) + >>> image = add_circle(70, 45, 20, image) + >>> image = add_circle(45, 80, 10, image) + + Next, we set up the figure. + + >>> fig = plt.figure(figsize=(5, 15)) + >>> grid = ImageGrid(fig, 111, nrows_ncols=(3, 1), axes_pad=(0.5, 0.3), + ... label_mode="1", share_all=True, + ... cbar_location="right", cbar_mode="each", + ... cbar_size="7%", cbar_pad="2%") + >>> for ax in grid: + ... ax.axis('off') + >>> top, middle, bottom = grid + >>> colorbar_ticks = [0, 10, 20] + + The top image contains the original binary image. + + >>> binary_image = top.imshow(image, cmap='gray') + >>> cbar_binary_image = top.cax.colorbar(binary_image) + >>> cbar_binary_image.set_ticks([0, 1]) + >>> top.set_title("Binary image: foreground in white") + + The middle image contains the distance transform using the ``taxicab`` + metric. + + >>> distance_taxicab = distance_transform_cdt(image, metric="taxicab") + >>> taxicab_transform = middle.imshow(distance_taxicab, cmap='gray') + >>> cbar_taxicab = middle.cax.colorbar(taxicab_transform) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> middle.set_title("Taxicab metric") + + The bottom image contains the distance transform using the ``chessboard`` + metric. + + >>> distance_chessboard = distance_transform_cdt(image, + ... 
metric="chessboard") + >>> chessboard_transform = bottom.imshow(distance_chessboard, cmap='gray') + >>> cbar_chessboard = bottom.cax.colorbar(chessboard_transform) + >>> cbar_chessboard.set_ticks(colorbar_ticks) + >>> bottom.set_title("Chessboard metric") + >>> plt.tight_layout() + >>> plt.show() + + """ + ft_inplace = isinstance(indices, np.ndarray) + dt_inplace = isinstance(distances, np.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + input = np.asarray(input) + if isinstance(metric, str): + if metric in ['taxicab', 'cityblock', 'manhattan']: + rank = input.ndim + metric = generate_binary_structure(rank, 1) + elif metric == 'chessboard': + rank = input.ndim + metric = generate_binary_structure(rank, rank) + else: + raise ValueError('invalid metric provided') + else: + try: + metric = np.asarray(metric) + except Exception as e: + raise ValueError('invalid metric provided') from e + for s in metric.shape: + if s != 3: + raise ValueError('metric sizes must be equal to 3') + + if not metric.flags.contiguous: + metric = metric.copy() + if dt_inplace: + if distances.dtype.type != np.int32: + raise ValueError('distances must be of int32 type') + if distances.shape != input.shape: + raise ValueError('distances has wrong shape') + dt = distances + dt[...] 
= np.where(input, -1, 0).astype(np.int32) + else: + dt = np.where(input, -1, 0).astype(np.int32) + + rank = dt.ndim + if return_indices: + ft = np.arange(dt.size, dtype=np.int32) + ft.shape = dt.shape + else: + ft = None + + _nd_image.distance_transform_op(metric, dt, ft) + dt = dt[tuple([slice(None, None, -1)] * rank)] + if return_indices: + ft = ft[tuple([slice(None, None, -1)] * rank)] + _nd_image.distance_transform_op(metric, dt, ft) + dt = dt[tuple([slice(None, None, -1)] * rank)] + if return_indices: + ft = ft[tuple([slice(None, None, -1)] * rank)] + ft = np.ravel(ft) + if ft_inplace: + if indices.dtype.type != np.int32: + raise ValueError('indices array must be int32') + if indices.shape != (dt.ndim,) + dt.shape: + raise ValueError('indices array has wrong shape') + tmp = indices + else: + tmp = np.indices(dt.shape, dtype=np.int32) + for ii in range(tmp.shape[0]): + rtmp = np.ravel(tmp[ii, ...])[ft] + rtmp.shape = dt.shape + tmp[ii, ...] = rtmp + ft = tmp + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def distance_transform_edt(input, sampling=None, return_distances=True, + return_indices=False, distances=None, indices=None): + """ + Exact Euclidean distance transform. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element to each foreground element is returned in a separate array. + + Parameters + ---------- + input : array_like + Input data to transform. 
Can be any type but will be converted + into binary: 1 wherever input equates to True, 0 elsewhere. + sampling : float, or sequence of float, optional + Spacing of elements along each dimension. If a sequence, must be of + length equal to the input rank; if a single number, this is used for + all axes. If not specified, a grid spacing of unity is implied. + return_distances : bool, optional + Whether to calculate the distance transform. + Default is True. + return_indices : bool, optional + Whether to calculate the feature transform. + Default is False. + distances : float64 ndarray, optional + An output array to store the calculated distance transform, instead of + returning it. + `return_distances` must be True. + It must be the same shape as `input`. + indices : int32 ndarray, optional + An output array to store the calculated feature transform, instead of + returning it. + `return_indicies` must be True. + Its shape must be `(input.ndim,) + input.shape`. + + Returns + ------- + distances : float64 ndarray, optional + The calculated distance transform. Returned only when + `return_distances` is True and `distances` is not supplied. + It will have the same shape as the input array. + indices : int32 ndarray, optional + The calculated feature transform. It has an input-shaped array for each + dimension of the input. See example below. + Returned only when `return_indices` is True and `indices` is not + supplied. + + Notes + ----- + The Euclidean distance transform gives values of the Euclidean + distance:: + + n + y_i = sqrt(sum (x[i]-b[i])**2) + i + + where b[i] is the background point (value 0) with the smallest + Euclidean distance to input points x[i], and n is the + number of dimensions. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.array(([0,1,1,1,1], + ... [0,0,1,1,1], + ... [0,1,1,1,1], + ... [0,1,1,1,0], + ... [0,1,1,0,0])) + >>> ndimage.distance_transform_edt(a) + array([[ 0. , 1. , 1.4142, 2.2361, 3. 
], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. , 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + With a sampling of 2 units along x, 1 along y: + + >>> ndimage.distance_transform_edt(a, sampling=[2,1]) + array([[ 0. , 1. , 2. , 2.8284, 3.6056], + [ 0. , 0. , 1. , 2. , 3. ], + [ 0. , 1. , 2. , 2.2361, 2. ], + [ 0. , 1. , 2. , 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + Asking for indices as well: + + >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True) + >>> inds + array([[[0, 0, 1, 1, 3], + [1, 1, 1, 1, 3], + [2, 2, 1, 3, 3], + [3, 3, 4, 4, 3], + [4, 4, 4, 4, 4]], + [[0, 0, 1, 1, 4], + [0, 1, 1, 1, 4], + [0, 0, 1, 4, 4], + [0, 0, 3, 3, 4], + [0, 0, 3, 3, 4]]]) + + With arrays provided for inplace outputs: + + >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32) + >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices) + array([[ 0. , 1. , 1.4142, 2.2361, 3. ], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. , 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. 
]]) + >>> indices + array([[[0, 0, 1, 1, 3], + [1, 1, 1, 1, 3], + [2, 2, 1, 3, 3], + [3, 3, 4, 4, 3], + [4, 4, 4, 4, 4]], + [[0, 0, 1, 1, 4], + [0, 1, 1, 1, 4], + [0, 0, 1, 4, 4], + [0, 0, 3, 3, 4], + [0, 0, 3, 3, 4]]]) + + """ + ft_inplace = isinstance(indices, np.ndarray) + dt_inplace = isinstance(distances, np.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + + # calculate the feature transform + input = np.atleast_1d(np.where(input, 1, 0).astype(np.int8)) + if sampling is not None: + sampling = _ni_support._normalize_sequence(sampling, input.ndim) + sampling = np.asarray(sampling, dtype=np.float64) + if not sampling.flags.contiguous: + sampling = sampling.copy() + + if ft_inplace: + ft = indices + if ft.shape != (input.ndim,) + input.shape: + raise RuntimeError('indices array has wrong shape') + if ft.dtype.type != np.int32: + raise RuntimeError('indices array must be int32') + else: + ft = np.zeros((input.ndim,) + input.shape, dtype=np.int32) + + _nd_image.euclidean_feature_transform(input, sampling, ft) + # if requested, calculate the distance transform + if return_distances: + dt = ft - np.indices(input.shape, dtype=ft.dtype) + dt = dt.astype(np.float64) + if sampling is not None: + for ii in range(len(sampling)): + dt[ii, ...] 
*= sampling[ii] + np.multiply(dt, dt, dt) + if dt_inplace: + dt = np.add.reduce(dt, axis=0) + if distances.shape != dt.shape: + raise RuntimeError('distances array has wrong shape') + if distances.dtype.type != np.float64: + raise RuntimeError('distances array must be float64') + np.sqrt(dt, distances) + else: + dt = np.add.reduce(dt, axis=0) + dt = np.sqrt(dt) + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def _distance_tranform_arg_check(distances_out, indices_out, + return_distances, return_indices): + """Raise a RuntimeError if the arguments are invalid""" + error_msgs = [] + if (not return_distances) and (not return_indices): + error_msgs.append( + 'at least one of return_distances/return_indices must be True') + if distances_out and not return_distances: + error_msgs.append( + 'return_distances must be True if distances is supplied' + ) + if indices_out and not return_indices: + error_msgs.append('return_indices must be True if indices is supplied') + if error_msgs: + raise RuntimeError(', '.join(error_msgs)) diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py new file mode 100644 index 0000000000000000000000000000000000000000..ae8875f2ad20244604e02a0d8649a815956d4975 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py @@ -0,0 +1,119 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from collections.abc import Iterable +import operator +import warnings +import numpy as np + + +def _extend_mode_to_code(mode): + """Convert an extension mode to the corresponding integer code. + """ + if mode == 'nearest': + return 0 + elif mode == 'wrap': + return 1 + elif mode in ['reflect', 'grid-mirror']: + return 2 + elif mode == 'mirror': + return 3 + elif mode == 'constant': + return 4 + elif mode == 'grid-wrap': + return 5 + elif mode == 'grid-constant': + return 6 + else: + raise RuntimeError('boundary mode not supported') + + +def _normalize_sequence(input, rank): + """If input is a scalar, create a sequence of length equal to the + rank by duplicating the input. If input is a sequence, + check if its length is equal to the length of array. 
+ """ + is_str = isinstance(input, str) + if not is_str and isinstance(input, Iterable): + normalized = list(input) + if len(normalized) != rank: + err = "sequence argument must have length equal to input rank" + raise RuntimeError(err) + else: + normalized = [input] * rank + return normalized + + +def _get_output(output, input, shape=None, complex_output=False): + if shape is None: + shape = input.shape + if output is None: + if not complex_output: + output = np.zeros(shape, dtype=input.dtype.name) + else: + complex_type = np.promote_types(input.dtype, np.complex64) + output = np.zeros(shape, dtype=complex_type) + elif isinstance(output, (type, np.dtype)): + # Classes (like `np.float32`) and dtypes are interpreted as dtype + if complex_output and np.dtype(output).kind != 'c': + warnings.warn("promoting specified output dtype to complex", stacklevel=3) + output = np.promote_types(output, np.complex64) + output = np.zeros(shape, dtype=output) + elif isinstance(output, str): + output = np.dtype(output) + if complex_output and output.kind != 'c': + raise RuntimeError("output must have complex dtype") + elif not issubclass(output.type, np.number): + raise RuntimeError("output must have numeric dtype") + output = np.zeros(shape, dtype=output) + elif output.shape != shape: + raise RuntimeError("output shape not correct") + elif complex_output and output.dtype.kind != 'c': + raise RuntimeError("output must have complex dtype") + return output + + +def _check_axes(axes, ndim): + if axes is None: + return tuple(range(ndim)) + elif np.isscalar(axes): + axes = (operator.index(axes),) + elif isinstance(axes, Iterable): + for ax in axes: + axes = tuple(operator.index(ax) for ax in axes) + if ax < -ndim or ax > ndim - 1: + raise ValueError(f"specified axis: {ax} is out of range") + axes = tuple(ax % ndim if ax < 0 else ax for ax in axes) + else: + message = "axes must be an integer, iterable of integers, or None" + raise ValueError(message) + if len(tuple(set(axes))) != 
len(axes): + raise ValueError("axes must be unique") + return axes diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/filters.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..e16d9d279a9585b2454c46ee09cf22143de833a6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/filters.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.ndimage` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'correlate1d', 'convolve1d', 'gaussian_filter1d', + 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace', + 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude', + 'gaussian_gradient_magnitude', 'correlate', 'convolve', + 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', + 'maximum_filter1d', 'minimum_filter', 'maximum_filter', + 'rank_filter', 'median_filter', 'percentile_filter', + 'generic_filter1d', 'generic_filter' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package='ndimage', module='filters', + private_modules=['_filters'], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/measurements.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/measurements.py new file mode 100644 index 0000000000000000000000000000000000000000..22f76b01840ffb829205bd1d28a7ad1f9ac5db61 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/measurements.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.ndimage` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'label', 'find_objects', 'labeled_comprehension', + 'sum', 'mean', 'variance', 'standard_deviation', + 'minimum', 'maximum', 'median', 'minimum_position', + 'maximum_position', 'extrema', 'center_of_mass', + 'histogram', 'watershed_ift', 'sum_labels' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package='ndimage', module='measurements', + private_modules=['_measurements'], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/morphology.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/morphology.py new file mode 100644 index 0000000000000000000000000000000000000000..e522e7df3a4b06b7e04ed8c2d0ecaff2a98b951d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/morphology.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.ndimage` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'iterate_structure', 'generate_binary_structure', + 'binary_erosion', 'binary_dilation', 'binary_opening', + 'binary_closing', 'binary_hit_or_miss', 'binary_propagation', + 'binary_fill_holes', 'grey_erosion', 'grey_dilation', + 'grey_opening', 'grey_closing', 'morphological_gradient', + 'morphological_laplace', 'white_tophat', 'black_tophat', + 'distance_transform_bf', 'distance_transform_cdt', + 'distance_transform_edt' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package='ndimage', module='morphology', + private_modules=['_morphology'], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..c92cfb558a0fafac3b881540afaf6b05165f5dc5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py @@ -0,0 +1,1327 @@ +import sys + +import numpy as np +from numpy.testing import (assert_, assert_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose, + suppress_warnings) +import pytest +from pytest import raises as assert_raises +import scipy.ndimage as ndimage + +from . 
import types + +eps = 1e-12 + +ndimage_to_numpy_mode = { + 'mirror': 'reflect', + 'reflect': 'symmetric', + 'grid-mirror': 'symmetric', + 'grid-wrap': 'wrap', + 'nearest': 'edge', + 'grid-constant': 'constant', +} + + +class TestNdimageInterpolation: + + @pytest.mark.parametrize( + 'mode, expected_value', + [('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]), + ('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]), + ('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]), + ('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]), + ('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]), + ('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]), + ('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])] + ) + def test_boundaries(self, mode, expected_value): + def shift(x): + return (x[0] + 0.5,) + + data = np.array([1, 2, 3, 4.]) + assert_array_equal( + expected_value, + ndimage.geometric_transform(data, shift, cval=-1, mode=mode, + output_shape=(7,), order=1)) + + @pytest.mark.parametrize( + 'mode, expected_value', + [('nearest', [1, 1, 2, 3]), + ('wrap', [3, 1, 2, 3]), + ('grid-wrap', [4, 1, 2, 3]), + ('mirror', [2, 1, 2, 3]), + ('reflect', [1, 1, 2, 3]), + ('constant', [-1, 1, 2, 3]), + ('grid-constant', [-1, 1, 2, 3])] + ) + def test_boundaries2(self, mode, expected_value): + def shift(x): + return (x[0] - 0.9,) + + data = np.array([1, 2, 3, 4]) + assert_array_equal( + expected_value, + ndimage.geometric_transform(data, shift, cval=-1, mode=mode, + output_shape=(4,))) + + @pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror', + 'grid-wrap', 'grid-constant', + 'nearest']) + @pytest.mark.parametrize('order', range(6)) + def test_boundary_spline_accuracy(self, mode, order): + """Tests based on examples from gh-2640""" + data = np.arange(-6, 7, dtype=float) + x = np.linspace(-8, 15, num=1000) + y = ndimage.map_coordinates(data, [x], order=order, mode=mode) + + # compute expected value using explicit padding via np.pad + npad = 32 + pad_mode = ndimage_to_numpy_mode.get(mode) + padded = np.pad(data, 
npad, mode=pad_mode) + expected = ndimage.map_coordinates(padded, [npad + x], order=order, + mode=mode) + + atol = 1e-5 if mode == 'grid-constant' else 1e-12 + assert_allclose(y, expected, rtol=1e-7, atol=atol) + + @pytest.mark.parametrize('order', range(2, 6)) + @pytest.mark.parametrize('dtype', types) + def test_spline01(self, dtype, order): + data = np.ones([], dtype) + out = ndimage.spline_filter(data, order=order) + assert_array_almost_equal(out, 1) + + @pytest.mark.parametrize('order', range(2, 6)) + @pytest.mark.parametrize('dtype', types) + def test_spline02(self, dtype, order): + data = np.array([1], dtype) + out = ndimage.spline_filter(data, order=order) + assert_array_almost_equal(out, [1]) + + @pytest.mark.parametrize('order', range(2, 6)) + @pytest.mark.parametrize('dtype', types) + def test_spline03(self, dtype, order): + data = np.ones([], dtype) + out = ndimage.spline_filter(data, order, output=dtype) + assert_array_almost_equal(out, 1) + + @pytest.mark.parametrize('order', range(2, 6)) + @pytest.mark.parametrize('dtype', types) + def test_spline04(self, dtype, order): + data = np.ones([4], dtype) + out = ndimage.spline_filter(data, order) + assert_array_almost_equal(out, [1, 1, 1, 1]) + + @pytest.mark.parametrize('order', range(2, 6)) + @pytest.mark.parametrize('dtype', types) + def test_spline05(self, dtype, order): + data = np.ones([4, 4], dtype) + out = ndimage.spline_filter(data, order=order) + assert_array_almost_equal(out, [[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform01(self, order): + data = np.array([1]) + + def mapping(x): + return x + + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [1]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform02(self, order): + data = np.ones([4]) + + def mapping(x): + return x + + out = ndimage.geometric_transform(data, 
mapping, data.shape, + order=order) + assert_array_almost_equal(out, [1, 1, 1, 1]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform03(self, order): + data = np.ones([4]) + + def mapping(x): + return (x[0] - 1,) + + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [0, 1, 1, 1]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform04(self, order): + data = np.array([4, 1, 3, 2]) + + def mapping(x): + return (x[0] - 1,) + + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [0, 4, 1, 3]) + + @pytest.mark.parametrize('order', range(0, 6)) + @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) + def test_geometric_transform05(self, order, dtype): + data = np.array([[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]], dtype=dtype) + expected = np.array([[0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1]], dtype=dtype) + if data.dtype.kind == 'c': + data -= 1j * data + expected -= 1j * expected + + def mapping(x): + return (x[0], x[1] - 1) + + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform06(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + + def mapping(x): + return (x[0], x[1] - 1) + + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [[0, 4, 1, 3], + [0, 7, 6, 8], + [0, 3, 5, 3]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform07(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + + def mapping(x): + return (x[0] - 1, x[1]) + + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [4, 1, 3, 2], + [7, 6, 8, 5]]) + + 
@pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform08(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + + def mapping(x): + return (x[0] - 1, x[1] - 1) + + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform10(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + + def mapping(x): + return (x[0] - 1, x[1] - 1) + + if (order > 1): + filtered = ndimage.spline_filter(data, order=order) + else: + filtered = data + out = ndimage.geometric_transform(filtered, mapping, data.shape, + order=order, prefilter=False) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform13(self, order): + data = np.ones([2], np.float64) + + def mapping(x): + return (x[0] // 2,) + + out = ndimage.geometric_transform(data, mapping, [4], order=order) + assert_array_almost_equal(out, [1, 1, 1, 1]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform14(self, order): + data = [1, 5, 2, 6, 3, 7, 4, 4] + + def mapping(x): + return (2 * x[0],) + + out = ndimage.geometric_transform(data, mapping, [4], order=order) + assert_array_almost_equal(out, [1, 2, 3, 4]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform15(self, order): + data = [1, 2, 3, 4] + + def mapping(x): + return (x[0] / 2,) + + out = ndimage.geometric_transform(data, mapping, [8], order=order) + assert_array_almost_equal(out[::2], [1, 2, 3, 4]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform16(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9.0, 10, 11, 12]] + + def mapping(x): + return (x[0], x[1] * 2) + + out = ndimage.geometric_transform(data, mapping, (3, 2), + 
order=order) + assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform17(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0] * 2, x[1]) + + out = ndimage.geometric_transform(data, mapping, (1, 4), + order=order) + assert_array_almost_equal(out, [[1, 2, 3, 4]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform18(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0] * 2, x[1] * 2) + + out = ndimage.geometric_transform(data, mapping, (1, 2), + order=order) + assert_array_almost_equal(out, [[1, 3]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform19(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0], x[1] / 2) + + out = ndimage.geometric_transform(data, mapping, (3, 8), + order=order) + assert_array_almost_equal(out[..., ::2], data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform20(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0] / 2, x[1]) + + out = ndimage.geometric_transform(data, mapping, (6, 4), + order=order) + assert_array_almost_equal(out[::2, ...], data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform21(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0] / 2, x[1] / 2) + + out = ndimage.geometric_transform(data, mapping, (6, 8), + order=order) + assert_array_almost_equal(out[::2, ::2], data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform22(self, order): + data = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], np.float64) + + def mapping1(x): + return (x[0] / 2, x[1] / 2) + + def mapping2(x): + return (x[0] * 2, x[1] * 2) + + 
out = ndimage.geometric_transform(data, mapping1, + (6, 8), order=order) + out = ndimage.geometric_transform(out, mapping2, + (3, 4), order=order) + assert_array_almost_equal(out, data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform23(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (1, x[0] * 2) + + out = ndimage.geometric_transform(data, mapping, (2,), order=order) + out = out.astype(np.int32) + assert_array_almost_equal(out, [5, 7]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_geometric_transform24(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x, a, b): + return (a, x[0] * b) + + out = ndimage.geometric_transform( + data, mapping, (2,), order=order, extra_arguments=(1,), + extra_keywords={'b': 2}) + assert_array_almost_equal(out, [5, 7]) + + def test_geometric_transform_grid_constant_order1(self): + # verify interpolation outside the original bounds + x = np.array([[1, 2, 3], + [4, 5, 6]], dtype=float) + + def mapping(x): + return (x[0] - 0.5), (x[1] - 0.5) + + expected_result = np.array([[0.25, 0.75, 1.25], + [1.25, 3.00, 4.00]]) + assert_array_almost_equal( + ndimage.geometric_transform(x, mapping, mode='grid-constant', + order=1), + expected_result, + ) + + @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest', + 'mirror', 'reflect']) + @pytest.mark.parametrize('order', range(6)) + def test_geometric_transform_vs_padded(self, order, mode): + x = np.arange(144, dtype=float).reshape(12, 12) + + def mapping(x): + return (x[0] - 0.4), (x[1] + 2.3) + + # Manually pad and then extract center after the transform to get the + # expected result. 
+ npad = 24 + pad_mode = ndimage_to_numpy_mode.get(mode) + xp = np.pad(x, npad, mode=pad_mode) + center_slice = tuple([slice(npad, -npad)] * x.ndim) + expected_result = ndimage.geometric_transform( + xp, mapping, mode=mode, order=order)[center_slice] + + assert_allclose( + ndimage.geometric_transform(x, mapping, mode=mode, + order=order), + expected_result, + rtol=1e-7, + ) + + def test_geometric_transform_endianness_with_output_parameter(self): + # geometric transform given output ndarray or dtype with + # non-native endianness. see issue #4127 + data = np.array([1]) + + def mapping(x): + return x + + for out in [data.dtype, data.dtype.newbyteorder(), + np.empty_like(data), + np.empty_like(data).astype(data.dtype.newbyteorder())]: + returned = ndimage.geometric_transform(data, mapping, data.shape, + output=out) + result = out if returned is None else returned + assert_array_almost_equal(result, [1]) + + def test_geometric_transform_with_string_output(self): + data = np.array([1]) + + def mapping(x): + return x + + out = ndimage.geometric_transform(data, mapping, output='f') + assert_(out.dtype is np.dtype('f')) + assert_array_almost_equal(out, [1]) + + @pytest.mark.parametrize('order', range(0, 6)) + @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) + def test_map_coordinates01(self, order, dtype): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + expected = np.array([[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + if data.dtype.kind == 'c': + data = data - 1j * data + expected = expected - 1j * expected + + idx = np.indices(data.shape) + idx -= 1 + + out = ndimage.map_coordinates(data, idx, order=order) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_map_coordinates02(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + idx = np.indices(data.shape, np.float64) + idx -= 0.5 + + out1 = ndimage.shift(data, 0.5, order=order) + out2 = 
ndimage.map_coordinates(data, idx, order=order) + assert_array_almost_equal(out1, out2) + + def test_map_coordinates03(self): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]], order='F') + idx = np.indices(data.shape) - 1 + out = ndimage.map_coordinates(data, idx) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + assert_array_almost_equal(out, ndimage.shift(data, (1, 1))) + idx = np.indices(data[::2].shape) - 1 + out = ndimage.map_coordinates(data[::2], idx) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3]]) + assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1))) + idx = np.indices(data[:, ::2].shape) - 1 + out = ndimage.map_coordinates(data[:, ::2], idx) + assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]]) + assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1))) + + def test_map_coordinates_endianness_with_output_parameter(self): + # output parameter given as array or dtype with either endianness + # see issue #4127 + data = np.array([[1, 2], [7, 6]]) + expected = np.array([[0, 0], [0, 1]]) + idx = np.indices(data.shape) + idx -= 1 + for out in [ + data.dtype, + data.dtype.newbyteorder(), + np.empty_like(expected), + np.empty_like(expected).astype(expected.dtype.newbyteorder()) + ]: + returned = ndimage.map_coordinates(data, idx, output=out) + result = out if returned is None else returned + assert_array_almost_equal(result, expected) + + def test_map_coordinates_with_string_output(self): + data = np.array([[1]]) + idx = np.indices(data.shape) + out = ndimage.map_coordinates(data, idx, output='f') + assert_(out.dtype is np.dtype('f')) + assert_array_almost_equal(out, [[1]]) + + @pytest.mark.skipif('win32' in sys.platform or np.intp(0).itemsize < 8, + reason='do not run on 32 bit or windows ' + '(no sparse memory)') + def test_map_coordinates_large_data(self): + # check crash on large data + try: + n = 30000 + a = np.empty(n**2, dtype=np.float32).reshape(n, n) + # fill the 
part we might read + a[n - 3:, n - 3:] = 0 + ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1) + except MemoryError as e: + raise pytest.skip('Not enough memory available') from e + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform01(self, order): + data = np.array([1]) + out = ndimage.affine_transform(data, [[1]], order=order) + assert_array_almost_equal(out, [1]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform02(self, order): + data = np.ones([4]) + out = ndimage.affine_transform(data, [[1]], order=order) + assert_array_almost_equal(out, [1, 1, 1, 1]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform03(self, order): + data = np.ones([4]) + out = ndimage.affine_transform(data, [[1]], -1, order=order) + assert_array_almost_equal(out, [0, 1, 1, 1]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform04(self, order): + data = np.array([4, 1, 3, 2]) + out = ndimage.affine_transform(data, [[1]], -1, order=order) + assert_array_almost_equal(out, [0, 4, 1, 3]) + + @pytest.mark.parametrize('order', range(0, 6)) + @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) + def test_affine_transform05(self, order, dtype): + data = np.array([[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]], dtype=dtype) + expected = np.array([[0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1]], dtype=dtype) + if data.dtype.kind == 'c': + data -= 1j * data + expected -= 1j * expected + out = ndimage.affine_transform(data, [[1, 0], [0, 1]], + [0, -1], order=order) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform06(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + out = ndimage.affine_transform(data, [[1, 0], [0, 1]], + [0, -1], order=order) + assert_array_almost_equal(out, [[0, 4, 1, 3], + [0, 7, 6, 8], + [0, 3, 5, 3]]) + + @pytest.mark.parametrize('order', range(0, 6)) + 
def test_affine_transform07(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + out = ndimage.affine_transform(data, [[1, 0], [0, 1]], + [-1, 0], order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [4, 1, 3, 2], + [7, 6, 8, 5]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform08(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + out = ndimage.affine_transform(data, [[1, 0], [0, 1]], + [-1, -1], order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform09(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + if (order > 1): + filtered = ndimage.spline_filter(data, order=order) + else: + filtered = data + out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]], + [-1, -1], order=order, + prefilter=False) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform10(self, order): + data = np.ones([2], np.float64) + out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,), + order=order) + assert_array_almost_equal(out, [1, 1, 1, 0]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform11(self, order): + data = [1, 5, 2, 6, 3, 7, 4, 4] + out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order) + assert_array_almost_equal(out, [1, 2, 3, 4]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform12(self, order): + data = [1, 2, 3, 4] + out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order) + assert_array_almost_equal(out[::2], [1, 2, 3, 4]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform13(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9.0, 10, 11, 12]] + out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 
2), + order=order) + assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform14(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4), + order=order) + assert_array_almost_equal(out, [[1, 2, 3, 4]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform15(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2), + order=order) + assert_array_almost_equal(out, [[1, 3]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform16(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0, + (3, 8), order=order) + assert_array_almost_equal(out[..., ::2], data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform17(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0, + (6, 4), order=order) + assert_array_almost_equal(out[::2, ...], data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform18(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0, + (6, 8), order=order) + assert_array_almost_equal(out[::2, ::2], data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform19(self, order): + data = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], np.float64) + out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0, + (6, 8), order=order) + out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0, + (3, 4), order=order) + assert_array_almost_equal(out, data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform20(self, order): + 
data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out = ndimage.affine_transform(data, [[0], [2]], 0, (2,), + order=order) + assert_array_almost_equal(out, [1, 3]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform21(self, order): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out = ndimage.affine_transform(data, [[2], [0]], 0, (2,), + order=order) + assert_array_almost_equal(out, [1, 9]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform22(self, order): + # shift and offset interaction; see issue #1547 + data = np.array([4, 1, 3, 2]) + out = ndimage.affine_transform(data, [[2]], [-1], (3,), + order=order) + assert_array_almost_equal(out, [0, 1, 2]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform23(self, order): + # shift and offset interaction; see issue #1547 + data = np.array([4, 1, 3, 2]) + out = ndimage.affine_transform(data, [[0.5]], [-1], (8,), + order=order) + assert_array_almost_equal(out[::2], [0, 4, 1, 3]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform24(self, order): + # consistency between diagonal and non-diagonal case; see issue #1547 + data = np.array([4, 1, 3, 2]) + with suppress_warnings() as sup: + sup.filter(UserWarning, + 'The behavior of affine_transform with a 1-D array .* ' + 'has changed') + out1 = ndimage.affine_transform(data, [2], -1, order=order) + out2 = ndimage.affine_transform(data, [[2]], -1, order=order) + assert_array_almost_equal(out1, out2) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform25(self, order): + # consistency between diagonal and non-diagonal case; see issue #1547 + data = np.array([4, 1, 3, 2]) + with suppress_warnings() as sup: + sup.filter(UserWarning, + 'The behavior of affine_transform with a 1-D array .* ' + 'has changed') + out1 = ndimage.affine_transform(data, [0.5], -1, order=order) + out2 = ndimage.affine_transform(data, [[0.5]], -1, 
order=order) + assert_array_almost_equal(out1, out2) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform26(self, order): + # test homogeneous coordinates + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + if (order > 1): + filtered = ndimage.spline_filter(data, order=order) + else: + filtered = data + tform_original = np.eye(2) + offset_original = -np.ones((2, 1)) + tform_h1 = np.hstack((tform_original, offset_original)) + tform_h2 = np.vstack((tform_h1, [[0, 0, 1]])) + out1 = ndimage.affine_transform(filtered, tform_original, + offset_original.ravel(), + order=order, prefilter=False) + out2 = ndimage.affine_transform(filtered, tform_h1, order=order, + prefilter=False) + out3 = ndimage.affine_transform(filtered, tform_h2, order=order, + prefilter=False) + for out in [out1, out2, out3]: + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_affine_transform27(self): + # test valid homogeneous transformation matrix + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + tform_h1 = np.hstack((np.eye(2), -np.ones((2, 1)))) + tform_h2 = np.vstack((tform_h1, [[5, 2, 1]])) + assert_raises(ValueError, ndimage.affine_transform, data, tform_h2) + + def test_affine_transform_1d_endianness_with_output_parameter(self): + # 1d affine transform given output ndarray or dtype with + # either endianness. 
see issue #7388 + data = np.ones((2, 2)) + for out in [np.empty_like(data), + np.empty_like(data).astype(data.dtype.newbyteorder()), + data.dtype, data.dtype.newbyteorder()]: + with suppress_warnings() as sup: + sup.filter(UserWarning, + 'The behavior of affine_transform with a 1-D array ' + '.* has changed') + returned = ndimage.affine_transform(data, [1, 1], output=out) + result = out if returned is None else returned + assert_array_almost_equal(result, [[1, 1], [1, 1]]) + + def test_affine_transform_multi_d_endianness_with_output_parameter(self): + # affine transform given output ndarray or dtype with either endianness + # see issue #4127 + data = np.array([1]) + for out in [data.dtype, data.dtype.newbyteorder(), + np.empty_like(data), + np.empty_like(data).astype(data.dtype.newbyteorder())]: + returned = ndimage.affine_transform(data, [[1]], output=out) + result = out if returned is None else returned + assert_array_almost_equal(result, [1]) + + def test_affine_transform_output_shape(self): + # don't require output_shape when out of a different size is given + data = np.arange(8, dtype=np.float64) + out = np.ones((16,)) + + ndimage.affine_transform(data, [[1]], output=out) + assert_array_almost_equal(out[:8], data) + + # mismatched output shape raises an error + with pytest.raises(RuntimeError): + ndimage.affine_transform( + data, [[1]], output=out, output_shape=(12,)) + + def test_affine_transform_with_string_output(self): + data = np.array([1]) + out = ndimage.affine_transform(data, [[1]], output='f') + assert_(out.dtype is np.dtype('f')) + assert_array_almost_equal(out, [1]) + + @pytest.mark.parametrize('shift', + [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)]) + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform_shift_via_grid_wrap(self, shift, order): + # For mode 'grid-wrap', integer shifts should match np.roll + x = np.array([[0, 1], + [2, 3]]) + affine = np.zeros((2, 3)) + affine[:2, :2] = np.eye(2) + affine[:, 2] = shift + 
assert_array_almost_equal( + ndimage.affine_transform(x, affine, mode='grid-wrap', order=order), + np.roll(x, shift, axis=(0, 1)), + ) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_affine_transform_shift_reflect(self, order): + # shift by x.shape results in reflection + x = np.array([[0, 1, 2], + [3, 4, 5]]) + affine = np.zeros((2, 3)) + affine[:2, :2] = np.eye(2) + affine[:, 2] = x.shape + assert_array_almost_equal( + ndimage.affine_transform(x, affine, mode='reflect', order=order), + x[::-1, ::-1], + ) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift01(self, order): + data = np.array([1]) + out = ndimage.shift(data, [1], order=order) + assert_array_almost_equal(out, [0]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift02(self, order): + data = np.ones([4]) + out = ndimage.shift(data, [1], order=order) + assert_array_almost_equal(out, [0, 1, 1, 1]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift03(self, order): + data = np.ones([4]) + out = ndimage.shift(data, -1, order=order) + assert_array_almost_equal(out, [1, 1, 1, 0]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift04(self, order): + data = np.array([4, 1, 3, 2]) + out = ndimage.shift(data, 1, order=order) + assert_array_almost_equal(out, [0, 4, 1, 3]) + + @pytest.mark.parametrize('order', range(0, 6)) + @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) + def test_shift05(self, order, dtype): + data = np.array([[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]], dtype=dtype) + expected = np.array([[0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1]], dtype=dtype) + if data.dtype.kind == 'c': + data -= 1j * data + expected -= 1j * expected + out = ndimage.shift(data, [0, 1], order=order) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + @pytest.mark.parametrize('mode', ['constant', 'grid-constant']) + @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) + def 
test_shift_with_nonzero_cval(self, order, mode, dtype): + data = np.array([[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]], dtype=dtype) + + expected = np.array([[0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1]], dtype=dtype) + + if data.dtype.kind == 'c': + data -= 1j * data + expected -= 1j * expected + cval = 5.0 + expected[:, 0] = cval # specific to shift of [0, 1] used below + out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift06(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + out = ndimage.shift(data, [0, 1], order=order) + assert_array_almost_equal(out, [[0, 4, 1, 3], + [0, 7, 6, 8], + [0, 3, 5, 3]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift07(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + out = ndimage.shift(data, [1, 0], order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [4, 1, 3, 2], + [7, 6, 8, 5]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift08(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + out = ndimage.shift(data, [1, 1], order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift09(self, order): + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + if (order > 1): + filtered = ndimage.spline_filter(data, order=order) + else: + filtered = data + out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + @pytest.mark.parametrize('shift', + [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)]) + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift_grid_wrap(self, shift, order): + # For mode 'grid-wrap', integer shifts should match np.roll + x = np.array([[0, 1], + 
[2, 3]]) + assert_array_almost_equal( + ndimage.shift(x, shift, mode='grid-wrap', order=order), + np.roll(x, shift, axis=(0, 1)), + ) + + @pytest.mark.parametrize('shift', + [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)]) + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift_grid_constant1(self, shift, order): + # For integer shifts, 'constant' and 'grid-constant' should be equal + x = np.arange(20).reshape((5, 4)) + assert_array_almost_equal( + ndimage.shift(x, shift, mode='grid-constant', order=order), + ndimage.shift(x, shift, mode='constant', order=order), + ) + + def test_shift_grid_constant_order1(self): + x = np.array([[1, 2, 3], + [4, 5, 6]], dtype=float) + expected_result = np.array([[0.25, 0.75, 1.25], + [1.25, 3.00, 4.00]]) + assert_array_almost_equal( + ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1), + expected_result, + ) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_shift_reflect(self, order): + # shift by x.shape results in reflection + x = np.array([[0, 1, 2], + [3, 4, 5]]) + assert_array_almost_equal( + ndimage.shift(x, x.shape, mode='reflect', order=order), + x[::-1, ::-1], + ) + + @pytest.mark.parametrize('order', range(0, 6)) + @pytest.mark.parametrize('prefilter', [False, True]) + def test_shift_nearest_boundary(self, order, prefilter): + # verify that shifting at least order // 2 beyond the end of the array + # gives a value equal to the edge value. 
+ x = np.arange(16) + kwargs = dict(mode='nearest', order=order, prefilter=prefilter) + assert_array_almost_equal( + ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0], + ) + assert_array_almost_equal( + ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1], + ) + + @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest', + 'mirror', 'reflect']) + @pytest.mark.parametrize('order', range(6)) + def test_shift_vs_padded(self, order, mode): + x = np.arange(144, dtype=float).reshape(12, 12) + shift = (0.4, -2.3) + + # manually pad and then extract center to get expected result + npad = 32 + pad_mode = ndimage_to_numpy_mode.get(mode) + xp = np.pad(x, npad, mode=pad_mode) + center_slice = tuple([slice(npad, -npad)] * x.ndim) + expected_result = ndimage.shift( + xp, shift, mode=mode, order=order)[center_slice] + + assert_allclose( + ndimage.shift(x, shift, mode=mode, order=order), + expected_result, + rtol=1e-7, + ) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_zoom1(self, order): + for z in [2, [2, 2]]: + arr = np.array(list(range(25))).reshape((5, 5)).astype(float) + arr = ndimage.zoom(arr, z, order=order) + assert_equal(arr.shape, (10, 10)) + assert_(np.all(arr[-1, :] != 0)) + assert_(np.all(arr[-1, :] >= (20 - eps))) + assert_(np.all(arr[0, :] <= (5 + eps))) + assert_(np.all(arr >= (0 - eps))) + assert_(np.all(arr <= (24 + eps))) + + def test_zoom2(self): + arr = np.arange(12).reshape((3, 4)) + out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5) + assert_array_equal(out, arr) + + def test_zoom3(self): + arr = np.array([[1, 2]]) + out1 = ndimage.zoom(arr, (2, 1)) + out2 = ndimage.zoom(arr, (1, 2)) + + assert_array_almost_equal(out1, np.array([[1, 2], [1, 2]])) + assert_array_almost_equal(out2, np.array([[1, 1, 2, 2]])) + + @pytest.mark.parametrize('order', range(0, 6)) + @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) + def test_zoom_affine01(self, order, dtype): + data = np.asarray([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 
11, 12]], dtype=dtype) + if data.dtype.kind == 'c': + data -= 1j * data + with suppress_warnings() as sup: + sup.filter(UserWarning, + 'The behavior of affine_transform with a 1-D array .* ' + 'has changed') + out = ndimage.affine_transform(data, [0.5, 0.5], 0, + (6, 8), order=order) + assert_array_almost_equal(out[::2, ::2], data) + + def test_zoom_infinity(self): + # Ticket #1419 regression test + dim = 8 + ndimage.zoom(np.zeros((dim, dim)), 1. / dim, mode='nearest') + + def test_zoom_zoomfactor_one(self): + # Ticket #1122 regression test + arr = np.zeros((1, 5, 5)) + zoom = (1.0, 2.0, 2.0) + + out = ndimage.zoom(arr, zoom, cval=7) + ref = np.zeros((1, 10, 10)) + assert_array_almost_equal(out, ref) + + def test_zoom_output_shape_roundoff(self): + arr = np.zeros((3, 11, 25)) + zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25) + out = ndimage.zoom(arr, zoom) + assert_array_equal(out.shape, (4, 15, 29)) + + @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)]) + @pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect', + 'mirror', 'grid-wrap', 'grid-mirror', + 'grid-constant']) + def test_zoom_by_int_order0(self, zoom, mode): + # order 0 zoom should be the same as replication via np.kron + # Note: This is not True for general x shapes when grid_mode is False, + # but works here for all modes because the size ratio happens to + # always be an integer when x.shape = (2, 2). + x = np.array([[0, 1], + [2, 3]], dtype=float) + # x = np.arange(16, dtype=float).reshape(4, 4) + assert_array_almost_equal( + ndimage.zoom(x, zoom, order=0, mode=mode), + np.kron(x, np.ones(zoom)) + ) + + @pytest.mark.parametrize('shape', [(2, 3), (4, 4)]) + @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)]) + @pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror', + 'grid-wrap', 'grid-constant']) + def test_zoom_grid_by_int_order0(self, shape, zoom, mode): + # When grid_mode is True, order 0 zoom should be the same as + # replication via np.kron. 
The only exceptions to this are the + # non-grid modes 'constant' and 'wrap'. + x = np.arange(np.prod(shape), dtype=float).reshape(shape) + assert_array_almost_equal( + ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True), + np.kron(x, np.ones(zoom)) + ) + + @pytest.mark.parametrize('mode', ['constant', 'wrap']) + def test_zoom_grid_mode_warnings(self, mode): + # Warn on use of non-grid modes when grid_mode is True + x = np.arange(9, dtype=float).reshape((3, 3)) + with pytest.warns(UserWarning, + match="It is recommended to use mode"): + ndimage.zoom(x, 2, mode=mode, grid_mode=True), + + @pytest.mark.parametrize('order', range(0, 6)) + def test_rotate01(self, order): + data = np.array([[0, 0, 0, 0], + [0, 1, 1, 0], + [0, 0, 0, 0]], dtype=np.float64) + out = ndimage.rotate(data, 0, order=order) + assert_array_almost_equal(out, data) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_rotate02(self, order): + data = np.array([[0, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 0]], dtype=np.float64) + expected = np.array([[0, 0, 0], + [0, 0, 0], + [0, 1, 0], + [0, 0, 0]], dtype=np.float64) + out = ndimage.rotate(data, 90, order=order) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + @pytest.mark.parametrize('dtype', [np.float64, np.complex128]) + def test_rotate03(self, order, dtype): + data = np.array([[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 0, 0]], dtype=dtype) + expected = np.array([[0, 0, 0], + [0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0]], dtype=dtype) + if data.dtype.kind == 'c': + data -= 1j * data + expected -= 1j * expected + out = ndimage.rotate(data, 90, order=order) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_rotate04(self, order): + data = np.array([[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 0, 0]], dtype=np.float64) + expected = np.array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 1, 0, 0]], dtype=np.float64) + out = 
ndimage.rotate(data, 90, reshape=False, order=order) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_rotate05(self, order): + data = np.empty((4, 3, 3)) + for i in range(3): + data[:, :, i] = np.array([[0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0]], dtype=np.float64) + expected = np.array([[0, 0, 0, 0], + [0, 1, 1, 0], + [0, 0, 0, 0]], dtype=np.float64) + out = ndimage.rotate(data, 90, order=order) + for i in range(3): + assert_array_almost_equal(out[:, :, i], expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_rotate06(self, order): + data = np.empty((3, 4, 3)) + for i in range(3): + data[:, :, i] = np.array([[0, 0, 0, 0], + [0, 1, 1, 0], + [0, 0, 0, 0]], dtype=np.float64) + expected = np.array([[0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0]], dtype=np.float64) + out = ndimage.rotate(data, 90, order=order) + for i in range(3): + assert_array_almost_equal(out[:, :, i], expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_rotate07(self, order): + data = np.array([[[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 0, 0]]] * 2, dtype=np.float64) + data = data.transpose() + expected = np.array([[[0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 0]]] * 2, dtype=np.float64) + expected = expected.transpose([2, 1, 0]) + out = ndimage.rotate(data, 90, axes=(0, 1), order=order) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('order', range(0, 6)) + def test_rotate08(self, order): + data = np.array([[[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 0, 0]]] * 2, dtype=np.float64) + data = data.transpose() + expected = np.array([[[0, 0, 1, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]] * 2, dtype=np.float64) + expected = expected.transpose() + out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order) + assert_array_almost_equal(out, expected) + + def test_rotate09(self): + data = np.array([[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 
0, 0]] * 2, dtype=np.float64) + with assert_raises(ValueError): + ndimage.rotate(data, 90, axes=(0, data.ndim)) + + def test_rotate10(self): + data = np.arange(45, dtype=np.float64).reshape((3, 5, 3)) + + # The output of ndimage.rotate before refactoring + expected = np.array([[[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [6.54914793, 7.54914793, 8.54914793], + [10.84520162, 11.84520162, 12.84520162], + [0.0, 0.0, 0.0]], + [[6.19286575, 7.19286575, 8.19286575], + [13.4730712, 14.4730712, 15.4730712], + [21.0, 22.0, 23.0], + [28.5269288, 29.5269288, 30.5269288], + [35.80713425, 36.80713425, 37.80713425]], + [[0.0, 0.0, 0.0], + [31.15479838, 32.15479838, 33.15479838], + [35.45085207, 36.45085207, 37.45085207], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]]]) + + out = ndimage.rotate(data, angle=12, reshape=False) + assert_array_almost_equal(out, expected) + + def test_rotate_exact_180(self): + a = np.tile(np.arange(5), (5, 1)) + b = ndimage.rotate(ndimage.rotate(a, 180), -180) + assert_equal(a, b) + + +def test_zoom_output_shape(): + """Ticket #643""" + x = np.arange(12).reshape((3, 4)) + ndimage.zoom(x, 2, output=np.zeros((6, 8))) diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py new file mode 100644 index 0000000000000000000000000000000000000000..a55b1a6014348ba022f9982900a3cf5e1bcf62af --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py @@ -0,0 +1,1419 @@ +import os.path + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + suppress_warnings, +) +import pytest +from pytest import raises as assert_raises + +import scipy.ndimage as ndimage + + +from . 
import types + + +class Test_measurements_stats: + """ndimage._measurements._stats() is a utility used by other functions.""" + + def test_a(self): + x = [0, 1, 2, 6] + labels = [0, 0, 1, 1] + index = [0, 1] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums = ndimage._measurements._stats( + x, labels=labels, index=index) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + + def test_b(self): + # Same data as test_a, but different labels. The label 9 exceeds the + # length of 'labels', so this test will follow a different code path. + x = [0, 1, 2, 6] + labels = [0, 0, 9, 9] + index = [0, 9] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums = ndimage._measurements._stats( + x, labels=labels, index=index) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + + def test_a_centered(self): + x = [0, 1, 2, 6] + labels = [0, 0, 1, 1] + index = [0, 1] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, centers = ndimage._measurements._stats( + x, labels=labels, index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + def test_b_centered(self): + x = [0, 1, 2, 6] + labels = [0, 0, 9, 9] + index = [0, 9] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, centers = ndimage._measurements._stats( + x, labels=labels, index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + def test_nonint_labels(self): + x = [0, 1, 2, 6] + labels = [0.0, 0.0, 9.0, 9.0] + index = [0.0, 9.0] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, 
centers = ndimage._measurements._stats( + x, labels=labels, index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + +class Test_measurements_select: + """ndimage._measurements._select() is a utility used by other functions.""" + + def test_basic(self): + x = [0, 1, 6, 2] + cases = [ + ([0, 0, 1, 1], [0, 1]), # "Small" integer labels + ([0, 0, 9, 9], [0, 9]), # A label larger than len(labels) + ([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]), # Non-integer labels + ] + for labels, index in cases: + result = ndimage._measurements._select( + x, labels=labels, index=index) + assert_(len(result) == 0) + result = ndimage._measurements._select( + x, labels=labels, index=index, find_max=True) + assert_(len(result) == 1) + assert_array_equal(result[0], [1, 6]) + result = ndimage._measurements._select( + x, labels=labels, index=index, find_min=True) + assert_(len(result) == 1) + assert_array_equal(result[0], [0, 2]) + result = ndimage._measurements._select( + x, labels=labels, index=index, find_min=True, + find_min_positions=True) + assert_(len(result) == 2) + assert_array_equal(result[0], [0, 2]) + assert_array_equal(result[1], [0, 3]) + assert_equal(result[1].dtype.kind, 'i') + result = ndimage._measurements._select( + x, labels=labels, index=index, find_max=True, + find_max_positions=True) + assert_(len(result) == 2) + assert_array_equal(result[0], [1, 6]) + assert_array_equal(result[1], [1, 2]) + assert_equal(result[1].dtype.kind, 'i') + + +def test_label01(): + data = np.ones([]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, 1) + assert_equal(n, 1) + + +def test_label02(): + data = np.zeros([]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, 0) + assert_equal(n, 0) + + +def test_label03(): + data = np.ones([1]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [1]) + assert_equal(n, 1) + + +def test_label04(): + data = np.zeros([1]) + out, n = 
ndimage.label(data) + assert_array_almost_equal(out, [0]) + assert_equal(n, 0) + + +def test_label05(): + data = np.ones([5]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [1, 1, 1, 1, 1]) + assert_equal(n, 1) + + +def test_label06(): + data = np.array([1, 0, 1, 1, 0, 1]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3]) + assert_equal(n, 3) + + +def test_label07(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + assert_equal(n, 0) + + +def test_label08(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + assert_equal(n, 4) + + +def test_label09(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]]) + struct = ndimage.generate_binary_structure(2, 2) + out, n = ndimage.label(data, struct) + assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [2, 2, 0, 0, 0, 0], + [2, 2, 0, 0, 0, 0], + [0, 0, 0, 3, 3, 0]]) + assert_equal(n, 3) + + +def test_label10(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0]]) + struct = ndimage.generate_binary_structure(2, 2) + out, n = ndimage.label(data, struct) + assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0]]) + assert_equal(n, 1) + + 
+def test_label11(): + for type in types: + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]], type) + out, n = ndimage.label(data) + expected = [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]] + assert_array_almost_equal(out, expected) + assert_equal(n, 4) + + +def test_label11_inplace(): + for type in types: + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]], type) + n = ndimage.label(data, output=data) + expected = [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]] + assert_array_almost_equal(data, expected) + assert_equal(n, 4) + + +def test_label12(): + for type in types: + data = np.array([[0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 0, 1, 1, 0]], type) + out, n = ndimage.label(data) + expected = [[0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 0, 1, 1, 0]] + assert_array_almost_equal(out, expected) + assert_equal(n, 1) + + +def test_label13(): + for type in types: + data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], + [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], + type) + out, n = ndimage.label(data) + expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], + [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] + assert_array_almost_equal(out, expected) + assert_equal(n, 1) + + +def test_label_output_typed(): + data = np.ones([5]) + for t in types: + output = np.zeros([5], dtype=t) + n = ndimage.label(data, output=output) + assert_array_almost_equal(output, 1) + assert_equal(n, 1) + + +def 
test_label_output_dtype(): + data = np.ones([5]) + for t in types: + output, n = ndimage.label(data, output=t) + assert_array_almost_equal(output, 1) + assert output.dtype == t + + +def test_label_output_wrong_size(): + data = np.ones([5]) + for t in types: + output = np.zeros([10], t) + assert_raises((RuntimeError, ValueError), + ndimage.label, data, output=output) + + +def test_label_structuring_elements(): + data = np.loadtxt(os.path.join(os.path.dirname( + __file__), "data", "label_inputs.txt")) + strels = np.loadtxt(os.path.join( + os.path.dirname(__file__), "data", "label_strels.txt")) + results = np.loadtxt(os.path.join( + os.path.dirname(__file__), "data", "label_results.txt")) + data = data.reshape((-1, 7, 7)) + strels = strels.reshape((-1, 3, 3)) + results = results.reshape((-1, 7, 7)) + r = 0 + for i in range(data.shape[0]): + d = data[i, :, :] + for j in range(strels.shape[0]): + s = strels[j, :, :] + assert_equal(ndimage.label(d, s)[0], results[r, :, :]) + r += 1 + + +def test_ticket_742(): + def SE(img, thresh=.7, size=4): + mask = img > thresh + rank = len(mask.shape) + la, co = ndimage.label(mask, + ndimage.generate_binary_structure(rank, rank)) + _ = ndimage.find_objects(la) + + if np.dtype(np.intp) != np.dtype('i'): + shape = (3, 1240, 1240) + a = np.random.rand(np.prod(shape)).reshape(shape) + # shouldn't crash + SE(a) + + +def test_gh_issue_3025(): + """Github issue #3025 - improper merging of labels""" + d = np.zeros((60, 320)) + d[:, :257] = 1 + d[:, 260:] = 1 + d[36, 257] = 1 + d[35, 258] = 1 + d[35, 259] = 1 + assert ndimage.label(d, np.ones((3, 3)))[1] == 1 + + +def test_label_default_dtype(): + test_array = np.random.rand(10, 10) + label, no_features = ndimage.label(test_array > 0.5) + assert_(label.dtype in (np.int32, np.int64)) + # Shouldn't raise an exception + ndimage.find_objects(label) + + +def test_find_objects01(): + data = np.ones([], dtype=int) + out = ndimage.find_objects(data) + assert_(out == [()]) + + +def 
test_find_objects02(): + data = np.zeros([], dtype=int) + out = ndimage.find_objects(data) + assert_(out == []) + + +def test_find_objects03(): + data = np.ones([1], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None),)]) + + +def test_find_objects04(): + data = np.zeros([1], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, []) + + +def test_find_objects05(): + data = np.ones([5], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 5, None),)]) + + +def test_find_objects06(): + data = np.array([1, 0, 2, 2, 0, 3]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None),), + (slice(2, 4, None),), + (slice(5, 6, None),)]) + + +def test_find_objects07(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, []) + + +def test_find_objects08(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), + (slice(1, 3, None), slice(2, 5, None)), + (slice(3, 5, None), slice(0, 2, None)), + (slice(5, 6, None), slice(3, 5, None))]) + + +def test_find_objects09(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), + (slice(1, 3, None), slice(2, 5, None)), + None, + (slice(5, 6, None), slice(3, 5, None))]) + + +def test_value_indices01(): + "Test dictionary keys and entries" + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + vi = ndimage.value_indices(data, 
ignore_value=0) + true_keys = [1, 2, 4] + assert_equal(list(vi.keys()), true_keys) + + truevi = {} + for k in true_keys: + truevi[k] = np.where(data == k) + + vi = ndimage.value_indices(data, ignore_value=0) + assert_equal(vi, truevi) + + +def test_value_indices02(): + "Test input checking" + data = np.zeros((5, 4), dtype=np.float32) + msg = "Parameter 'arr' must be an integer array" + with assert_raises(ValueError, match=msg): + ndimage.value_indices(data) + + +def test_value_indices03(): + "Test different input array shapes, from 1-D to 4-D" + for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]: + a = np.array((12*[1]+12*[2]+12*[3]), dtype=np.int32).reshape(shape) + trueKeys = np.unique(a) + vi = ndimage.value_indices(a) + assert_equal(list(vi.keys()), list(trueKeys)) + for k in trueKeys: + trueNdx = np.where(a == k) + assert_equal(vi[k], trueNdx) + + +def test_sum01(): + for type in types: + input = np.array([], type) + output = ndimage.sum(input) + assert_equal(output, 0.0) + + +def test_sum02(): + for type in types: + input = np.zeros([0, 4], type) + output = ndimage.sum(input) + assert_equal(output, 0.0) + + +def test_sum03(): + for type in types: + input = np.ones([], type) + output = ndimage.sum(input) + assert_almost_equal(output, 1.0) + + +def test_sum04(): + for type in types: + input = np.array([1, 2], type) + output = ndimage.sum(input) + assert_almost_equal(output, 3.0) + + +def test_sum05(): + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input) + assert_almost_equal(output, 10.0) + + +def test_sum06(): + labels = np.array([], bool) + for type in types: + input = np.array([], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 0.0) + + +def test_sum07(): + labels = np.ones([0, 4], bool) + for type in types: + input = np.zeros([0, 4], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 0.0) + + +def test_sum08(): + labels = np.array([1, 0], bool) + for type in types: 
+ input = np.array([1, 2], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 1.0) + + +def test_sum09(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input, labels=labels) + assert_almost_equal(output, 4.0) + + +def test_sum10(): + labels = np.array([1, 0], bool) + input = np.array([[1, 2], [3, 4]], bool) + output = ndimage.sum(input, labels=labels) + assert_almost_equal(output, 2.0) + + +def test_sum11(): + labels = np.array([1, 2], np.int8) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input, labels=labels, + index=2) + assert_almost_equal(output, 6.0) + + +def test_sum12(): + labels = np.array([[1, 2], [2, 4]], np.int8) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input, labels=labels, index=[4, 8, 2]) + assert_array_almost_equal(output, [4.0, 0.0, 5.0]) + + +def test_sum_labels(): + labels = np.array([[1, 2], [2, 4]], np.int8) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2]) + output_labels = ndimage.sum_labels( + input, labels=labels, index=[4, 8, 2]) + + assert (output_sum == output_labels).all() + assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0]) + + +def test_mean01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels) + assert_almost_equal(output, 2.0) + + +def test_mean02(): + labels = np.array([1, 0], bool) + input = np.array([[1, 2], [3, 4]], bool) + output = ndimage.mean(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_mean03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels, + index=2) + assert_almost_equal(output, 3.0) + + +def test_mean04(): + labels = np.array([[1, 2], [2, 4]], 
np.int8) + with np.errstate(all='ignore'): + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels, + index=[4, 8, 2]) + assert_array_almost_equal(output[[0, 2]], [4.0, 2.5]) + assert_(np.isnan(output[1])) + + +def test_minimum01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_minimum02(): + labels = np.array([1, 0], bool) + input = np.array([[2, 2], [2, 4]], bool) + output = ndimage.minimum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_minimum03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum(input, labels=labels, + index=2) + assert_almost_equal(output, 2.0) + + +def test_minimum04(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum(input, labels=labels, + index=[2, 3, 8]) + assert_array_almost_equal(output, [2.0, 4.0, 0.0]) + + +def test_maximum01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels) + assert_almost_equal(output, 3.0) + + +def test_maximum02(): + labels = np.array([1, 0], bool) + input = np.array([[2, 2], [2, 4]], bool) + output = ndimage.maximum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_maximum03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels, + index=2) + assert_almost_equal(output, 4.0) + + +def test_maximum04(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels, + index=[2, 3, 8]) + assert_array_almost_equal(output, [3.0, 4.0, 0.0]) + + +def 
test_maximum05(): + # Regression test for ticket #501 (Trac) + x = np.array([-3, -2, -1]) + assert_equal(ndimage.maximum(x), -1) + + +def test_median01(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + labels = np.array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + output = ndimage.median(a, labels=labels, index=[1, 2, 3]) + assert_array_almost_equal(output, [2.5, 4.0, 6.0]) + + +def test_median02(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + output = ndimage.median(a) + assert_almost_equal(output, 1.0) + + +def test_median03(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + labels = np.array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + output = ndimage.median(a, labels=labels) + assert_almost_equal(output, 3.0) + + +def test_median_gh12836_bool(): + # test boolean addition fix on example from gh-12836 + a = np.asarray([1, 1], dtype=bool) + output = ndimage.median(a, labels=np.ones((2,)), index=[1]) + assert_array_almost_equal(output, [1.0]) + + +def test_median_no_int_overflow(): + # test integer overflow fix on example from gh-12836 + a = np.asarray([65, 70], dtype=np.int8) + output = ndimage.median(a, labels=np.ones((2,)), index=[1]) + assert_array_almost_equal(output, [67.5]) + + +def test_variance01(): + with np.errstate(all='ignore'): + for type in types: + input = np.array([], type) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice") + output = ndimage.variance(input) + assert_(np.isnan(output)) + + +def test_variance02(): + for type in types: + input = np.array([1], type) + output = ndimage.variance(input) + assert_almost_equal(output, 0.0) + + +def test_variance03(): + for type in types: + input = np.array([1, 3], type) + output = ndimage.variance(input) + assert_almost_equal(output, 1.0) + + +def test_variance04(): + input = np.array([1, 0], bool) + output = 
ndimage.variance(input) + assert_almost_equal(output, 0.25) + + +def test_variance05(): + labels = [2, 2, 3] + for type in types: + input = np.array([1, 3, 8], type) + output = ndimage.variance(input, labels, 2) + assert_almost_equal(output, 1.0) + + +def test_variance06(): + labels = [2, 2, 3, 3, 4] + with np.errstate(all='ignore'): + for type in types: + input = np.array([1, 3, 8, 10, 8], type) + output = ndimage.variance(input, labels, [2, 3, 4]) + assert_array_almost_equal(output, [1.0, 1.0, 0.0]) + + +def test_standard_deviation01(): + with np.errstate(all='ignore'): + for type in types: + input = np.array([], type) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice") + output = ndimage.standard_deviation(input) + assert_(np.isnan(output)) + + +def test_standard_deviation02(): + for type in types: + input = np.array([1], type) + output = ndimage.standard_deviation(input) + assert_almost_equal(output, 0.0) + + +def test_standard_deviation03(): + for type in types: + input = np.array([1, 3], type) + output = ndimage.standard_deviation(input) + assert_almost_equal(output, np.sqrt(1.0)) + + +def test_standard_deviation04(): + input = np.array([1, 0], bool) + output = ndimage.standard_deviation(input) + assert_almost_equal(output, 0.5) + + +def test_standard_deviation05(): + labels = [2, 2, 3] + for type in types: + input = np.array([1, 3, 8], type) + output = ndimage.standard_deviation(input, labels, 2) + assert_almost_equal(output, 1.0) + + +def test_standard_deviation06(): + labels = [2, 2, 3, 3, 4] + with np.errstate(all='ignore'): + for type in types: + input = np.array([1, 3, 8, 10, 8], type) + output = ndimage.standard_deviation(input, labels, [2, 3, 4]) + assert_array_almost_equal(output, [1.0, 1.0, 0.0]) + + +def test_standard_deviation07(): + labels = [1] + with np.errstate(all='ignore'): + for type in types: + input = np.array([-0.00619519], type) + output = ndimage.standard_deviation(input, labels, [1]) + 
assert_array_almost_equal(output, [0]) + + +def test_minimum_position01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum_position(input, labels=labels) + assert_equal(output, (0, 0)) + + +def test_minimum_position02(): + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input) + assert_equal(output, (1, 2)) + + +def test_minimum_position03(): + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], bool) + output = ndimage.minimum_position(input) + assert_equal(output, (1, 2)) + + +def test_minimum_position04(): + input = np.array([[5, 4, 2, 5], + [3, 7, 1, 2], + [1, 5, 1, 1]], bool) + output = ndimage.minimum_position(input) + assert_equal(output, (0, 0)) + + +def test_minimum_position05(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 2, 3]], type) + output = ndimage.minimum_position(input, labels) + assert_equal(output, (2, 0)) + + +def test_minimum_position06(): + labels = [1, 2, 3, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input, labels, 2) + assert_equal(output, (0, 1)) + + +def test_minimum_position07(): + labels = [1, 2, 3, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input, labels, + [2, 3]) + assert_equal(output[0], (0, 1)) + assert_equal(output[1], (1, 2)) + + +def test_maximum_position01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum_position(input, + labels=labels) + assert_equal(output, (1, 0)) + + +def test_maximum_position02(): + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input) + 
assert_equal(output, (1, 2)) + + +def test_maximum_position03(): + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], bool) + output = ndimage.maximum_position(input) + assert_equal(output, (0, 0)) + + +def test_maximum_position04(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels) + assert_equal(output, (1, 1)) + + +def test_maximum_position05(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, 1) + assert_equal(output, (0, 0)) + + +def test_maximum_position06(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, + [1, 2]) + assert_equal(output[0], (0, 0)) + assert_equal(output[1], (1, 1)) + + +def test_maximum_position07(): + # Test float labels + labels = np.array([1.0, 2.5, 0.0, 4.5]) + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, + [1.0, 4.5]) + assert_equal(output[0], (0, 0)) + assert_equal(output[1], (0, 3)) + + +def test_extrema01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels) + output2 = ndimage.minimum(input, labels=labels) + output3 = ndimage.maximum(input, labels=labels) + output4 = ndimage.minimum_position(input, + labels=labels) + output5 = ndimage.maximum_position(input, + labels=labels) + assert_equal(output1, (output2, output3, output4, output5)) + + +def test_extrema02(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels, + index=2) + output2 = ndimage.minimum(input, labels=labels, + index=2) + 
output3 = ndimage.maximum(input, labels=labels, + index=2) + output4 = ndimage.minimum_position(input, + labels=labels, index=2) + output5 = ndimage.maximum_position(input, + labels=labels, index=2) + assert_equal(output1, (output2, output3, output4, output5)) + + +def test_extrema03(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels, + index=[2, 3, 8]) + output2 = ndimage.minimum(input, labels=labels, + index=[2, 3, 8]) + output3 = ndimage.maximum(input, labels=labels, + index=[2, 3, 8]) + output4 = ndimage.minimum_position(input, + labels=labels, index=[2, 3, 8]) + output5 = ndimage.maximum_position(input, + labels=labels, index=[2, 3, 8]) + assert_array_almost_equal(output1[0], output2) + assert_array_almost_equal(output1[1], output3) + assert_array_almost_equal(output1[2], output4) + assert_array_almost_equal(output1[3], output5) + + +def test_extrema04(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output1 = ndimage.extrema(input, labels, [1, 2]) + output2 = ndimage.minimum(input, labels, [1, 2]) + output3 = ndimage.maximum(input, labels, [1, 2]) + output4 = ndimage.minimum_position(input, labels, + [1, 2]) + output5 = ndimage.maximum_position(input, labels, + [1, 2]) + assert_array_almost_equal(output1[0], output2) + assert_array_almost_equal(output1[1], output3) + assert_array_almost_equal(output1[2], output4) + assert_array_almost_equal(output1[3], output5) + + +def test_center_of_mass01(): + expected = [0.0, 0.0] + for type in types: + input = np.array([[1, 0], [0, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass02(): + expected = [1, 0] + for type in types: + input = np.array([[0, 0], [1, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def 
test_center_of_mass03(): + expected = [0, 1] + for type in types: + input = np.array([[0, 1], [0, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass04(): + expected = [1, 1] + for type in types: + input = np.array([[0, 0], [0, 1]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass05(): + expected = [0.5, 0.5] + for type in types: + input = np.array([[1, 1], [1, 1]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass06(): + expected = [0.5, 0.5] + input = np.array([[1, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass07(): + labels = [1, 0] + expected = [0.5, 0.0] + input = np.array([[1, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input, labels) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass08(): + labels = [1, 2] + expected = [0.5, 1.0] + input = np.array([[5, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input, labels, 2) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass09(): + labels = [1, 2] + expected = [(0.5, 0.0), (0.5, 1.0)] + input = np.array([[1, 2], [1, 1]], bool) + output = ndimage.center_of_mass(input, labels, [1, 2]) + assert_array_almost_equal(output, expected) + + +def test_histogram01(): + expected = np.ones(10) + input = np.arange(10) + output = ndimage.histogram(input, 0, 10, 10) + assert_array_almost_equal(output, expected) + + +def test_histogram02(): + labels = [1, 1, 1, 1, 2, 2, 2, 2] + expected = [0, 2, 0, 1, 1] + input = np.array([1, 1, 3, 4, 3, 3, 3, 3]) + output = ndimage.histogram(input, 0, 4, 5, labels, 1) + assert_array_almost_equal(output, expected) + + +def test_histogram03(): + labels = [1, 0, 1, 1, 2, 2, 2, 2] + expected1 = [0, 1, 0, 1, 1] + expected2 = [0, 0, 0, 3, 
0] + input = np.array([1, 1, 3, 4, 3, 5, 3, 3]) + output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2)) + + assert_array_almost_equal(output[0], expected1) + assert_array_almost_equal(output[1], expected2) + + +def test_stat_funcs_2d(): + a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]]) + lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]]) + + mean = ndimage.mean(a, labels=lbl, index=[1, 2]) + assert_array_equal(mean, [7.0, 4.0]) + + var = ndimage.variance(a, labels=lbl, index=[1, 2]) + assert_array_equal(var, [2.5, 1.0]) + + std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2]) + assert_array_almost_equal(std, np.sqrt([2.5, 1.0])) + + med = ndimage.median(a, labels=lbl, index=[1, 2]) + assert_array_equal(med, [7.0, 4.0]) + + min = ndimage.minimum(a, labels=lbl, index=[1, 2]) + assert_array_equal(min, [5, 3]) + + max = ndimage.maximum(a, labels=lbl, index=[1, 2]) + assert_array_equal(max, [9, 5]) + + +class TestWatershedIft: + + def test_watershed_ift01(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift02(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 
0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, -1, 1, 1, 1, -1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, 1, 1, 1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift03(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 2, 0, 3, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], np.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, -1, 2, -1, 3, -1, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, -1, 2, -1, 3, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift04(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 2, 0, 3, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], + np.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = 
[[-1, -1, -1, -1, -1, -1, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift05(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 3, 0, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], + np.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift06(self): + data = np.array([[0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift07(self): + shape = (7, 6) + data = np.zeros(shape, dtype=np.uint8) + data = data.transpose() + data[...] 
= np.array([[0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.int8) + out = np.zeros(shape, dtype=np.int16) + out = out.transpose() + ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]], + output=out) + expected = [[-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift08(self): + # Test cost larger than uint8. See gh-10069. + data = np.array([[256, 0], + [0, 0]], np.uint16) + markers = np.array([[1, 0], + [0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[1, 1], + [1, 1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift09(self): + # Test large cost. 
See gh-19575 + data = np.array([[np.iinfo(np.uint16).max, 0], + [0, 0]], np.uint16) + markers = np.array([[1, 0], + [0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[1, 1], + [1, 1]] + assert_allclose(out, expected) + + +@pytest.mark.parametrize("dt", [np.intc, np.uintc]) +def test_gh_19423(dt): + rng = np.random.default_rng(123) + max_val = 8 + image = rng.integers(low=0, high=max_val, size=(10, 12)).astype(dtype=dt) + val_idx = ndimage.value_indices(image) + assert len(val_idx.keys()) == max_val diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1ffcdf3f265ff0cc09fbcacbd6a547691cc332c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8ff704c90030228786e54282b7a385179bc5536 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h new file mode 100644 index 0000000000000000000000000000000000000000..525790b8c86b4d10f03b6f84a73a31c6a253bef1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { 
+namespace autograd { + +// Used to request other workers to clean up their autograd context. +class TORCH_API CleanupAutogradContextReq : public rpc::RpcCommandBase { + public: + explicit CleanupAutogradContextReq(int64_t context_id); + // Serialization and deserialization methods. + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); + + // Retrieve the context id we are cleaning up with this message. + int64_t getContextId(); + + private: + int64_t context_id_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h new file mode 100644 index 0000000000000000000000000000000000000000..6d0b6111cc88cd5a1df33d334851f8d17e166941 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Represents an RPC that includes autograd information. This class basically +// wraps another `RpcCommandBase` object which represents the actual RPC and has +// additional autograd information associated with that RPC. +class TORCH_API RpcWithAutograd final : public rpc::RpcCommandBase { + public: + // Used when we are sending an RPC over the wire. + RpcWithAutograd( + rpc::worker_id_t fromWorkerId, + rpc::MessageType messageType, + const AutogradMetadata& autogradMetadata, + c10::intrusive_ptr wrappedMessage, + rpc::DeviceMap deviceMap = {}); + + // Used when receiving an RPC over the wire. 
+ RpcWithAutograd( + rpc::worker_id_t fromWorkerId, + rpc::MessageType messageType, + const AutogradMetadata& autogradMetadata, + std::unique_ptr wrappedRpc, + rpc::MessageType wrappedMessageType, + std::vector tensors, + rpc::DeviceMap deviceMap = {}); + + c10::intrusive_ptr toMessageImpl() && override; + + static std::unique_ptr fromMessage( + const rpc::Message& message); + + // Retrieves tensors as part of this RPC, which need to be considered for + // autograd computations. + std::vector& tensors(); + + const AutogradMetadata& autogradMetadata() const; + + RpcCommandBase& wrappedRpc(); + + void setWrappedRpc(std::unique_ptr wrappedRpc); + + std::unique_ptr moveWrappedRpc() &&; + + // Message type of the wrapped RPC. + rpc::MessageType wrappedMessageType() const; + + // Retrieve the worker id from which the RPC originated. + rpc::worker_id_t fromWorkerId() const; + + // Retrieve the device map. + const rpc::DeviceMap& deviceMap(); + + private: + // WorkerId from which this RPC originated. This is necessary for knowing + // which worker we need to contact during the backward pass. + rpc::worker_id_t fromWorkerId_; + + // Message type for this call. + rpc::MessageType messageType_; + + AutogradMetadata autogradMetadata_; + + // Since wrappedMessage_ is destructively constructed from wrappedRpc_, + // they are valid exclusively. They are used for different purpose. + // wrappedRpc_ is used while constructing receive rpcWithAutograd; + // wrappedMessage_ is used while constructing send rpcWithAutograd; + + // When receive rpcWithAutograd is constructed fromMessage, it is valid; + // When send rpcWithAutograd is constructed before toMessage, it is nullptr; + std::unique_ptr wrappedRpc_; + + // Serialized message representing wrappedRpc_. Used mostly as a cache to + // avoid serializing the request twice. 
+ // When receive rpcWithAutograd is constructed fromMessage, it is nullptr; + // When send rpcWithAutograd is constructed before toMessage, it is valid; + c10::intrusive_ptr wrappedMessage_; + + // message type of the wrappedMessage, this is stored separately since + // wrappedMessage_ is not always guaranteed to be populated. + rpc::MessageType wrappedMessageType_; + + // Tensors part of the wrappedRpc that need to be considered for autograd. + std::vector tensors_; + + // Device mapping for tensors that are sent across an RPC to another node. + rpc::DeviceMap deviceMap_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h new file mode 100644 index 0000000000000000000000000000000000000000..6dc4413cfa50980af4df98bd88c9fd57e86a2a75 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Internal system RPC to invoke distributed backward pass on remote nodes when +// 'rref.backward()' is invoked. +class TORCH_API RRefBackwardReq : public rpc::RpcCommandBase { + public: + RRefBackwardReq( + const rpc::RRefId& rrefId, + int64_t autogradContextId, + bool retainGraph = false); + + const rpc::RRefId& getRRefId() const; + + int64_t getAutogradContextId() const; + + bool retainGraph() const; + + // Serialization and deserialization methods. 
+ c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); + + private: + const rpc::RRefId rrefId_; + const int64_t autogradContextId_; + const bool retainGraph_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp new file mode 100644 index 0000000000000000000000000000000000000000..df000667d26385c128a8e1cf3d625027196570c2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp @@ -0,0 +1,383 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +constexpr auto kBackendDefaultTimeout = + std::chrono::milliseconds(30 * 60 * 1000); + +namespace c10d { + +class TORCH_API Backend : public torch::CustomClassHolder { + public: + // Backend Options is a base struct that defines the basic options + // when constructing a Backend. Each Backend subclass should + // extend this struct and define its options if it wants to provide more + // config options (beyond basic ones defined here) to end user. + struct TORCH_API Options : torch::CustomClassHolder { + explicit Options( + std::string backend, + std::chrono::milliseconds timeout = kBackendDefaultTimeout) + : timeout(timeout), backend(std::move(backend)) {} + ~Options() override = default; + + std::chrono::milliseconds timeout; + + // backend name + const std::string backend; + }; + + explicit Backend(int rank, int size); + ~Backend() override = 0; + + int getRank() const { + return rank_; + } + + int getSize() const { + return size_; + } + + // Returns an unique opaque ID of this backend that can be used to correlate + // with its collectives. 
+ int64_t getID() const { + return reinterpret_cast(this); + } + + virtual void startCoalescing() { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not implement startCoalescing")); + } + + virtual c10::intrusive_ptr endCoalescing() { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not implement endCoalescing")); + } + + // Subclasses must override this method to return the backend name + virtual const std::string getBackendName() const { + TORCH_INTERNAL_ASSERT(false, "getBackendName is not implemented."); + }; + + virtual c10::intrusive_ptr broadcast( + std::vector& /* tensors */, + const BroadcastOptions& /* opts */ = BroadcastOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support broadcast")); + } + + virtual c10::intrusive_ptr allreduce( + std::vector& /* tensors */, + const AllreduceOptions& /* opts */ = AllreduceOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support allreduce")); + } + + virtual c10::intrusive_ptr allreduce_sparse( + std::vector& /* tensors */, + const AllreduceOptions& /* opts */ = AllreduceOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support allreduce sparse")); + } + + virtual c10::intrusive_ptr allreduce_coalesced( + std::vector& /* tensors */, + const AllreduceCoalescedOptions& /* opts */ = + AllreduceCoalescedOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support allreduce_coalesced")); + } + + virtual c10::intrusive_ptr reduce( + std::vector& /* tensors */, + const ReduceOptions& /* opts */ = ReduceOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support reduce")); + } + + virtual c10::intrusive_ptr allgather( + std::vector>& /* outputTensors */, + std::vector& /* inputTensors */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) { + TORCH_CHECK( + 
false, + c10::str("Backend ", getBackendName(), " does not support allgather")); + } + + // Gathers a single tensor inputBuffer into a single buffer outputBuffer that + // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE. + // For implementers of ProcessGroup API and advanced users only. + // Note: this function will be deprecated in near future. + virtual c10::intrusive_ptr _allgather_base( + at::Tensor& /* outputBuffer */, + at::Tensor& /* inputBuffer */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not support _allgather_base")); + } + + // This function is deprecated and will be moved out of Backend to comms: + // * do not add dependencies on this function, + // * do not implement it in your Backend, implement _allgather_base + // instead. + virtual c10::intrusive_ptr allgather_coalesced( + std::vector>& /* outputTensorLists */, + std::vector& /* inputTensors */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support allgather_coalesced")); + } + + // This function is a coalesced version of `allgather_into_tensor` (currently + // still named as `_allgather_base`). Each tensor in the vector corresponds to + // an input/output of one `allgather_into_tensor` operation. 
+ virtual c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& /* outputs */, + std::vector& /* inputs */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support allgather_into_tensor_coalesced")); + } + + virtual c10::intrusive_ptr gather( + std::vector>& /* outputTensors */, + std::vector& /* inputTensors */, + const GatherOptions& /* opts */ = GatherOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support gather")); + } + + virtual c10::intrusive_ptr scatter( + std::vector& /* outputTensors */, + std::vector>& /* inputTensors */, + const ScatterOptions& /* opts */ = ScatterOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support scatter")); + } + + virtual c10::intrusive_ptr reduce_scatter( + std::vector& /* outputTensors */, + std::vector>& /* inputTensors */, + const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not support reduce_scatter")); + } + + virtual c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& /* outputBuffer */, + at::Tensor& /* inputBuffer */, + const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support _reduce_scatter_base")); + } + + // This function is a coalesced version of `reduce_scatter_tensor` (currently + // still named as `_reduce_scatter_base`). Each tensor in the vector + // corresponds to an input/output of one `reduce_scatter_tensor` operation. 
+ virtual c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& /* outputs */, + std::vector& /* inputs */, + const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support reduce_scatter_tensor_coalesced")); + } + + virtual c10::intrusive_ptr alltoall_base( + at::Tensor& /* outputBuffer */, + at::Tensor& /* inputBuffer */, + std::vector& /* outputSplitSizes */, + std::vector& /* inputSplitSizes */, + const AllToAllOptions& /* opts */ = AllToAllOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not support alltoall_base")); + } + + virtual c10::intrusive_ptr alltoall( + std::vector& /* outputTensors */, + std::vector& /* inputTensors */, + const AllToAllOptions& opts = AllToAllOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support alltoall")); + } + + virtual void monitoredBarrier( + const BarrierOptions& /* unused */, + bool /* unused */ = false) { + auto backendName = getBackendName(); + TORCH_CHECK( + false, + c10::str( + "Backend ", + backendName, + " does not support monitoredBarrier, only GLOO supports monitored barrier.")); + } + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. Only implemented + // for GLOO and NCCL backends currently. + virtual void setSequenceNumberForGroup() { + auto backendName = getBackendName(); + TORCH_CHECK( + false, + c10::str( + "Backend ", + backendName, + " does not yet support sequence numbers.")); + } + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. 
+ virtual uint64_t getSequenceNumberForGroup() { + auto backendName = getBackendName(); + TORCH_CHECK( + false, + c10::str( + "Backend ", + backendName, + " does not yet support sequence numbers.")); + } + + virtual c10::intrusive_ptr send( + std::vector& /* tensors */, + int /* dstRank */, + int /* tag */) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support send")); + } + + virtual c10::intrusive_ptr recv( + std::vector& /* tensors */, + int /* srcRank */, + int /* tag */) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support recv")); + } + + virtual c10::intrusive_ptr recvAnysource( + std::vector& /* tensors */, + int /* tag */) { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not support recvAnysource")); + } + + virtual c10::intrusive_ptr barrier( + const BarrierOptions& /* opts */ = BarrierOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support barrier")); + } + + virtual void registerOnCompletionHook( + std::function)>&& hook) { + TORCH_CHECK( + false, + "Only ProcessGrouppNCCL supports onCompletion hook, but got ", + getBackendName(), + " backend."); + } + + virtual void waitForPendingWorks() { + TORCH_CHECK( + false, + "Only ProcessGrouppNCCL supports waitForPendingWorks, but got ", + getBackendName(), + " backend."); + } + + virtual void enableCollectivesTiming() { + TORCH_CHECK( + false, + "Backend ", + getBackendName(), + " is missing implementation of enableCollectivesTiming."); + } + + bool hasHooks() const { + return onCompletionHook_ != nullptr; + } + + // Do not call this directly, use ProcessGroup::setGroupName instead. + void setGroupName(const std::string& name) { + pg_name_ = name; + } + + const std::string& getGroupName() const { + return pg_name_; + } + + protected: + // Implementations of this interface need to call this to setup + // appropriate logging etc. 
+ void init(); + + const int rank_; + const int size_; + // Debug level setting. It is parsed once when ProcessGroup is constructed and + // remains the same across use of this process group. + DebugLevel dist_debug_level_; + std::string pg_name_; + + std::function)> onCompletionHook_; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp new file mode 100644 index 0000000000000000000000000000000000000000..03cb8c42c193b177872b3983cde8124e85f6ee2e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp @@ -0,0 +1,12 @@ +#include + +namespace c10d_functional { + +void register_process_group( + const std::string& tag, + c10::intrusive_ptr pg); + +c10::intrusive_ptr resolve_process_group( + const std::string& tag); + +} // namespace c10d_functional diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3fa0ca69892a6d12686415b0087d6002b764f8a9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace c10d { + +C10_EXPORT void register_process_group( + const std::string& group_name, + c10::intrusive_ptr group); + +C10_EXPORT c10::intrusive_ptr resolve_process_group( + const std::string& group_name); + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp new file mode 100644 index 
0000000000000000000000000000000000000000..b691de302a389ece3dda5a539796c5d080f6073f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp @@ -0,0 +1,61 @@ +#pragma once + +#include + +#include +#include +#include + +#include + +namespace c10d { + +class TORCH_API HashStore : public Store { + public: + ~HashStore() override = default; + + void set(const std::string& key, const std::vector& data) override; + + std::vector compareSet( + const std::string& key, + const std::vector& expectedValue, + const std::vector& desiredValue) override; + + std::vector get(const std::string& key) override; + + void wait(const std::vector& keys) override { + wait(keys, Store::kDefaultTimeout); + } + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override; + + int64_t add(const std::string& key, int64_t value) override; + + int64_t getNumKeys() override; + + bool check(const std::vector& keys) override; + + bool deleteKey(const std::string& key) override; + + void append(const std::string& key, const std::vector& value) + override; + + std::vector> multiGet( + const std::vector& keys) override; + + void multiSet( + const std::vector& keys, + const std::vector>& values) override; + + // Returns true if this store support append, multiGet and multiSet + bool hasExtendedApi() const override; + + protected: + std::unordered_map> map_; + std::mutex m_; + std::condition_variable cv_; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..25a0b6cdfec5e32b86df56b0e0aa5ce78b3afff3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp @@ -0,0 +1,139 @@ +#pragma once + 
+#include +#include +#include +#include +#include +#include + +namespace torch { + +class TORCH_API ParamCommsDebugInfo : public c10::DebugInfoBase { + public: + ParamCommsDebugInfo() = default; + ParamCommsDebugInfo( + int rank, + std::string&& colName, + int inNelems, + int outNelems, + at::ScalarType dType, + std::vector inSplitSizes, + std::vector outSplitSizes, + int worldSize); + + ~ParamCommsDebugInfo() override = default; + + int getRank() const { + return rank_; + } + + int getWorldSize() const { + return worldSize_; + } + + const std::string getColumnName() const { + return columnName_; + } + + int getInMessageNelems() const { + return inMessageNelems_; + } + + int getOutMessageNelems() const { + return outMessageNelems_; + } + + at::ScalarType getDType() const { + return dType_; + } + + const std::vector& getInputSplitSizes() const { + return inputSplitSizes_; + } + + const std::vector& getOutputSplitSizes() const { + return outputSplitSizes_; + } + + private: + int rank_{}; + int worldSize_{}; + std::string columnName_; + int inMessageNelems_{}; + int outMessageNelems_{}; + at::ScalarType dType_ = at::kByte; + std::vector inputSplitSizes_; + std::vector outputSplitSizes_; +}; + +#define RECORD_PARAM_COMMS( \ + seq, \ + pg_ptr, \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize) \ + auto paramCommsInfo = std::make_shared( \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize); \ + c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \ + std::initializer_list paramList = { \ + c10::IValue(seq), \ + c10::IValue(pg_ptr), \ + rank, \ + colName, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize}; \ + c10::ArrayRef paramInputs(paramList); \ + RECORD_FUNCTION(at::kParamCommsCallName, paramInputs); + +#define RECORD_PARAM_COMMS_DATA( \ + seq, \ + pg_ptr, \ + InputTensors, \ + OutputTensors, \ + rank, \ + colName, \ + 
inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize) \ + auto paramCommsInfo = std::make_shared( \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize); \ + c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \ + std::initializer_list paramList = { \ + c10::IValue(InputTensors), \ + c10::IValue(seq), \ + c10::IValue(pg_ptr), \ + rank, \ + colName, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize}; \ + c10::ArrayRef paramInputs(paramList); \ + RECORD_FUNCTION_WITH_INPUTS_OUTPUTS( \ + at::kParamCommsCallName, \ + paramInputs, \ + std::vector(1, c10::IValue(OutputTensors))); +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp new file mode 100644 index 0000000000000000000000000000000000000000..74399554b8cd0d000b43c55a72dd37bd9fdc8d1f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp @@ -0,0 +1,64 @@ +#pragma once + +#include +#include + +namespace c10d { + +class TORCH_API PrefixStore : public Store { + public: + explicit PrefixStore(std::string prefix, c10::intrusive_ptr store); + + using Store::set; + void set(const std::string& key, const std::vector& value) override; + + using Store::compareSet; + std::vector compareSet( + const std::string& key, + const std::vector& expectedValue, + const std::vector& desiredValue) override; + + std::vector get(const std::string& key) override; + + int64_t add(const std::string& key, int64_t value) override; + + bool deleteKey(const std::string& key) override; + + int64_t getNumKeys() override; + + bool check(const std::vector& keys) override; + + void wait(const std::vector& keys) override; + + void wait( + const std::vector& keys, + const 
std::chrono::milliseconds& timeout) override; + + const std::chrono::milliseconds& getTimeout() const noexcept override; + + void setTimeout(const std::chrono::milliseconds& timeout) override; + + void append(const std::string& key, const std::vector& value) + override; + + std::vector> multiGet( + const std::vector& keys) override; + + void multiSet( + const std::vector& keys, + const std::vector>& values) override; + + // Returns true if this store support append, multiGet and multiSet + bool hasExtendedApi() const override; + + c10::intrusive_ptr getUnderlyingStore(); + + protected: + std::string prefix_; + c10::intrusive_ptr store_; + + std::string joinKey(const std::string& key); + std::vector joinKeys(const std::vector& keys); +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3567cb35722f74594bffa6fde003dadd00575bb0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp @@ -0,0 +1,918 @@ +#pragma once + +#ifdef USE_C10D_NCCL + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace c10d { +// Environment variable which controls whether we perform a NCCL healt check +// which ensures communicators are healthy at the beginning of init. +static std::vector TORCH_ENABLE_NCCL_HEALTH_CHECK = { + "TORCH_ENABLE_NCCL_HEALTH_CHECK", + "ENABLE_NCCL_HEALTH_CHECK"}; + +// Environment variable which controls whether or not wait() is blocking or +// non-blocking. 
+static std::vector TORCH_NCCL_BLOCKING_WAIT = { + "TORCH_NCCL_BLOCKING_WAIT", + "NCCL_BLOCKING_WAIT"}; + +// Environment variable which controls whether or not we perform Async Error +// Handling with NCCL. +static std::vector TORCH_NCCL_ASYNC_ERROR_HANDLING = { + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + "NCCL_ASYNC_ERROR_HANDLING"}; + +// Environment Variable to control whether dumping debug info on watchdog +// timeout is enabled. This variable must be set together with +// TORCH_NCCL_ENABLE_MONITORING=1 and TORCH_NCCL_TRACE_BUFFER_SIZE > 0. +static std::vector TORCH_NCCL_DUMP_ON_TIMEOUT = { + "TORCH_NCCL_DUMP_ON_TIMEOUT"}; + +// Environment Variable to control whether Desync Debug is enabled. +// This variable must be set together with TORCH_NCCL_ASYNC_ERROR_HANDLING. +static std::vector TORCH_NCCL_DESYNC_DEBUG = { + "TORCH_NCCL_DESYNC_DEBUG", + "NCCL_DESYNC_DEBUG"}; + +static std::vector TORCH_NCCL_ENABLE_TIMING = { + "TORCH_NCCL_ENABLE_TIMING", + "NCCL_ENABLE_TIMING"}; + +static std::vector TORCH_NCCL_ENABLE_MONITORING = { + "TORCH_NCCL_ENABLE_MONITORING"}; + +static std::vector TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC = { + "TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"}; + +static std::vector TORCH_NCCL_TRACE_BUFFER_SIZE = { + "TORCH_NCCL_TRACE_BUFFER_SIZE"}; + +constexpr const char* NCCL_BACKEND_NAME = "nccl"; + +constexpr auto kProcessGroupNCCLDefaultTimeout = + std::chrono::milliseconds(10 * 60 * 1000); + +// NoHandling: do not handle asynchronous NCCL errors +// TearDown: tear down process upon error, see `WorkNCCL::handleException` +// CleanUpOnly: just clean up collectives and abort communicators without +// tearing down process SkipCleanUp: (this is a temporary option and can be +// removed in future) tear down process without cleaning up NCCL communicators. 
+// This should be used as a last resort in case `ncclCommAbort` itself is +// hanging +enum ErrorHandlingMode { + NoHandling = 0, + TearDown = 1, + CleanUpOnly = 2, + SkipCleanUp = 3 +}; + +#define SHOULD_CLEAN_UP(a) (a != NoHandling && a != SkipCleanUp) + +#define SHOULD_TEAR_DOWN(a) (a != NoHandling && a != CleanUpOnly) + +// If set, ProcessGroupNCCL doesn't use recordStream calls to ensure +// caching allocator safety for tensors used on both user-facing and +// internal comm streams. +// Instead, it stashes live references to those tensors until after +// user-facing streams are synced with comm streams. +// See stashed_for_allocator_safety_ below. +static std::vector TORCH_NCCL_AVOID_RECORD_STREAMS = { + "TORCH_NCCL_AVOID_RECORD_STREAMS"}; + +// If set, ProcessGroupNCCL registers postAlloc and preFree hooks to cuda cache +// allocator so that whenever a tensor is allocated or freed, ProcessGroupNCCL +// can register/deregister the tensor on all available NCCL communicators. +static std::vector TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK = + {"TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK", + "NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"}; + +// ProcessGroupNCCL implements NCCL bindings for c10d. +// +// All functions of the class are expected to be called in the same order +// across all processes in the process group. This is the only way that we +// can guarantee to match up the same calls among all processes. +// +// All NCCL functions provided by this class are asynchronous functions. More +// specifically, each NCCL call is scheduled on a separate CUDA stream that is +// different from the current CUDA stream. This is for the purpose of +// achieving potentially concurrency and better performance. As a result, +// it is the callers' responsibility to make sure that the CUDA stream their +// code works on needs to wait for the NCCL operation from +// this class. 
+// +// This can be done by calling: +// +// either WorkNCCL::wait() or WorkNCCL::synchronize(), both achieves the same +// functionality and are synonyms. +// +// Also note that WorkNCCL::finishedGPUExecution() is a helper function only +// provided by ProcessGroupNCCL to check if the NCCL operation of WorkNCCL has +// finished execution on the GPU (not just scheduled). +// +// Example on using the NCCL process group +// +// ProcessGroupNCCL pg(store, rank, size); +// std::shared_ptr work = pg.allreduce(tensors); +// +// // At this point, NCCL kernel has already by queued successfully +// // Now, let current stream wait for the NCCL to finish, this function is +// // async operation as well +// +// work->wait() +// +// // Now continue on other work in the current stream. +class TORCH_API ProcessGroupNCCL : public Backend { + public: + class WorkNCCL : public Work, public std::enable_shared_from_this { + public: + friend struct WorkInfo; + + // Constructor takes a list of CUDA devices + WorkNCCL( + const std::vector& devices, + int rank, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr, + const c10::optional>& inputs = c10::nullopt, + bool desyncDebug = false, + bool enableTiming = false); + // Copy constructor doing partial copy without outputs_. Cleanup thread + // monitors and removes finished works. However it will deadlock when + // destructs outputs_ tensors who are view tensors in autograd graph. + WorkNCCL(const WorkNCCL& w); + + ~WorkNCCL() override; + + // Checks if the NCCL kernel has started to execute. + bool isStarted(); + + // Checks if request has completed. In this specific case of NCCL, it checks + // if the NCCL operation has completed on the GPU in its own NCCL stream. + // Non-blocking operation. + bool isCompleted() override; + + bool isSuccess() const override; + + // Same as calling synchronize() for NCCL work. 
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + // Let current stream wait on the completing of the NCCL work + // Throws on exceptions. Blocking operation, which will wait for work + // completion. + void synchronize() override; + + // Synchronize streams by blocking each on the NCCL stream + void synchronizeStreams(); + + // Helper function to handle exception (throw if needed). + void handleException(ErrorHandlingMode asyncErrorHandling); + + // Helper function that checks if the NCCL kernels have finished + // execution on the GPUs + bool finishedGPUExecution(); + + // Get a Future object that will be marked as completed internally. + c10::intrusive_ptr getFuture() override; + + float getDuration() const override; + + uint64_t getSequencenumber() const override; + + // Helper function that sets an exception_ptr on the WorkNCCL object. + void setException(std::exception_ptr exception_ptr); + + // Helper function that returns True if the WorkNCCL object has timed out + // and False otherwise. + // In case of timeout, set exception on the WorkNCCL object. + bool checkTimeout( + c10::optional timeout = c10::nullopt); + + std::vector result() override; + + protected: + // The cached list of CUDA devices to operate on + std::vector devices_; + + // The start CUDA events of NCCL operator tracking this work item on + // multiple CUDA devices. These start CUDA events are needed by desync + // debugging if enabled. + std::shared_ptr> ncclStartEvents_; + + // The end CUDA events of NCCL operator tracking this work item on + // multiple CUDA devices. + std::shared_ptr> ncclEndEvents_; + + // The NCCL communicators used for this work item. + std::vector> ncclComms_; + + // Tensors used for barrier op + std::vector barrierTensors_; + + // Clone of blockingWait_ from ProcessGroupNCCL. + bool blockingWait_ = false; + + // Clone of avoidRecordStreams_ from ProcessGroupNCCL. 
+ bool avoidRecordStreams_ = false; + + // Clone of opTimeout_ from ProcessGroupNCCL. + std::chrono::milliseconds opTimeout_; + + // Time point representing when the work started. + std::chrono::time_point workStartTime_; + + // Record the collective sequential number. + uint64_t seq_; + + // Indicates if the nccl start event has been updated to the store trace. + // This will be used by desync debug. + bool startTraceUpdated_{false}; + + // Record collective sizes for debug. We only record the size on the first + // device as multi-device per process is deprecated + size_t numelIn_ = -1; + size_t numelOut_ = -1; + + // Wrapper method for the static checkForNCCLErrors which can be overridden + // for tests. + virtual std::exception_ptr checkForNCCLErrors( + const std::vector>& ncclComms) const; + + friend std::ostream& operator<<( + std::ostream& output, + const WorkNCCL& workNCCL); + + private: + // Helper function for synchronize + void synchronizeInternal(std::chrono::milliseconds timeout); + + // Checks for NCCL errors and sets an appropriate exception_ptr. + void checkAndSetException(); + + // Just checks whether GPU execution has started, without modifying + // exception_ptr. + bool startedGPUExecutionInternal() const; + + // Just checks whether GPU execution has completed, without modifying + // exception_ptr. + bool finishedGPUExecutionInternal() const; + + // Reference to the store so that we can write aborted communicators + // to the store. + c10::intrusive_ptr store_; + + // Store a reference to NCCL collective's outputs, used by result and to + // give a more descriptive message when representing the Work as a string. + std::shared_ptr> outputs_; + + // TORCH_NCCL_AVOID_RECORD_STREAMS implementation helper. + // Stores references to participating non-output tensors (ie inputs, + // flattened intermediates). + // We'll clear this list in synchronizeStreams, just after user-facing + // stream(s) are synced with the nccl work stream(s). 
+ // By keeping these refs (as well as outputs_) alive until after the + // collective's work rejoins the user-facing streams, we achieve + // caching allocator safety without any recordStream calls. + // For in-place collectives, some refs stashed here may alias outputs_, + // but that doesn't do any harm. + std::shared_ptr> stashed_for_allocator_safety_; + + // The future returned by getFuture. + c10::intrusive_ptr future_; + + bool timingEnabled_; + // unique id used to tell the trace buffer that this + // work has completed + c10::optional trace_id_; + friend class ProcessGroupNCCL; + }; + + class CoalescedWorkNCCL + : public Work, + public std::enable_shared_from_this { + public: + // Constructor takes a list of WorkNCCL works + CoalescedWorkNCCL( + std::vector works, + int rank, + OpType opType); + + ~CoalescedWorkNCCL() override; + + // Same as calling synchronize() for NCCL work. + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + protected: + // The cached list of CUDA devices to operate on + std::vector works_; + + friend class ProcessGroupNCCL; + }; + + struct Options : Backend::Options { + // NOTE: timeout in ProcessGroupNCCL::Options denote the timeout for + // operations. This is only used when blockingWait_ is enabled. 
+ explicit Options(bool is_high_priority_stream = false); + + // return intrusive_ptr of the object + static c10::intrusive_ptr create( + bool is_high_priority_stream = false) { + return c10::make_intrusive(is_high_priority_stream); + } + + // Schedule NCCL operations on high priority CUDA streams + bool is_high_priority_stream; + +#ifdef NCCL_HAS_COMM_NONBLOCKING + // Configure ranks + ncclConfig_t config = NCCL_CONFIG_INITIALIZER; +#endif + + // Optional "parent" backend and color to create communicators from + // via `ncclCommSplit` + std::shared_ptr split_from; + int64_t split_color{0}; + }; + + // If you wish to create multiple process groups, each with a potentially + // different rank and size, you can do so by passing a new store instance + // to each one. If you have only a single store object, you can + // use the `c10d::PrefixStore` to derive scoped instances. + // This is also what the Python API in torch.distributed does. + // + // The process group instance keeps a reference to the store because + // it may be used long after the constructor runs. In fact, the constructor + // doesn't create any NCCL communicators. A single NCCL communicator can + // only be used on a specific set of devices, and are therefore created + // on-demand when a collective runs. If another collective is executed later, + // against a different set of devices, the process group creates another NCCL + // communicator. These NCCL communicators are cached and reused if possible. + // + ProcessGroupNCCL( + const c10::intrusive_ptr& store, + int rank, + int size, + c10::intrusive_ptr options = Options::create()); + + // This constructor includes the deprecated `groupName` argument. + // If you have existing code that uses the `groupName`, you can replace + // it by specifying a `c10d::PrefixStore(groupName, store)` for store. 
+ C10_DEPRECATED ProcessGroupNCCL( + const c10::intrusive_ptr& store, + int rank, + int size, + const std::string& groupName, + c10::intrusive_ptr options = Options::create()) + : ProcessGroupNCCL(store, rank, size, options) {} + + ~ProcessGroupNCCL() override; + + c10::intrusive_ptr getOptions() { + return options_; + } + + const std::string getBackendName() const override { + return std::string(NCCL_BACKEND_NAME); + } + + void startCoalescing() override; + + c10::intrusive_ptr endCoalescing() override; + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr _broadcast_oop( + std::vector& outputTensors, + std::vector& inputTensors, + const BroadcastOptions& opts = BroadcastOptions()); + + c10::intrusive_ptr allreduce_sparse( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr _reduce_oop( + std::vector& outputTensors, + std::vector& inputTensors, + const ReduceOptions& opts = ReduceOptions()); + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputbuffer, + at::Tensor& inputbuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& 
outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputs, + std::vector& inputs, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + void groupStart(); + + void groupEnd(); + + void groupEndNonblocking(std::vector> comms); + + // Unsupported Ops + c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. 
+ void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + uint64_t getSequenceNumberForGroup() override; + + // Return the total number of splits the communicators held by this process + // group have performed. + uint64_t getCommSplitCounter() const; + + void registerOnCompletionHook( + std::function)>&& hook) override; + void waitForPendingWorks() override; + + void enableCollectivesTiming() override; + + // Provide an API for users to define their own ways to store NCCL debug info. + void registerDebugInfoWriter(std::unique_ptr writer); + + // Provides an API to abort the ProcessGroup (similar to ncclCommAbort) + // instead of relying on ProcessGroupNCCL destructor. + void abort(c10::optional abortReason = c10::nullopt); + + void shutdown(); + + protected: + // Helper that broadcasts nccl unique ID to all ranks through the store + void broadcastUniqueNCCLID( + ncclUniqueId* ncclID, + bool isSingleP2POp, + const std::string& devicesKey, + int p2pRank); + + // Helper that either looks up the cached NCCL communicators or creates + // a new set of NCCL communicators as a cache entry + std::vector>& getNCCLComm( + const std::string& devicesKey, + const std::vector& devices, + OpType opType, + int p2pRank = 0, + bool isSendRecvSelf = false); + + // Wrapper method which can be overridden for tests. 
+ virtual std::exception_ptr checkForNCCLErrors( + const std::vector>& ncclComms); + + virtual c10::intrusive_ptr initWork( + std::vector devices, + int rank, + OpType opType, + const char* profilingTitle = nullptr, + const std::vector& inputs = {}, + const std::vector& outputs = {}); + + virtual c10::intrusive_ptr + initCoalescedWork( + const std::vector>& works, + int rank, + OpType opType); + + private: + // Helper that encapsulates work shared across all collective communication + // primitives. The callbacks have the following signatures: + // + // ncclResult_t fn(at::Tensor& input, at::Tensor& output, + // ncclComm_t, at::cuda::CUDAStream&); + // void {pre,post}(std::vector); + template + c10::intrusive_ptr collective( + std::vector& input, + std::vector& output, + Fn fn, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false); + + template + c10::intrusive_ptr collective( + std::vector& input, + std::vector& output, + Fn fn, + PreProcess pre, + PostProcess post, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false); + + // Helper that encapsulates work shared across point-to-point communication + // primitives. It is the same structure as the helper used for collective + // communication primitives. + template + c10::intrusive_ptr pointToPoint( + std::vector& tensor, + Fn fn, + int peer, + OpType opType, + const char* profilingTitle = nullptr); + template + c10::intrusive_ptr pointToPoint( + std::vector& tensor, + Fn fn, + int peer, + OpType opType, + PreProcess pre, + PostProcess post, + const char* profilingTitle); + + c10::intrusive_ptr allreduce_impl( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()); + + // Checks for NCCL errors on each of the communicators and returns an + // appropriate exception_ptr (nullptr if no errors). 
+ static std::exception_ptr checkForNCCLErrorsInternal( + const std::vector>& ncclComms); + + // Function that runs as part of a separate thread and checks for errors on + // NCCL communicators. We need a separate thread to check for NCCL errors + // since we can't rely on the user calling certain methods like wait(), + // isCompleted() etc. to detect and remediate errors. In addition to this, we + // need a mechanism to safely abort and remove NCCL communicators from our + // cache. This can be done cleanly by having a thread for the ProcessGroupNCCL + // class. Attempting to modify the communicator cache from the WorkNCCL class + // might run into issues with object lifetime since the ProcessGroupNCCL + // object might get destroyed before the WorkNCCL object. + void ncclCommWatchdog(); + + // Performs a health check by initializing dummy NCCL communicators and then + // destroying them. This will help indicate and signal any NCCL-related issues + // prior to the first collective. The actual initialization and subsequent + // destruction is ran on a separate thread and the main thread is signalled + // about timeouts/errors to report to the application. + void runHealthCheck(); + + // Destroys initialized NCCL communicators in devNCCLComMap_ given by input + // key. Throws if there are no communicators to destroy. Also removes + // communicators from the cache and clears used device indices. + void destroyNCCLComms(const std::string& devNCCLCommMapKey); + + // Watchdog's inside loop. + // Takes care of cleaning up completed work, and aborting upon failure or + // timeout. + void workCleanupLoop(); + + void runHookLoop(); + + // In the timeout case and we will dump debug info such as the NCCL flight + // recorder to storage. Down the road, if we have more complicated or blocking + // operations, we might need to use a side thread to do it. 
+ void dumpDebuggingInfo(); + + // Desync debug helper + void logWorkStart(WorkNCCL& work); + + // Desync debug helper + void logWorkEnd(WorkNCCL& work); + + protected: + // Function that runs as part of a separate thread aside from watchdog + // thread because we need to check the heartbeat from watchdog thread + // so that when we get stuck in some NCCL/CUDA calls, + // we can dump the debugging information and abort the process. + virtual void heartbeatMonitor(); + + // Function that directly trigger std::abort so that the whole process + // gets terminated. + virtual void terminateProcess(std::string errMsg); + + // Check the writeDebugInfo_ flag and if it is true, we do nothing. + // If not, we first set the flag to be true and return a thread which will + // get and write the debug info into storage. + c10::optional tryWriteDebugInfo(); + + // When watchdog timeout, this function will be called and return debug info + // for users. For now we only get information from retrieveDesyncReport. + // We are working on enabling more useful debug information for watchdog + // timeout. + virtual std::string getNCCLWatchdogDebugInfo(); + + static const int64_t kWatchdogThreadSleepMillis; + + // The store is used to broadcast the NCCL unique ID of rank 0. + c10::intrusive_ptr store_; + + bool storeError_{false}; + + const c10::intrusive_ptr options_; + + // The number of NCCL communicators that have been created during + // the lifetime of this process group. This sequence number is + // used to scope keys used in the store. + uint64_t ncclCommCounter_{0}; + + // The store keys to trace the last NCCL collective kernel CUDA events - start + // event and end event respectively. These are used to do desync root cause + // analysis. + const std::string traceKeyStart_; + const std::string traceKeyEnd_; + + // The NCCL communicator that the process group has cached. 
+ // + // For collective operations: + // The key is a list of GPU devices that an operation is operating on + // The GPU devices are stored in a device sequence and the cache NCCL + // communicator is associated with this GPU device sequence + // + // e.g. If the process group op only uses device 0, then the value of + // the used device string stored (value of the hashmap) would be "0". + // + // If the process group op uses device 0 - 7 and the each tensor of the + // input tensor list is on device, 0, 1, 2, 3, 4, 5, 6, 7 separately, + // then the value of the used device string (key) stored would be + // "0,1,2,3,4,5,6,7" + // + // If the process group op uses device 0 - 7 and the each tensor of the + // input tensor list is on device, 0, 4, 5, 6, 7, 1, 2, 3 separately, + // then the value of the used device string stored would be + // "0,4,5,6,7,1,2,3" + // + // Note that the order of the device for the tensor list matters. + // + // For point-to-point operations: + // The key is a string of my current rank and the peer process rank. + // e.g. If process 1 and process 2 are involved in a point-to-point + // communication, the key will be "1:2" on both processes. Note: this is for + // the scenario where there is only 1 GPU per process. When it comes to + // multiple GPUs per process, this part may need to redesigned. + std::unordered_map>> + devNCCLCommMap_; + + // The NCCL communicators currently in process of being initialized. + std::unordered_map>> + inInitializationCommMap_; + + // Map from ncclUniqueId to appropriate communicator. + std::unordered_map>> + ncclIdToCommMap_; + + // Mutex to guard maps like devNCCLCommMap_ and ncclIdToCommMap_. + std::mutex mutex_; + + // Heartbeat of watchdog thread. + uint64_t heartbeat_; + + // The time interval used for deciding whether there is no watchdog heartbeat. + int heartbeatTimeoutInSec_; + + // Size of ring buffer where we store NCCL Traces for debugging. 
+ int ncclTraceBufferSize_; + + // We gate the heartbeat monitor thread so that we can roll it out gradually. + std::atomic monitorThreadEnabled_; + + // Monitor thread which checks the heartbeat of Watchdog thread. + // If the monitor thread finds there is no heartbeat, it will dump debug info + // and then kill the watchdog thread to avoid hang. + std::thread ncclHeartbeatMonitorThread_; + + // Watchdog thread which looks for errors on the cached NCCL communicators. + std::thread ncclCommWatchdogThread_; + + std::thread onCompletionHookThread_; + + // Whether or not we should terminate the watchdog and workCleanup threads. + std::atomic terminateProcessGroup_; + + // Whether or not we should terminate the heartbeat monitoring threads. + std::atomic terminateHeartbeatMonitorThread_; + + // Whether we are in the shutdown mode when we are trying to get debug info, + // such as desync report. + std::atomic collectiveDebugInfoMode_; + + // Whether there are hooks pending to be fired + std::atomic hasPendingHooks_; + + // Mutex to Guard workMetaList_ + std::mutex workMetaListMutex_; + + // Mutex to Guard monitorWakeUpCV_ + std::mutex monitorMutex_; + + bool writeDebugInfo_ = false; + + // Mutex to Guard the check of writeDebugInfo_ + std::mutex writeDebugInfoMutex_; + + // Condition Variable for watchdog thread sleep + std::condition_variable workMetaListCV_; + + // Condition Variable for monitor thread to wake up early + std::condition_variable monitorWakeUpCV_; + + // Vector to Store WorkNCCL pointers + std::list workMetaList_; + + // Mutex to Guard workMetaList_ + std::mutex completedWorkListMutex_; + + // Condition Variable for watchdog thread sleep + std::condition_variable completedWorkListCV_; + + std::list completedWorkList_; + + // Add Work Pointer to workVector + void workEnqueue(c10::intrusive_ptr); + + // The CUDA streams used by NCCL kernels + std::unordered_map> + ncclStreams_; + + // The CUDA events used to sync NCCL streams + std::unordered_map> 
ncclEvents_; + + // Device Indexes used for all collectives in this group + std::set usedDeviceIdxs_; + + // Flag to denote if a coalescing groupStart/groupEnd block is active + int coalescing_state_ = 0; + + // Stores device indexes for all collectives run inside a coalescing block + std::vector> coalescedDevices_; + + // Stores communicators for all collectives run inside a coalescing block + std::vector>> coalescedComms_; + + // map from the key: "group name + pg counter (ID)" to the + // unique NCCL ID count. This needs to be group and pg specific + // + // For each process group, we need a uniform unique NCCL ID counter to ensure + // that NCCL operation in this process group can be completed successfully. + // Since each process group ID belongs to a group name, the key to this map + // is a combination of group name and ProcessGroupNCCL ID. + static std::unordered_map pgUniqueNCCLIDCnt_; + + // map from group name to the pg counter (ID) within that group + // + // For each group with the "group name" (which is the key), we need to + // keep track of a unique process group ID when creating a new + // ProcessGroupNCCL for this "group name". Therefore, the value of this + // map keeps the unique ProcessGroupNCCL's ID for a specific group with + // the "group name". The reason we need a per-group process group ID counter + // is that different group can have different ranks and we need ensure that + // each group has its own uniform process group ID for all its ranks. + static std::unordered_map processGroupCounterMap_; + + // Whether or not wait() and synchronize() are blocking operations that wait + // for the operation to complete. + bool blockingWait_ = false; + + // Whether or not to hook the cache allocator to register all allocated + // tensors + bool useTensorRegisterAllocatorHook_ = false; + + // Whether or not the workCleanupThread is used to perform async error + // handling. 
+ ErrorHandlingMode asyncErrorHandling_ = NoHandling; + + // Whether or not to enable timeout root cause analysis. + bool desyncDebug_; + + // Whether or not to dump debug info on timeout + bool dumpOnTimeout_; + + // Whether or not to create start CUDAEvent and enable timing for start + // and end events. Note that enableTiming_ is always true if desyncDebug_ + // is set to true. + std::atomic enableTiming_; + + // Whether or not TORCH_NCCL_AVOID_RECORD_STREAMS was set + bool avoidRecordStreams_ = false; + + // Set of communicators that this process group has aborted and their + // ncclUniqueId has been written to the store. We don't need a lock + // for this map since only the watchdog thread accesses this set. The + // set contains the string representation of ncclUniqueId. + std::unordered_set abortedComms_; + + // The number of active ncclGroupStart() calls. This counter will be increased + // by 1 when ncclGroupStart() is called and decreased by 1 when ncclGroupEnd() + // is called. + static thread_local uint64_t ncclActiveGroupCounter_; + + // Counting for the sequential number of NCCL collective call. + uint64_t seq_{0}; + + std::exception_ptr watchDogException_ = nullptr; + + // The callback function to store NCCL debug info. 
+ std::unique_ptr debugInfoWriter_ = nullptr; + + size_t uid_; +}; + +TORCH_API std::string dump_nccl_trace(); + +} // namespace c10d + +#endif // USE_C10D_NCCL diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8255bceebd6cf2c0d2d4c2b98e0396c1020a3d6b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp @@ -0,0 +1,113 @@ +#pragma once + +#include + +#include + +namespace c10d { + +constexpr const char* ROUND_ROBIN_BACKEND_NAME = "round_robin"; + +// ProcessGroupRoundRobin implements simple load balancing. +// +// It is constructed with multiple processes groups. Each call is dispatched to +// one of the specified process groups in a round robin fashion. Each process +// group instance must have the same rank and size. +// +// All functions of the class are expected to be called in the same order +// across all processes in the process group. This is the only way that we +// can guarantee to match up the same calls among all processes. 
+// +class TORCH_API ProcessGroupRoundRobin final : public ProcessGroup { + public: + explicit ProcessGroupRoundRobin( + int rank, + int size, + std::vector> processGroups); + + ~ProcessGroupRoundRobin() override; + + const std::string getBackendName() const override { + return std::string(ROUND_ROBIN_BACKEND_NAME); + } + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputs, + std::vector& inputs, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputs, + std::vector>& inputs, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputs, + std::vector>& inputs, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& 
tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + private: + std::vector> processGroups_; + std::vector>::const_iterator iterator_; + + // Returns the next ProcessGroup to use. + const c10::intrusive_ptr& next(); +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp new file mode 100644 index 0000000000000000000000000000000000000000..22fc58134566c67fd455bd5abbedbe0cc2a8df41 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp @@ -0,0 +1,353 @@ +#pragma once + +#ifdef USE_C10D_UCC + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef USE_CUDA +#include +#include +#endif + +namespace c10d { + +#define TORCH_UCC_DEVICE_NOT_SET -2 + +#ifdef USE_CUDA +#define SAVE_TENSORS(_TENSORS, _DATA) \ + do { \ + if ((_TENSORS)[0].device().is_cuda()) { \ + for (const auto i : c10::irange((_TENSORS).size())) { \ + c10::cuda::CUDACachingAllocator::recordStream( \ + (_TENSORS)[i].storage().data_ptr(), (*stream)); \ + } \ + } else { \ + (_DATA) = (_TENSORS); \ + } \ + } while (0) + +#else +#define SAVE_TENSORS(_TENSORS, _DATA) (_DATA) = (_TENSORS); +#endif + +constexpr const char* UCC_BACKEND_NAME = "ucc"; + +struct event_pool_t { +#ifdef USE_CUDA + std::queue> event_pool; +#endif + std::mutex event_pool_mutex; +}; + +class Comm; + +// UCC does not support multiple CUDA devices per process. 
+class TORCH_API ProcessGroupUCC : public Backend { + private: + void set_timeout(ucc_coll_args_t& args); + + public: + class WorkData { + public: + std::vector src; + std::vector dst; + std::vector flat; + WorkData() {} + virtual ~WorkData() = default; + }; + class AlltoallWorkData : public WorkData { + public: + AlltoallWorkData(int size) + : send_lengths(size), + send_offsets(size), + recv_lengths(size), + recv_offsets(size) {} + std::vector send_lengths; + std::vector send_offsets; + std::vector recv_lengths; + std::vector recv_offsets; + }; + + class AllgathervWorkData : public WorkData { + public: + AllgathervWorkData(int size) : recv_lengths(size), recv_offsets(size) {} + std::vector recv_lengths; + std::vector recv_offsets; + }; + + class ScattervWorkData : public WorkData { + public: + ScattervWorkData(int size) : send_lengths(size), send_offsets(size) {} + std::vector send_lengths; + std::vector send_offsets; + }; + + class ProgressEntry { + friend class ProcessGroupUCC; + friend class Comm; + + public: + ProgressEntry(CommBase* comm, ucc_coll_req_h request) + : status_(UCC_INPROGRESS), comm_(comm), request_(request) {} + // Finalizes UCC status or exception of collective request. 
+ void finalize(std::exception_ptr eptr = nullptr); + ucc_status_t status_; + CommBase* comm_; + ucc_coll_req_h request_; + std::unique_ptr data; + c10::intrusive_ptr future_; + std::exception_ptr eptr_; + }; + + class WorkUCC : public Work { + friend class ProcessGroupUCC; + friend class Comm; + + public: + WorkUCC( + OpType opType, + uint64_t seq, + const char* prof_title, + const c10::optional>& inputs, + const c10::intrusive_ptr& logger) + : Work(-1, opType, prof_title, inputs), logger_(logger), seq_(seq) {} + ~WorkUCC(); + void setException(); + void setAndThrowException(); + bool isCompleted() override; + bool isSuccess() const override; + bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override; + c10::intrusive_ptr getFuture() override; + std::vector result() override; + int sourceRank() const override; +#ifdef USE_CUDA + std::unique_ptr fence = nullptr; + event_pool_t* ep = nullptr; +#endif + int sourceRank_; + + protected: + std::shared_ptr entry_; + c10::intrusive_ptr logger_; + uint64_t seq_; + + private: + // The future returned by getFuture. + c10::intrusive_ptr future_; + // Store a reference to collective's outputs, used by result + std::shared_ptr> outputs_; + }; + + explicit ProcessGroupUCC( + const c10::intrusive_ptr& store, + int rank = -1, + int size = -1, + std::chrono::duration timeout = kBackendDefaultTimeout); + + void initComm(c10::Device dev); + + ~ProcessGroupUCC() override; + + const std::string getBackendName() const override { + return std::string(UCC_BACKEND_NAME); + } + +#ifdef USE_CUDA + std::unique_ptr getPooledEvent(); +#endif + + // Performs a health check by initializing dummy UCC & UCX communicators and + // then destroying them. This will help indicate and signal any + // UCC/UCX-related issues prior to the first collective. The actual + // initialization and subsequent destruction is ran on a separate thread and + // the main thread is signalled about timeouts/errors to report to the + // application. 
+ void runHealthCheck(); + + template + c10::intrusive_ptr collective_post( + OpType opType, + PreProcess preproc, + PostProcess postproc, + ucc_coll_args_t& coll, + std::unique_ptr data, + c10::Device dev, + std::vector& inputTensors, + std::vector& outputTensors, + const char* prof_title); + + c10::intrusive_ptr broadcast( + std::vector& data, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const 
AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + // Counting for the sequential number of UCC collective_post call. + uint64_t seq_{0}; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. + void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + uint64_t getSequenceNumberForGroup() override; + + static c10::intrusive_ptr createProcessGroupUCC( + const c10::intrusive_ptr<::c10d::Store>& store, + int rank, + int size, + const std::chrono::duration& timeout); + + protected: + const std::chrono::duration timeout_; + std::shared_ptr oob; + std::shared_ptr comm = {nullptr}; + uint32_t comm_id; + ucc_team_h team{nullptr}; + ucc_ee_h cuda_ee{nullptr}; + ucc_ee_h cuda_ee_p2p[2]{nullptr, nullptr}; + +#ifdef USE_CUDA + std::unique_ptr stream = nullptr; + std::unique_ptr stream_p2p[2] = {nullptr, nullptr}; + event_pool_t ep; +#endif + c10::intrusive_ptr logger; +}; + +class Comm { + c10::intrusive_ptr logger; + std::shared_ptr oob; + CommUCC ucc_comm; + std::mutex mutex; + std::thread progress_thread; + std::condition_variable queue_produce_cv; + std::condition_variable queue_consume_cv; + std::deque> progress_queue; + bool stop_progress_loop; + bool collective_inprogress; + torch_ucc_phase_t finalize_phase; + + public: + c10::DeviceIndex cuda_device_index; + Comm( + const c10::intrusive_ptr& logger, + std::shared_ptr oob, + c10::Device dev, + bool is_health_check); + + ~Comm(); + + void ucc_create_team( + ucc_team_h& team, + std::shared_ptr oob); + + void ucc_destroy_team(ucc_team_h& 
team); + + c10::intrusive_ptr enqueue_p2p( + OpType opType, + ucc_coll_req_h request, + const char* prof_title); + +#ifdef USE_CUDA + void enqueue_cuda_collective( + std::unique_ptr data, + c10::intrusive_ptr work, + ucc_coll_args_t& coll, + ucc_team_h team, + ucc_ee_h ee); +#endif + + void enqueue_collective( + std::unique_ptr data, + c10::intrusive_ptr work, + ucc_coll_args_t& coll, + ucc_team_h team); + + static std::shared_ptr get_comm( + uint32_t& id, + c10::Device dev, + std::shared_ptr oob, + const c10::intrusive_ptr& logger, + bool is_health_check = false); + + void progress_loop(); +}; + +} // namespace c10d + +#endif // USE_C10D_UCC diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp new file mode 100644 index 0000000000000000000000000000000000000000..13503ca3f5a9fadd13ec537b52c64b62d2d1ac33 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp @@ -0,0 +1,140 @@ +#pragma once + +#ifdef USE_C10D_GLOO + +#include +#include +#include + +namespace c10d { + +class TORCH_API ProcessGroupWrapper : public Backend { + public: + explicit ProcessGroupWrapper( + c10::intrusive_ptr backend, + c10::intrusive_ptr glooBackend); + + const std::string getBackendName() const override; + + c10::intrusive_ptr broadcast( + std::vector& data, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& data, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& 
outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + // This function is deprecated and will be moved out of ProcessGroup to comms: + // * do not add dependencies on this function, + // * do not implement it in your ProcessGroup, implement _allgather_base + // instead. + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) override; + + void monitoredBarrier(const BarrierOptions& opts, bool waitAllRanks = false) + override; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. Only implemented + // for GLOO and NCCL backends currently. + // dont implement this + void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. 
If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + uint64_t getSequenceNumberForGroup() override; // just call underlying + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const ReduceScatterOptions& opts) override; + + void startCoalescing() override; + + c10::intrusive_ptr endCoalescing() override; + + c10::intrusive_ptr getWrappedPg() const; + + private: + // Underlying process group that actual application collectives will be + // dispatched to + c10::intrusive_ptr backend_; + // Gloo process group responsible for internal coordination such as monitored + // barrier, sequence number checking, collective fingerprint collecting. + c10::intrusive_ptr glooBackend_; + // Conducts several checks to ensure that the underlying collective is well + // formed with the goal of notifying the user about incorrect collective use + // in the application. 
+ void runCollectiveChecks( + OpType op_type, + const std::vector& tensors); +}; +} // namespace c10d + +#endif // USE_C10D_GLOO diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9b361172b22df16b3a6b36bbfc07a8785bdd6ecd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp @@ -0,0 +1,73 @@ + +#pragma once + +#include + +#include + +namespace c10d { + +// `RankLocal` maintains a unique instance of T for each non-autograd thread. +// For non-autograd threads, `RankLocal::get()` functions similar to +// thread_local. For autograd threads, `RankLocal::get()` returns the +// instance of T corresponding to the enqueuing non-autograd thread. The +// mechanism allows for rank-specific context shared between forward and +// backward. It works for both the one-rank-per-process and one-rank-per-thread +// scenarios. +// +// NOTE: RankLocal doesn't make the underlying objects thread-safe. +template +class RankLocal { + public: + RankLocal(const RankLocal&) = delete; + RankLocal& operator=(const RankLocal&) = delete; + + static T& get() { + // Fast path: non-autograd threads can simply return + // the object reference cached in TLS. + if (cached_ != nullptr) { + return *cached_; + } + const auto node = torch::autograd::get_current_node(); + auto fwd_thread_id = node == nullptr ? at::RecordFunction::currentThreadId() + : node->thread_id(); + // Optimistically aquire the read lock first, since most likely we are in + // an autograd thread and the object has already been constructed. 
+ { + std::shared_lock read_lock(lock_); + auto it = thread_id_to_rank_local_.find(fwd_thread_id); + if (it != thread_id_to_rank_local_.end()) { + // Cache for non-autograd threads + if (node == nullptr) { + cached_ = &it->second; + } + return it->second; + } + } + + std::unique_lock write_lock(lock_); + auto [it, _] = thread_id_to_rank_local_.try_emplace(fwd_thread_id); + // Cache for non-autograd threads + if (node == nullptr) { + cached_ = &it->second; + } + return it->second; + } + + private: + RankLocal(){}; + thread_local static T* cached_; + static std::unordered_map thread_id_to_rank_local_; + static std::shared_mutex lock_; +}; + +template +thread_local T* RankLocal::cached_ = nullptr; + +template +std::unordered_map RankLocal::thread_id_to_rank_local_; + +template +std::shared_mutex RankLocal::lock_; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6771baaf7373946ac9ed6acc0f61e432db995427 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp @@ -0,0 +1,161 @@ +#pragma once + +#include +#include +#include + +#include + +namespace c10d { +namespace detail { + +class TCPServer; + +class TCPClient; + +struct SocketAddress { + std::string host{}; + std::uint16_t port{}; +}; + +class Counter { + public: + void update(double val); + std::unordered_map observe() const; + + double mean() const noexcept { + return mean_; + } + int64_t count() const noexcept { + return count_; + } + double variance() const noexcept { + return m2_ / count_; + } + double sample_variance() const noexcept { + return m2_ / (count_ - 1); + } + + private: + int64_t count_ = 0; + double mean_ = 0; + double m2_ = 0; +}; + +} // namespace detail + +struct TCPStoreOptions { + static 
constexpr std::uint16_t kDefaultPort = 29500; + + std::uint16_t port = kDefaultPort; + bool isServer = false; + c10::optional numWorkers = c10::nullopt; + bool waitWorkers = true; + std::chrono::milliseconds timeout = Store::kDefaultTimeout; + + // A boolean value indicating whether multiple store instances can be + // initialized with the same host:port pair. + bool multiTenant = false; + + // If specified, and if isServer is true, the underlying TCPServer will take + // over the bound socket associated to this fd. This option is useful to avoid + // port assignment races in certain scenarios. + c10::optional masterListenFd = c10::nullopt; + + // A boolean value indicating whether to use the experimental libUV backend. + bool useLibUV = false; +}; + +class TORCH_API TCPStore : public Store { + public: + explicit TCPStore(std::string host, const TCPStoreOptions& opts = {}); + + [[deprecated("Use TCPStore(host, opts) instead.")]] explicit TCPStore( + const std::string& masterAddr, + std::uint16_t masterPort, + c10::optional numWorkers = c10::nullopt, + bool isServer = false, + const std::chrono::milliseconds& timeout = kDefaultTimeout, + bool waitWorkers = true); + + ~TCPStore() override; + + void set(const std::string& key, const std::vector& value) override; + + std::vector compareSet( + const std::string& key, + const std::vector& expectedValue, + const std::vector& desiredValue) override; + + std::vector get(const std::string& key) override; + + int64_t add(const std::string& key, int64_t value) override; + + bool deleteKey(const std::string& key) override; + + bool check(const std::vector& keys) override; + + int64_t getNumKeys() override; + + void wait(const std::vector& keys) override; + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override; + + void append(const std::string& key, const std::vector& value) + override; + + std::vector> multiGet( + const std::vector& keys) override; + + void multiSet( + const std::vector& 
keys, + const std::vector>& values) override; + + bool hasExtendedApi() const override; + + // Waits for all workers to join. + void waitForWorkers(); + + // Returns the hostname used by the TCPStore. + const std::string& getHost() const noexcept { + return addr_.host; + } + + // Returns the port used by the TCPStore. + std::uint16_t getPort() const noexcept { + return addr_.port; + } + + std::unordered_map> + collectClientCounters() const noexcept; + + bool isLibUvBackend() const noexcept { + return usingLibUv_; + } + + private: + int64_t incrementValueBy(const std::string& key, int64_t delta); + + void validate(void); + + std::vector doGet(const std::string& key); + + void doWait( + c10::ArrayRef keys, + std::chrono::milliseconds timeout); + + detail::SocketAddress addr_; + std::shared_ptr server_; + std::unique_ptr client_; + c10::optional numWorkers_; + + const std::string initKey_ = "init/"; + const std::string keyPrefix_ = "/"; + std::mutex activeOpLock_; + std::unordered_map clientCounters_; + bool usingLibUv_ = false; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp new file mode 100644 index 0000000000000000000000000000000000000000..572340d1429840bb008aa272fea24315d0382d56 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp @@ -0,0 +1,77 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#ifdef _WIN32 +#include +#include +#else +#include +#include +#endif + +namespace c10d { +namespace detail { + +// Magic number for client validation. 
+static const uint32_t validationMagicNumber = 0x3C85F7CE; + +enum class QueryType : uint8_t { + VALIDATE, + SET, + COMPARE_SET, + GET, + ADD, + CHECK, + WAIT, + GETNUMKEYS, + DELETE_KEY, + APPEND, + MULTI_GET, + MULTI_SET, + CANCEL_WAIT, +}; + +enum class CheckResponseType : uint8_t { READY, NOT_READY }; + +enum class WaitResponseType : uint8_t { STOP_WAITING, WAIT_CANCELED }; + +// Abstract base class to handle thread state for TCPStoreMasterDaemon. +// Contains the windows/unix implementations to signal a +// shutdown sequence for the thread +class BackgroundThread { + public: + explicit BackgroundThread(); + + virtual ~BackgroundThread() = 0; + virtual std::uint16_t port() const = 0; + + void start(); + bool stop_requested(); + + protected: + void dispose(); + virtual void run() = 0; + virtual void stop() = 0; + bool is_running() { + return is_running_.load(); + } + + private: + std::atomic is_running_; + std::thread daemonThread_{}; +}; + +std::unique_ptr create_tcpstore_backend( + const TCPStoreOptions& opts); +std::unique_ptr create_libuv_tcpstore_backend( + const TCPStoreOptions& opts); +bool is_libuv_tcpstore_backend_available(); + +} // namespace detail +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..bda2bed5700500ee533e585ac6eba9b259886456 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h @@ -0,0 +1,543 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +namespace c10d { + +/* Trace Utils Related to TORCH_NCCL_DESYNC_DEBUG */ + +inline std::string getTraceStartKey(const std::string& pgName, int rank) { + return pgName + "_" + std::to_string(rank) + 
"_trace_start"; +} + +inline std::string getTraceEndKey(const std::string& pgName, int rank) { + return pgName + "_" + std::to_string(rank) + "_trace_end"; +} + +inline bool traceUpdate( + c10::intrusive_ptr& store, + const std::string& key, + uint64_t seq, + const std::string& col) { + std::vector value(col.size() + sizeof(seq) + 1); + memcpy(value.data(), &seq, sizeof(seq)); + memcpy(value.data() + sizeof(seq), col.data(), col.size()); + try { + store->set(key, value); + return true; + } catch (...) { + LOG(ERROR) << "Store is down while updating #" << seq << " with key " + << key; + return false; + } + return true; +} + +enum TraceDebugEvent { + kEventStart, + kEventEnd, +}; +// >> +using TraceMap = + std::map>>; + +inline std::string ranksToString(const std::vector& ranks) { + std::string str; + for (int rank : ranks) { + if (str.empty()) { + str = std::to_string(rank); + } else { + str += ", " + std::to_string(rank); + } + } + return str; +} + +inline std::string ranksFromTrace( + const std::vector>& items) { + std::string ranks; + for (auto& p : items) { + if (ranks.empty()) { + ranks = std::to_string(p.first); + } else { + ranks += ", " + std::to_string(p.first); + } + } + return ranks; +} + +inline std::string analyzeMissingRanks(const std::vector& missingRanks) { + return c10::str( + "\n\t - To our best knowledge, ranks [", + ranksToString(missingRanks), + "] are the lagging ranks that caused this timeout. 
" + "They never joined any collectives"); +} + +inline std::string analyzeLaggingRanks(const TraceMap& traceMap) { + uint64_t lagSeq = traceMap.begin()->first; + std::vector startRanks; + std::vector endRanks; + for (auto& p : traceMap.begin()->second) { + if (p.second.second == kEventStart) { + startRanks.push_back(p.first); + } else { + endRanks.push_back(p.first); + } + } + std::string report = + "\n\t - To our best knowledge, the lagging/dead/mismatched ranks " + "that caused the desync are:"; + if (startRanks.size()) { + report += c10::str( + "\n\t - [", + ranksToString(startRanks), + "] joined but didn't finish collective #", + lagSeq, + " (count from 1)"); + } + if (endRanks.size()) { + report += c10::str( + "\n\t [", + ranksToString(endRanks), + "] finished collective #", + lagSeq, + ", but didn't join collective #", + lagSeq + 1, + " (count from 1)"); + } + return report; +} + +inline std::string dumpSnapshot(TraceMap& traceMap) { + std::string report = "\n\t - Snapshot of ranks' latest states:"; + for (auto& tracePair : traceMap) { + uint64_t seq = tracePair.first; + std::map>& subMap = + tracePair.second; + + std::unordered_map> collectivesStart; + std::unordered_map> collectivesEnd; + for (auto& p : subMap) { + int rank = p.first; + const std::string& col = p.second.first; + if (p.second.second == kEventStart) { + collectivesStart[col].push_back(rank); + } else { + collectivesEnd[col].push_back(rank); + } + } + + if (collectivesStart.size()) { + report += c10::str("\n\t #", seq, " started ranks:"); + for (auto& mapPair : collectivesStart) { + report += c10::str( + "\n\t [", + ranksToString(mapPair.second), + "] started ", + mapPair.first); + } + } + if (collectivesEnd.size()) { + report += c10::str("\n\t #", seq, " finished ranks:"); + for (auto& mapPair : collectivesEnd) { + report += c10::str( + "\n\t [", + ranksToString(mapPair.second), + "] finished ", + mapPair.first); + } + } + } + return report; +} + +inline bool parseTraceValue( + 
c10::intrusive_ptr& store, + const std::string& key, + uint64_t& seq, + std::string& col) { + try { + std::vector traceValue = store->get(key); + memcpy(&seq, traceValue.data(), sizeof(seq)); + std::string colName((char*)traceValue.data() + sizeof(seq)); + col = colName; + return true; + } catch (...) { + LOG(ERROR) << "Store is down while getting key " << key; + return false; + } + return true; +} + +inline std::string retrieveDesyncReport( + c10::intrusive_ptr& store, + const std::string& pgName, + int myRank, + int worldSize) { + std::string report; + + uint64_t thisSeq; + std::string thisCol; + + std::vector missingRanks; + TraceMap traceMap; + + for (const auto rank : c10::irange(worldSize)) { + // Build traceMapStart. + uint64_t seqStart; + { + std::string traceKeyStart = getTraceStartKey(pgName, rank); + if (!store->check({traceKeyStart})) { + missingRanks.push_back(rank); + continue; + } + std::string col; + if (!parseTraceValue(store, traceKeyStart, seqStart, col)) { + return report; + } + traceMap[seqStart].emplace(rank, std::make_pair(col, kEventStart)); + if (rank == myRank) { + thisSeq = seqStart; + thisCol = std::move(col); + } + } + + // Build traceMapEnd. + { + std::string traceKeyEnd = getTraceEndKey(pgName, rank); + if (!store->check({traceKeyEnd})) { + continue; + } + uint64_t seq; + std::string col; + if (!parseTraceValue(store, traceKeyEnd, seq, col)) { + return report; + } + if (seq == seqStart) { + traceMap[seq][rank].second = kEventEnd; + } + } + } + + TORCH_INTERNAL_ASSERT( + !missingRanks.empty() || !traceMap.empty(), + "Trace shouldn't be empty while enabled GLOO_ASYNC_TIMEOUT_DEBUG"); + TORCH_INTERNAL_ASSERT( + !thisCol.empty(), + "Timeout rank [", + myRank, + "] must have collective tracking iteam in c10::Store trace"); + TORCH_INTERNAL_ASSERT( + traceMap[thisSeq][myRank].second == kEventStart, + "Timeout rank [", + myRank, + "] last trace item must be kEventStart. 
thisSeq = ", + thisSeq, + ", col = ", + thisCol); + + report += c10::str( + "\n\t - [", myRank, "] Timeout at collective: ", thisCol, ", #", thisSeq); + + if (!missingRanks.empty()) { + report += analyzeMissingRanks(missingRanks); + } else { + report += analyzeLaggingRanks(traceMap); + report += dumpSnapshot(traceMap); + } + + return report; +} + +/* Trace Utils Related to Flight Recorder */ + +/* Note: this is only used by PGNCCL (could be generalized in an ideal world but + * wasn't done that way, so isn't expected to be fully general at the moment) */ + +#ifdef USE_C10D_NCCL + +DebugInfoWriter::DebugInfoWriter(int rank) { + std::string fileName = getCvarString( + {"TORCH_NCCL_DEBUG_INFO_TEMP_FILE"}, "/tmp/nccl_trace_rank_"); + filename_ = c10::str(fileName, rank); +} + +DebugInfoWriter::~DebugInfoWriter() = default; + +void DebugInfoWriter::write(const std::string& ncclTrace) { + // Open a file for writing. The ios::binary flag is used to write data as + // binary. + std::ofstream file(filename_, std::ios::binary); + + // Check if the file was opened successfully. 
+ if (!file.is_open()) { + LOG(ERROR) << "Error opening file for writing NCCLPG debug info: " + << filename_; + return; + } + + file.write(ncclTrace.data(), ncclTrace.size()); + LOG(INFO) << "Finished writing NCCLPG debug info to " << filename_; +} + +inline std::string pickle_str(const c10::IValue& v) { + std::vector result; + { + auto writer = [&](const char* data, size_t size) { + result.insert(result.end(), data, data + size); + }; + torch::jit::Pickler pickler( + writer, nullptr, nullptr, nullptr, nullptr, false); + pickler.protocol(); + pickler.pushIValue(v); + pickler.stop(); + } + return std::string(result.begin(), result.end()); +} + +inline c10::Dict new_dict() { + return c10::Dict( + c10::AnyType::get(), c10::AnyType::get()); +} + +inline c10::List new_list() { + return c10::List(c10::AnyType::get()); +} + +struct NCCLTraceBuffer { + static NCCLTraceBuffer* get() { + // intentionally leak on exit + // because this will hold python state that may get destructed + static NCCLTraceBuffer* instance = new NCCLTraceBuffer(); + return instance; + } + NCCLTraceBuffer() { + max_entries_ = getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0); + capture_cpp_stack_ = getCvarBool({"TORCH_NCCL_TRACE_CPP_STACK"}, false); + enabled_ = max_entries_ > 0; + } + using EventList = std::vector; + struct Entry { + size_t id_; // incremented id in the trace buffer + // used to figure out where in the circular entries + // buffer this entry will be located to + // update state information + size_t pg_id_; + size_t seq_id_; // as tracked by the process group + const char* profiling_name_; + + std::shared_ptr traceback_; + // we borrow pointser to start_ and end_ so we can query the state + // on reporting. However, once the event is completed, the call + // to `complete` will clear these. 
+ EventList *start_, *end_; + + // timestamp when the entry was created, likely close to the time the work + // was 'enqueued'- not necessarily started + c10::time_t time_created_; + + const char* state_ = "scheduled"; + + // size information for input/output tensors + c10::SmallVector input_dims_; + c10::SmallVector output_dims_; + c10::SmallVector sizes_; // flattened from inputs, outputs + bool retired_ = false; // is this work entry no longer in the workMetaList_? + // a retired but not completed event has timed out + }; + + bool enabled_ = false; + bool capture_cpp_stack_ = false; + std::mutex mutex_; + std::vector entries_; + size_t max_entries_ = 0; + size_t next_ = 0; + size_t id_ = 0; + + c10::optional record( + size_t pg_id, + size_t seq_id, + const char* profiling_name, + const std::vector& inputs, + const std::vector& outputs, + EventList* start, + EventList* end) { + if (!enabled_) { + return c10::nullopt; + } + auto traceback = + torch::CapturedTraceback::gather(true, true, capture_cpp_stack_); + std::lock_guard guard(mutex_); + + auto te = Entry{ + id_, + pg_id, + seq_id, + profiling_name, + std::move(traceback), + std::move(start), + std::move(end), + c10::getTime()}; + + for (const auto& input : inputs) { + c10::IntArrayRef sizes = input.sizes(); + te.input_dims_.push_back(sizes.size()); + te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end()); + } + + for (const auto& output : outputs) { + c10::IntArrayRef sizes = output.sizes(); + te.output_dims_.push_back(sizes.size()); + te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end()); + } + + if (entries_.size() < max_entries_) { + entries_.emplace_back(std::move(te)); + } else { + entries_[next_++] = std::move(te); + if (next_ == max_entries_) { + next_ = 0; + } + } + return id_++; + } + + void update_state(Entry& r) { + if (r.start_ != nullptr) { + bool started = true; + for (auto& ev : *r.start_) { + if (!ev.query()) { + started = false; + break; + } + } + if (started) { + r.state_ = 
"started"; + } + } + if (r.end_ != nullptr) { + bool completed = true; + for (auto& ev : *r.end_) { + if (!ev.query()) { + completed = false; + break; + } + } + if (completed) { + r.state_ = "completed"; + } + } + } + + std::vector dump_entries() { + std::lock_guard guard(mutex_); + std::vector result; + result.reserve(entries_.size()); + result.insert(result.end(), entries_.begin() + next_, entries_.end()); + result.insert(result.end(), entries_.begin(), entries_.begin() + next_); + // query any remaining events + for (auto& r : result) { + update_state(r); + r.start_ = r.end_ = nullptr; + } + return result; + } + + void retire_id(c10::optional id) { + if (!enabled_ || !id) { + return; + } + std::lock_guard guard(mutex_); + auto& entry = entries_.at(*id % max_entries_); + if (entry.id_ == *id) { + update_state(entry); + entry.retired_ = true; + entry.start_ = entry.end_ = nullptr; + } + } + + std::string dump() { + auto result = dump_entries(); + auto entries = new_list(); + c10::IValue pg_id_s = "pg_id"; + c10::IValue seq_id_s = "seq_id"; + c10::IValue profiling_name_s = "profiling_name"; + c10::IValue input_sizes_s = "input_sizes"; + c10::IValue output_sizes_s = "output_sizes"; + c10::IValue time_created_s = "time_created_us"; + + c10::IValue frames_s = "frames"; + c10::IValue state_s = "state"; + c10::IValue line_s = "line"; + c10::IValue name_s = "name"; + c10::IValue filename_s = "filename"; + c10::IValue retired_s = "retired"; + + std::vector tracebacks; + for (auto& e : result) { + tracebacks.push_back(e.traceback_.get()); + } + torch::SymbolizedTracebacks stracebacks = torch::symbolize(tracebacks); + std::vector all_frames; + for (const auto& f : stracebacks.all_frames) { + auto d = new_dict(); + d.insert(name_s, f.funcname); + d.insert(filename_s, f.filename); + d.insert(line_s, int64_t(f.lineno)); + all_frames.emplace_back(std::move(d)); + } + + for (auto i : c10::irange(result.size())) { + auto& e = result.at(i); + auto& tb = 
stracebacks.tracebacks.at(i); + auto dict = new_dict(); + dict.insert(pg_id_s, int64_t(e.pg_id_)); + dict.insert(seq_id_s, int64_t(e.seq_id_)); + dict.insert(profiling_name_s, e.profiling_name_); + dict.insert(time_created_s, int64_t(e.time_created_ / 1000)); + + auto it = e.sizes_.begin(); + auto read_sizes = [&](const c10::SmallVector& dims) { + auto sizes = new_list(); + for (auto dim : dims) { + auto arg_sizes = new_list(); + for (auto i : c10::irange(dim)) { + (void)i; + arg_sizes.push_back(*it++); + } + sizes.push_back(arg_sizes); + } + return sizes; + }; + + dict.insert(input_sizes_s, read_sizes(e.input_dims_)); + dict.insert(output_sizes_s, read_sizes(e.output_dims_)); + dict.insert(state_s, e.state_); + dict.insert(retired_s, e.retired_); + + auto frames = new_list(); + for (int64_t frame : tb) { + frames.push_back(all_frames.at(frame)); + } + dict.insert(frames_s, frames); + entries.push_back(dict); + } + return pickle_str(entries); + } +}; + +#endif +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp new file mode 100644 index 0000000000000000000000000000000000000000..dc9a9856965addb9792796d4928c7592ea38c64a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp @@ -0,0 +1,180 @@ +#pragma once + +#include + +#include +#include + +#include +#include + +#include +#include + +namespace c10d { + +// Base class for supplementary data potentially needed by ReduceOps +struct TORCH_API _SupplementBase : torch::CustomClassHolder { + ~_SupplementBase() override = default; +}; + +// Supplementary data specific to NCCL PREMUL_SUM +// The point of use in ProcessGroupNCCL knows how to unpack it. 
+struct NCCLPreMulSumSupplement : _SupplementBase { + double double_factor{0.0}; + at::Tensor tensor_factor; + NCCLPreMulSumSupplement(double f) : double_factor{f} {} + NCCLPreMulSumSupplement(at::Tensor t) : tensor_factor{std::move(t)} { + TORCH_CHECK_EQ(tensor_factor.numel(), 1); + } +}; + +// Other ReduceOps that need different supplementary data can also +// derive from _SupplementBase. +struct TORCH_API ReduceOp : torch::CustomClassHolder { + // note(crcrpar): RedOpType could be defined outside of `ReduceOp` + enum RedOpType : uint8_t { + SUM = 0, + AVG = 1, + PRODUCT = 2, + MIN = 3, + MAX = 4, + BAND = 5, // Bitwise AND + BOR = 6, // Bitwise OR + BXOR = 7, // Bitwise XOR + PREMUL_SUM = 8, // Multiply by a user-supplied constant before summing. + UNUSED = 9 + }; + + ReduceOp() = default; + + ReduceOp(RedOpType op) : op_(op) { + TORCH_INTERNAL_ASSERT( + op_ != PREMUL_SUM, + "Use `torch.distributed._make_nccl_premul_sum` to create an instance of ReduceOp with PREMUL_SUM"); + } + + ReduceOp( + RedOpType op, + c10::intrusive_ptr<_SupplementBase> optional_supplement) { + if (optional_supplement.get()) { + op_ = op; + } else { + supplement_ = optional_supplement; + } + } + + // The heap resource supplement_, if it exists, is managed by a + // c10::intrusive_ptr, so constructors and operator= can be simple + ReduceOp(const ReduceOp& other) + : op_(other.op_), supplement_(other.supplement_) {} + + const ReduceOp& operator=(const ReduceOp& other) { + op_ = other.op_; + supplement_ = other.supplement_; + return *this; + } + + operator RedOpType() const { + return op_; + } + + bool operator==(const std::uint8_t other) { + TORCH_INTERNAL_ASSERT(other < 9, "Invalid other op value"); + return other == op_; + } + + bool operator==(const ReduceOp::RedOpType other) { + return *this == static_cast(other); + } + + // todo(crcrpar): Handle `RedOpType::PREMUL_SUM` with its scaling factor. 
+ bool operator==(const ReduceOp& other) { + return *this == other.op_; + } + + RedOpType op_ = SUM; + // supplement_ is "type-erased" storage for optional supplementary + // data the op might need. + // The point of use will know the derived type supplement_ really is, + // and downcast its pointer to extract the data as the needed type(s). + // Right now, only PREMUL_SUM needs supplementary data, but the same + // mechanism could extend to support other nontrivial reduce ops with + // different supplementary payloads. + c10::intrusive_ptr<_SupplementBase> supplement_; +}; + +template +ReduceOp makeNCCLPreMulSum(const T& factor) { + ReduceOp rop; + rop.op_ = ReduceOp::PREMUL_SUM; + rop.supplement_ = c10::make_intrusive(factor); + return rop; +} + +constexpr auto kUnsetTimeout = std::chrono::milliseconds(-1); + +struct BroadcastOptions { + int64_t rootRank = 0; + int64_t rootTensor = 0; + std::chrono::milliseconds timeout = kUnsetTimeout; + bool asyncOp = true; +}; + +struct AllreduceOptions { + ReduceOp reduceOp = ReduceOp::SUM; + std::chrono::milliseconds timeout = kUnsetTimeout; + c10::optional sparseIndices = c10::nullopt; +}; + +struct AllreduceCoalescedOptions : AllreduceOptions {}; + +struct ReduceOptions { + ReduceOp reduceOp = ReduceOp::SUM; + int64_t rootRank = 0; + int64_t rootTensor = 0; + std::chrono::milliseconds timeout = kUnsetTimeout; +}; + +struct AllgatherOptions { + std::chrono::milliseconds timeout = kUnsetTimeout; + bool asyncOp = true; +}; + +struct GatherOptions { + int64_t rootRank = 0; + std::chrono::milliseconds timeout = kUnsetTimeout; +}; + +struct ScatterOptions { + int64_t rootRank = 0; + std::chrono::milliseconds timeout = kUnsetTimeout; + bool asyncOp = true; +}; + +struct ReduceScatterOptions { + ReduceOp reduceOp = ReduceOp::SUM; + std::chrono::milliseconds timeout = kUnsetTimeout; + bool asyncOp = true; +}; + +struct AllToAllOptions { + std::chrono::milliseconds timeout = kUnsetTimeout; +}; + +struct BarrierOptions { + 
std::vector device_ids; + std::chrono::milliseconds timeout = kUnsetTimeout; + c10::optional device; +}; + +struct DistributedBackendOptions { + c10::intrusive_ptr<::c10d::Store> store; + int group_rank; + int group_size; + std::chrono::duration timeout; + std::string group_id; + std::vector global_ranks_in_group; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp new file mode 100644 index 0000000000000000000000000000000000000000..953cec8a1bc36e8550b026e49ef4d2b4fab76e75 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp @@ -0,0 +1,58 @@ +#pragma once + +#ifdef USE_C10D_UCC + +#include + +namespace c10d { + +#define RECORD_COMMS_TRACE( \ + _comms_tracer, _work, _opType, _rank, _comm_size, _inTensors, _outTensors) \ + do { \ + if (torch_ucc_config.enable_comms_logger) { \ + _comms_tracer->recordComms( \ + opTypeToString(_opType), \ + (uintptr_t)_work.get(), \ + _rank, \ + _comm_size, \ + _inTensors, \ + _outTensors); \ + } \ + } while (0) + +// interfaces to collect communication traces +class TORCH_API CommTraceLogger : public torch::CustomClassHolder { + private: + std::vector comms_trace_; + std::vector curBlocks_; /* unused */ + std::vector curOutSplitSizes_; + std::vector curInSplitSizes_; + int curRoot_ = -1; + unsigned long seqnum = 0; + + public: + void setCurBlock(const std::string& name); /* unused */ + void popBlock(); /* unused */ + // record root info if applicable, e.g., broadcast, gather, scatter + void recordOptionalInfo(int root = -1); + // record input/output splits of Alltoallv + void recordOptionalInfo( + const std::vector& outputSplitSizes = {}, + const std::vector& inputSplitSizes = {}); + // record essential comms information + void recordComms( + const std::string& collName, + const uintptr_t 
workReq = 0, + const int rank = -1, + const int world_size = -1, + const std::vector& inputTensors = {}, + const std::vector& outputTensor = {}); + // return collected comms traces + std::vector& getCommsTrace() { + return comms_trace_; + } +}; + +} // namespace c10d + +#endif // USE_C10D_UCC diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a44e2de86ef7dc2477d59cbf221f477b00cc8370 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp @@ -0,0 +1,187 @@ +#pragma once + +#ifdef USE_C10D_UCC + +#include +#include +#include + +namespace c10d { + +// Macro to generate the error message on a non-successful UCC return value. +#define TORCH_UCC_GET_ERROR_MSG(_err, _error_msg, _result) \ + do { \ + _err = c10::str( \ + "[", \ + std::string(__FILE__), \ + ":", \ + std::to_string(__LINE__), \ + "] ", \ + logger->getLogPrefix(), \ + _error_msg, \ + ", error code ", \ + _result, \ + ": ", \ + ucc_status_string(_result), \ + ", system error code ", \ + errno); \ + } while (0) + +// Macro to throw on a non-successful UCC return value. +#define TORCH_UCC_CHECK(_cmd, _error_msg) \ + do { \ + ucc_status_t result = _cmd; \ + if (result != UCC_OK) { \ + std::string err; \ + TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \ + TORCH_CHECK(false, err); \ + } \ + } while (0) + +// Macro and throw on a non-successful UCC return value and free its request. 
+#define TORCH_UCC_CHECK_REQUEST(_request, _cmd, _error_msg) \ + do { \ + ucc_status_t result = _cmd; \ + if (result != UCC_OK) { \ + std::string err; \ + TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \ + if (_request != nullptr) { \ + ucc_collective_finalize(_request); \ + } \ + TORCH_CHECK(false, err); \ + } \ + } while (0) + +// Macros to print logs with unified format +#define TORCH_UCC_LOG_ERROR(_phase, _msg) \ + LOG(ERROR) << logger->getLogPrefix(_phase) << "[ERROR] " << _msg; +#define TORCH_UCC_LOG_INFO(_phase, _msg) \ + LOG(INFO) << logger->getLogPrefix(_phase) << "[INFO] " << _msg; +#define TORCH_UCC_LOG_DEBUG(_phase, _msg) \ + VLOG(1) << logger->getLogPrefix(_phase) << "[DEBUG] " << _msg; + +enum torch_ucc_phase_t { + TORCH_UCC_UNKNOWN = -1, + TORCH_UCC_INIT, + TORCH_UCC_HEALTH_CHECK, + TORCH_UCC_READY, + TORCH_UCC_COLL_POST, + TORCH_UCC_COLL_PROGRESS, + TORCH_UCC_FINALIZE, +}; + +const std::map ucc_phase_map = { + {TORCH_UCC_UNKNOWN, "UNKNOWN"}, + {TORCH_UCC_INIT, "INIT"}, + {TORCH_UCC_HEALTH_CHECK, "HEALTH_CHECK"}, + {TORCH_UCC_READY, "READY"}, + {TORCH_UCC_COLL_POST, "COLL_POST"}, + {TORCH_UCC_COLL_PROGRESS, "COLL_PROGRESS"}, + {TORCH_UCC_FINALIZE, "FINALIZE"}, +}; + +class CommTraceLogger; + +class TORCH_API ProcessGroupUCCLogger : public torch::CustomClassHolder { + public: + ProcessGroupUCCLogger(); + ProcessGroupUCCLogger(std::string log_prefix, torch_ucc_phase_t phase); + + std::string getLogPrefix(torch_ucc_phase_t phase = TORCH_UCC_UNKNOWN); + void setLogPrefix(std::string log_prefix); + inline void setPhase(torch_ucc_phase_t phase) { + local_phase = phase; + } + + void initCommsTracer(); + void flushComms(int rank, int world_size); + std::shared_ptr trace_generator = nullptr; + + protected: + std::string log_prefix; + torch_ucc_phase_t local_phase = TORCH_UCC_UNKNOWN; + bool initialized_CommTraceLogger = false; +}; + +struct torch_ucc_oob_coll_info_t { + c10::intrusive_ptr store; + uint32_t comm_id; + int rank; + int size; + void* rbuf; + 
size_t msglen; + std::string getKey(std::string key) { + return std::to_string(comm_id) + key; + } +}; + +class CommBase { + public: + CommBase(const c10::intrusive_ptr& logger_) + : logger(logger_) {} + virtual void progress() = 0; + virtual void free_request(ucc_coll_req_h request) = 0; + virtual ~CommBase() {} + c10::intrusive_ptr logger; +}; +class CommUCC : public CommBase { + public: + ucc_lib_h lib{nullptr}; + ucc_context_h context{nullptr}; + + public: + void progress() override; + CommUCC( + std::shared_ptr oob, + const c10::intrusive_ptr& logger); + void free_request(ucc_coll_req_h request) override; + ~CommUCC(); +}; + +ucc_status_t oob_allgather( + void* sbuf, + void* rbuf, + size_t msglen, + void* coll_info, + void** req); + +ucc_status_t oob_allgather_test(void* req); + +ucc_status_t oob_allgather_free(void* req); + +// trim: remove spaces before and after the string view +// implementation borrowed from https://stackoverflow.com/a/17976541 +inline c10::string_view trim(c10::string_view s) { + auto wsfront = std::find_if_not( + s.begin(), s.end(), [](int c) { return std::isspace(c); }); + auto wsback = std::find_if_not(s.rbegin(), s.rend(), [](int c) { + return std::isspace(c); + }).base(); + return ( + wsback <= wsfront ? "" : s.substr(wsfront - s.begin(), wsback - wsfront)); +} + +inline std::string tolower(c10::string_view s) { + std::string result; + result.reserve(s.size()); + for (auto c : s) { + result.push_back(std::tolower(c)); + } + return result; +} + +inline std::vector parse_list(std::string list) { + std::vector result; + list = tolower(trim(list)); + while (!list.empty()) { + const auto end_pos = list.find_first_of(','); + const auto token = trim(list.substr(0, end_pos)); + result.push_back(std::string(token)); + list = (end_pos != c10::string_view::npos) ? 
list.substr(end_pos + 1) : ""; + } + return result; +} + +} // namespace c10d + +#endif // USE_C10D_UCC diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ffce091b6c5f0841de4ca514911bc64ed3ce30d5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace c10d { +namespace tcputil { + +#define CONNECT_SOCKET_OFFSET 2 + +inline int poll(struct pollfd* fds, unsigned long nfds, int timeout) { + return ::poll(fds, nfds, timeout); +} + +inline void addPollfd( + std::vector& fds, + int socket, + short events) { + fds.push_back({.fd = socket, .events = events}); +} + +inline struct ::pollfd getPollfd(int socket, short events) { + struct ::pollfd res = {.fd = socket, .events = events}; + return res; +} + +} // namespace tcputil +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9b2b1aa245f841eac7d61f2238bf7a8385846612 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace c10d { +namespace tcputil { + +#define CONNECT_SOCKET_OFFSET 1 + +inline int poll(struct pollfd* fdArray, unsigned long fds, int timeout) { + return WSAPoll(fdArray, fds, timeout); +} + +inline void addPollfd( + std::vector& fds, + int socket, + short events) { + fds.push_back({(SOCKET)socket, events}); +} + +inline struct ::pollfd getPollfd(int socket, short events) { + struct ::pollfd res = 
{(SOCKET)socket, events}; + return res; +} + +} // namespace tcputil +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp new file mode 100644 index 0000000000000000000000000000000000000000..50c6ae03861b73c6e238fd804560800a6ee048fa --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp @@ -0,0 +1,161 @@ +#pragma once + +#include +#include +#include + +constexpr auto kNoTimeout = std::chrono::milliseconds(0); + +namespace c10d { + +constexpr const char* const kSeqNumStoreKey = "SEQ_NUM_STORE_KEY"; + +enum class OpType : std::uint8_t { + BROADCAST = 0, + ALLREDUCE = 1, + ALLREDUCE_COALESCED = 2, + REDUCE = 3, + ALLGATHER = 4, + _ALLGATHER_BASE = 5, + ALLGATHER_COALESCED = 6, + GATHER = 7, + SCATTER = 8, + REDUCE_SCATTER = 9, + ALLTOALL_BASE = 10, + ALLTOALL = 11, + SEND = 12, + RECV = 13, + RECVANYSOURCE = 14, + BARRIER = 15, + _REDUCE_SCATTER_BASE = 16, + COALESCED = 17, + _ALLREDUCE_SPARSE = 18, + UNKNOWN = 100, +}; + +// Converts OpType to human readable string. +TORCH_API std::string opTypeToString(OpType opType); + +// Whether or not an OP is an p2p op (SEND, RECV, RECVANYSOURCE) +TORCH_API bool isP2POp(OpType opType, bool batchP2P = false); + +// Please do not use Work API, it is going away, to be +// replaced by ivalue::Future. +// Python binding for this class might change, please do not assume +// this will be bound using pybind. +class TORCH_API Work : public torch::CustomClassHolder { + public: + Work( + int rank = -1, + OpType opType = OpType::UNKNOWN, + const char* profilingTitle = nullptr, + const c10::optional>& inputTensors = + c10::nullopt); + + ~Work() override; + + // Checks if request has completed. Non-blocking operation. + virtual bool isCompleted(); + + // Returns if the work completed successfully. 
+ // If false, the exception function can be called to get details. + virtual bool isSuccess() const; + + // Returns exception if isSuccess() returned false. + virtual std::exception_ptr exception() const; + + // Returns source rank if this objects represents a recv-from-any. + virtual int sourceRank() const; + + // Returns result tensors, if applicable. + // If work is not supposed to have result, we return empty list. + virtual std::vector result(); + + // Ensures that operations on the output tensors that are invoked + // after this function returns are correctly sequenced after the + // asynchronous completion of this work. + // + // For CUDA tensors, it inserts stream synchronization such that + // the streams of the caller wait for completion of the + // asynchronous operations on the destination tensors. + // + // For CPU tensors, it is currently a nop. + // + // This function should only be used if the caller polls for + // completion through the `isCompleted` function, it has returned + // true, and the `isSuccess` function also has returned true. + // + virtual void synchronize(); + + // Waits until request completes. Blocking operation. + // Throws if the work completed with an exception. + // Returns false if the work is aborted. + // Otherwise, it always returns true, indicating the work is completed. + // + // Functionally equivalent to: + // + // while (!isCompleted()) { /* nop */ } + // auto success = isSuccess(); + // if (!success) { std::rethrow_exception(exception()); } + // return success; + // + virtual bool wait(std::chrono::milliseconds timeout = kNoTimeout); + + virtual void abort(); + + // Returns a Future object that will be associated with the completion of + // work. Only NCCL backend is currently supported. 
+ virtual c10::intrusive_ptr getFuture(); + + virtual float getDuration() const; + + virtual uint64_t getSequencenumber() const; + + OpType retrieveOpType() const; + + static c10::intrusive_ptr create_from_future( + const c10::intrusive_ptr&); + + protected: + // Completes the work object and optionally sets the exception in a + // thread-safe manner. Notifies all waiting condition variables as well. + void finish(std::exception_ptr exception = nullptr); + + // Similar to finish, but throws an exception if one is already set or + // provided by the user. + void finishAndThrow(std::exception_ptr exception); + + mutable std::mutex mutex_; + std::condition_variable cv_; + bool completed_ = false; + std::exception_ptr exception_; + + // Current rank of the node. + const int rank_; + + // Operation type that this work object refers to. + OpType opType_; + + // When profiling, the callback to record end of operation event. This + // callback needs to be called when collective operation is complete. 
+ std::function recordFunctionEndCallback_; +}; + +struct TORCH_API WorkInfo { + WorkInfo( + const OpType& opType, + const std::chrono::time_point& timeStarted, + const std::chrono::time_point& timeFinished, + const std::chrono::duration& activeDuration) + : opType(opType), + timeStarted(timeStarted), + timeFinished(timeFinished), + activeDuration(activeDuration) {} + + OpType opType; + std::chrono::time_point timeStarted; + std::chrono::time_point timeFinished; + std::chrono::duration activeDuration; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h new file mode 100644 index 0000000000000000000000000000000000000000..5151a33f7ee351184e53daa68155dcc6c7390358 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace distributed { +namespace c10d { + +PyMethodDef* python_functions(); + +} // namespace c10d +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ee8db21c172a4d0dd3febc39bb9bb5021caee5f8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp @@ -0,0 +1,140 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10d { + +// Broadcast many tensors to all processes in the process group. +TORCH_API void broadcast_coalesced( + const c10::intrusive_ptr& process_group, + at::TensorList tensors, + size_t buffer_size, + int rank = 0); + +// This class passes bucket contents tensor to DDP communication hook. 
+class TORCH_API GradBucket { + public: + explicit GradBucket( + size_t index, + size_t bucket_count, + at::Tensor tensor, + std::vector offsets, + std::vector lengths, + std::vector sizes_vec, + std::vector parameters, + c10::optional sparse_grad_indices) + : index_(index), + bucket_count_(bucket_count), + buffer_(std::move(tensor)), + offsets_(std::move(offsets)), + lengths_(std::move(lengths)), + sizes_vec_(std::move(sizes_vec)), + parameters_(std::move(parameters)), + sparse_grad_indices_(std::move(sparse_grad_indices)) {} + + // Returns the index of the bucket, which is unique across all the buckets. + size_t getIndex() const { + return index_; + } + + const at::Tensor& getBuffer() const { + return buffer_; + } + + // Returns a mutable buffer compared with the above method. + at::Tensor& getBufferRef() { + return buffer_; + } + + // Overwrites the buffer at a specific index. + void setBuffer(at::Tensor& buffer) { + buffer_ = buffer; + } + + // Each tensor in the list that getGradients corresponds to a + // parameter. + std::vector getGradients() const; + + // Returns model parameters belonging to this bucket. They are returned in the + // same order as gradient tensors via getGradients(). For example, + // getParameters[i] will have its gradient stored in + // getGradients[i] + const std::vector getParameters() const { + return parameters_; + } + + // Returns whther this bucket is the last bucket to allreduce in an iteration. + bool isLast() const { + return index_ == bucket_count_ - 1; + } + + c10::optional& getSparseGradIndices() { + return sparse_grad_indices_; + } + + private: + size_t index_; + size_t bucket_count_; + at::Tensor buffer_; + + // Per-variable info in buffer_. + std::vector offsets_; + std::vector lengths_; + std::vector sizes_vec_; + + // Model parameters for this bucket. + const std::vector parameters_; + + // Predefined sparse indices for this bucket (only used for sparse tensors). 
+ // The gradients will be updated to have indices with these tensor values + c10::optional sparse_grad_indices_; +}; + +// Base class of both `PythonCommHook` and `CppCommHook`. +// Requires implementing 1) `runHook` method that communicates gradients +// asynchronously, and 2) `parseHookResult` method that converts the hook +// result into a tensor. +class TORCH_API CommHookInterface { + public: + virtual ~CommHookInterface() = default; + + // Passes the input grad bucket to the registered communication hook. + // Once the tensor in the bucket are ready, kicks off the hook asynchronously + // and returns a future that holds the communication results. + virtual c10::intrusive_ptr runHook( + GradBucket& bucket) = 0; + + // Returns the resulting tensor once the communication hook result is + // ready. The resulting tensor will then be copied to the grads of + // individual parameters. + virtual at::Tensor parseHookResult(const c10::IValue& result) = 0; +}; + +namespace detail { +// This helper function is called both by CppCommHookInterface below and inside +// reducer. +TORCH_API at::Tensor parseCppCommHookResult(const c10::IValue& result); +} // namespace detail + +// This CppCommHook interface only requires implementing runHook method that +// potentially uses a state. 
+template +class CppCommHookInterface : public CommHookInterface { + public: + explicit CppCommHookInterface(T state) : state_(std::move(state)) {} + + ~CppCommHookInterface() override = default; + + at::Tensor parseHookResult(const c10::IValue& result) override { + return detail::parseCppCommHookResult(result); + } + + protected: + T state_; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp new file mode 100644 index 0000000000000000000000000000000000000000..683841f3ba885f96c94d688190fc530a88e01003 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp @@ -0,0 +1,52 @@ +#pragma once + +#include +#include + +namespace c10d { + +enum class BuiltinCommHookType { + ALLREDUCE = 1, + FP16_COMPRESS = 2, +}; + +class AllReduceCommHook + : public CppCommHookInterface> { + public: + explicit AllReduceCommHook(const c10::intrusive_ptr& state) + : CppCommHookInterface>(state) {} + + ~AllReduceCommHook() override = default; + + c10::intrusive_ptr runHook(GradBucket& bucket) override; +}; + +class FP16CompressCommHook + : public CppCommHookInterface> { + public: + explicit FP16CompressCommHook(const c10::intrusive_ptr& state) + : CppCommHookInterface>(state) {} + + ~FP16CompressCommHook() override = default; + + c10::intrusive_ptr runHook(GradBucket& bucket) override; +}; + +// Almost same as AllReduceCommHook, but without division inside the hook. +// This enables the optimization of fusing copy and division and saves one scan +// over all the input parameters, when no communication hook is provided by the +// user. Only used internally and not released as a public built-in +// communication hook. 
+class _AllReduceBySumCommHook + : public CppCommHookInterface> { + public: + explicit _AllReduceBySumCommHook( + const c10::intrusive_ptr& state) + : CppCommHookInterface>(state) {} + + ~_AllReduceBySumCommHook() override = default; + + c10::intrusive_ptr runHook(GradBucket& bucket) override; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h new file mode 100644 index 0000000000000000000000000000000000000000..a00b6f70653aaa8d4456033800c5dc69942e3b03 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h @@ -0,0 +1,33 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +#include +#include + +// Utility macro similar to C10_THROW_ERROR, the major difference is that this +// macro handles exception types defined in the c10d namespace, whereas +// C10_THROW_ERROR requires an exception to be defined in the c10 namespace. 
+#define C10D_THROW_ERROR(err_type, msg) \ + throw ::c10d::err_type( \ + {__func__, __FILE__, static_cast(__LINE__)}, msg) + +namespace c10d { + +using c10::DistNetworkError; + +class TORCH_API SocketError : public DistNetworkError { + using DistNetworkError::DistNetworkError; +}; + +class TORCH_API TimeoutError : public DistNetworkError { + using DistNetworkError::DistNetworkError; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5ae305ce4eb9f1347ef710ddd7c79f2c55865566 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp @@ -0,0 +1,104 @@ +#include +#include + +#include + +namespace c10d { + +class TORCH_API Logger { + public: + explicit Logger(std::shared_ptr reducer); + // Set logging data that can be got during DistributedDataParallel + // construction time. + void set_construction_data_and_log( + const std::string& module_name, + const std::vector& device_ids, + int output_device, + bool broadcast_buffers, + bool has_sync_bn, + bool static_graph); + + void set_static_graph(); + + // An interface for users to get DDPLoggingData and log them + // in the applications. Explanation of logging fields are in + // "struct DDPLoggingData" of "torch/c10/util/Logging.h". + at::DDPLoggingData get_ddp_logging_data(); + + // Stream insertion operator for logging data to stream under + // TORCH_DISTRIBUTED_DEBUG. + friend std::ostream& operator<<(std::ostream& output, const Logger& logger); + + ~Logger() noexcept(false) { + // Log if DDP graph is static in Logger dtor instead of Reducer dtor since + // Logger is deleted before Reducer. + log_if_graph_static(reducer_->ddp_graph_static()); + } + + // Set environment variables. 
+ void set_env_variables(); + // Set parameters stats. + void set_parameter_stats(); + // Get size of each bucket (Bytes). + std::vector get_bucket_sizes(); + // Get variable indices for each bucket. + std::vector> get_per_bucket_variable_indices(); + // Set comm. hook, if used + void set_comm_hook(const std::string& hook); + // Set running with uneven input detection (model.join() context manager) + void set_uneven_input_join(); + + // Reset performance stats at current iteration + void reset_performance_stats(); + + // Calculate avg stats using cpu timer and gpu timer + // that has been recorded in reducer. + void calculate_avg_time( + int64_t& avg_time, + int64_t& time_duration, + Timer& timer, + Timer::Event start_event, + Timer::Event end_event); + + // Set the absolute time of the event that has been recorded in reducer. + void set_event_time(int64_t& event_time, Timer& timer, Timer::Event event); + // Set stats that can be collected only during + // training loop. It is called at the beginning of forward call + // to record the run time stats of sampled iterations that previously ran. + // GPU performance stats are collected only for single process + // single device program and single device module right now. + // TODO to support single process multiple devices and multi device modules, + // events need to be created and recorded on multiple devices. + void set_runtime_stats_and_log(); + + // Called when DDP/reducer is failing with an error. The + // logging data structure will have two fields filled: "has_error" indicating + // that this iteration encountered an error and other fields are not valid, + // and "error", a string which contains the error message that DDP failed + // with. + template + void set_error_and_log(const std::string& ddp_error, const Args&... 
args) { + ddp_logging_data_->ints_map["has_error"] = 1; + auto err = c10::str(ddp_error, args...); + ddp_logging_data_->strs_map["error"] = err; + // Report the iteration we are erroring at so user knows how many examples + // successfully processed before this error was hit. + ddp_logging_data_->ints_map["iteration"] = reducer_->num_iterations_; + at::LogPyTorchDDPUsage(*ddp_logging_data_); + } + + // When running without static graph, called when reducer is destroyed to log + // if graph was actually static and is a candidate for static graph + // optimization. + void log_if_graph_static(bool is_static); + + private: + // ddp_logging_data_ is used to hold all the ddp related logging + // data fields. + std::unique_ptr ddp_logging_data_; + std::shared_ptr reducer_; + // track the number of iterations when runtime stats are collected so far. + long num_iterations_stats_recorded_ = 0; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..b4df02e773807ee8786570cf0044f0d3615ff592 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h @@ -0,0 +1,51 @@ +// Copyright (c) Meta Platforms, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +#include +#include +#include + +namespace c10d { +namespace detail { + +enum class LogLevel { Trace, Debug, Info, Warning, Error }; + +TORCH_API bool isLogLevelEnabled(LogLevel level) noexcept; + +template +std::string formatLogMessage(fmt::string_view fmt, T&&... 
args) { + return fmt::vformat(fmt, fmt::make_format_args(args...)); +} + +} // namespace detail +} // namespace c10d + +#define C10D_ERROR(...) \ + LOG_IF( \ + ERROR, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Error)) \ + << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__) + +#define C10D_WARNING(...) \ + LOG_IF( \ + WARNING, \ + c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Warning)) \ + << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__) + +#define C10D_INFO(...) \ + LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Info)) \ + << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__) + +#define C10D_DEBUG(...) \ + LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Debug)) \ + << "[c10d - debug] " << c10d::detail::formatLogMessage(__VA_ARGS__) + +#define C10D_TRACE(...) \ + LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Trace)) \ + << "[c10d - trace] " << c10d::detail::formatLogMessage(__VA_ARGS__) diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h new file mode 100644 index 0000000000000000000000000000000000000000..48ad7cefae9418ffc989a334aa8b2636ec110219 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h @@ -0,0 +1,34 @@ +#pragma once + +#include + +#include +#include +#include +#include + +namespace c10d { + +class TORCH_PYTHON_API PythonCommHook : public CommHookInterface { + public: + // Takes a state and a callable hook. The inputs are Python objects. + // The state is passed to the hook in runHook method, and it can be used to + // maintain and update any state information during the execution of the hook. 
+ // The hook performs user-specified processing and returns a future indicating + // asychronous communication of gradients. + PythonCommHook(py::object state, py::object hook) + : state_(std::move(state)), hook_(std::move(hook)) {} + + ~PythonCommHook() override; + + c10::intrusive_ptr runHook(GradBucket& bucket) override; + + at::Tensor parseHookResult(const c10::IValue& result) override; + + private: + // Only needed for stateful communication. + py::object state_; + py::object hook_; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..43782204be05496e52b2e7f0415847d17f12b1a5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp @@ -0,0 +1,589 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef _WIN32 +#include +#endif + +namespace c10d { + +constexpr int kDefaultFirstBucketBytes = int(1024 * 1024); +constexpr int kDefaultBucketBytesCap = int(25 * 1024 * 1024); +// Collect runtime stats once for every kDDPRuntimeLoggingSampleRate iterations. +constexpr int kDDPRuntimeLoggingSampleRate = 100; + +// Forward declaration +class Logger; + +// Local accumulator type for a single bucket. +struct BucketAccumulator { + std::vector indices; + size_t size = 0; + size_t size_limit = 0; +}; + +class TORCH_API Reducer { + public: + // The constructor takes a list of variables (i.e. parameters) for this + // process's single model replica (as DDP assumes single-process + // single-device). 
The bucket assignment for this reducer, `bucket_indices`, + // is specified as a list of buckets, each of which is specified as a list of + // indices into the bucket's `variables` list. + explicit Reducer( + std::vector params, + std::vector> bucket_indices, + std::vector per_bucket_size_limits, + c10::intrusive_ptr process_group, + std::vector expect_sparse_gradients, + int64_t bucket_bytes_cap, + bool find_unused_parameters, + bool gradient_as_bucket_view, + std::unordered_map param_names, + int64_t first_bucket_bytes_cap); + + ~Reducer() noexcept(false); + + // To (re-)initialize bucket assignment, pass a list of buckets, each of + // which is specified by a list of indices in the bucket's `variables` list. + // This function performs validation that the variables within a bucket + // all live on the same device and have the same dimensionality. + void initialize_buckets(std::vector> bucket_indices); + + void autograd_hook(size_t index); + + // This function is called when the forward function has produced an output, + // and the user wishes to reduce gradients in the backwards pass. + // If they don't, and wish to accumulate gradients before reducing them, + // a call to this function can simply be omitted. + void prepare_for_backward(const std::vector& outputs); + + // Called at the beginning of forward() inside DistributedDataParallel, + // right now it captures the starting time of forward in each iteration. + void prepare_for_forward(); + + // Returns the relative time in nanoseconds when gradients were ready, + // with respect to the time `prepare_for_backward` was called. The + // vector is for parameters for a single model replica. + std::vector get_backward_stats() const { + return backward_stats_; + } + + // Registers a hook to the reducer. The hook is `CommHookInterface` + // type to allow both Python and CPP hooks. This function can only + // be called once before calling backward. + // Cannot combine with the call of `register_builtin_comm_hook`. 
+ void register_comm_hook(std::unique_ptr iface); + + // Registers a built-in C++ comm hook to the reducer. This function can only + // be called once before calling backward. + // Cannot combine with the call of `register_comm_hook`. + void register_builtin_comm_hook(c10d::BuiltinCommHookType comm_hook_type); + + // Informs reducer that optimizer is running in backward, so gradients + // don't need to be copied from buckets as the optimizer would've already + // been applied. + void set_optimizer_in_backward() { + optim_in_backward_ = true; + }; + + // Runs allreduce or installed communication hook given GradBucket instance. + c10::intrusive_ptr run_comm_hook( + GradBucket& grad_bucket); + + // Runs default allreduce hook. + c10::intrusive_ptr run_allreduce_hook( + GradBucket& grad_bucket); + + // Returns gradient buckets in sequential order of buckets_. This is the order + // in which buckets are reduced across processes. If return_zero_tensors=true, + // will return zero tensors of the same shape instead of the true tensors. + std::vector get_grad_buckets( + bool return_zero_tensors = true) const; + + // Rebuild buckets based on rebuilt_params_ and rebuilt_param_indices_ + // according to when tensors received grads in the backward pass. + // TODO this function makes broadcast communication call and + // could be overlapped with next forward() call, thus + // it could be async. Will make it async when rebuilding buckets for + // find_unused_parameters = true case, as we could rebuild buckets more than + // once for find_unused_parameters = true case, where subgraphs are trained + // and parameter indices order may change more frequently. + // For find_unused_parameters = false case, buckets are only rebuilt once, + // the performance cost is negligible. Returns true if the buckets were + // rebuilt. + bool rebuild_buckets(); + + void setSparseMetadata(std::map& metadata); + + // Install futures that should be awaited at end of backwards. 
Currently these + // are only used by user-defined custom buffer reduction hooks, but can be + // generalized to any user-originating futures that need to be awaited. + void install_futures(c10::List> futs); + + // Returns true if we should rebuild buckets, else false. We only rebuild + // buckets once after the first iteration and never rebuild them if + // find_unused_parameters_. + inline bool should_rebuild_buckets() const { + return (static_graph_ || !find_unused_parameters_) && !has_rebuilt_bucket_; + } + + // Pushes all parameters to be rebuilt. + void push_rebuilt_params_for_all_indices(); + + // Creates and sets ForwardPassWorkHandle given a Work and the + // corresponding tensor being reduced. + void set_forward_pass_work_handle( + c10::intrusive_ptr forwardPassWorkHandle, + bool useStaticWorldSize); + + // Retrieve on-device tensors used to track locally unused parameters. It is + // a tensor where index i = 1 if the Variable with that index has been used. + at::Tensor get_local_used_map_on_device() const; + + // An function for users to set sample_rate of collecting + // runtime stats. The time stats will be recorded for the + // first 10 iterations, after 10 iterations time stats will be + // recorded once every "sample_rate" training iterations. + void set_ddp_runtime_logging_sample_rate(int sample_rate); + + // Specify the training graph is static. + void set_static_graph(); + + // Delay all reduce to be after all gradients' calculation is complete. + void delay_all_reduce(); + + void set_mixed_precision_param_dtype(c10::ScalarType dtype); + + // Weak reference to associated DDP logger. The reference is weak to avoid + // refcycle between reducer and logger. + void set_logger(std::weak_ptr logger); + + // When graph is not explicitly set by user as static and has unused + // parameters, this will return whether the graph has been static until the + // current iteration, which means unused params set has not changed. 
+ bool ddp_graph_static(); + + // Removes autograd hooks registered by the Reducer on the model parameters. + void remove_autograd_hooks(); + + // Checks whether or not the reducer has finalized the current backward + // iteration. + void check_finalized(); + + // Updates the underlying process group used by DDP with the new process + // group. + void update_process_group( + c10::intrusive_ptr new_process_group); + + // Resets reducer state. + void reset_state(); + + protected: + // Forward declaration. + struct Bucket; + + void push_rebuilt_params(const size_t& index); + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + mutable std::mutex mutex_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const std::vector params_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + c10::intrusive_ptr<::c10d::ProcessGroup> process_group_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector expect_sparse_gradients_; + + std::vector> + grad_accumulators_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes) + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::unordered_map gradAccToVariableMap_; + std::vector>> + hooks_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes) + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool expect_autograd_hooks_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool require_finalize_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + size_t next_bucket_; + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool has_marked_unused_parameters_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const bool find_unused_parameters_; + // 
NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const bool gradient_as_bucket_view_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector unused_parameters_; + // Previous iteration's unused params, used for checking if unused parameters + // change between iterations. Only filled during the first backwards call. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector prev_iteration_unused_parameters_; + // Whether graph is static or not. When user does not explicitly set static + // graph, the only possible dynamism is set of unused parameters changing + // between iterations which is tracked by this flag. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool ddp_graph_static_{true}; + // Locally used parameter maps indicating if parameters are used locally + // during the current iteration or no_sync session if no_sync is on. + // Each map is a one-dim int32 tensor of number of parameters. These tensors + // are marked in autograd_hook to indicate the corresponding param has been + // used, and get allreduced in the end of backward step of current iteration + // or no_sync session for figuring out the globally unused parameters. + // + // local_used_map_: CPU tensor for bookkeeping locally used params + // local_used_map_dev_: dev tensor for reducing globally unused params + at::Tensor local_used_map_; + at::Tensor local_used_map_dev_; + // Indicate that reduction is done and D2H copy is done as well. + bool local_used_map_reduced_; + + // Weak pointer to associated DDP logger. + std::weak_ptr logger_; + // List of futures installed by Reducer::install_futures that should be + // awaited at the end of backwards pass. + c10::optional>> + installed_futures_{c10::nullopt}; + // Mixed precision parameter dtype for bucket type checking. 
+ c10::optional mixed_precision_param_dtype_{c10::nullopt}; + + // Work handle for allreduce on local_used_map_ + c10::intrusive_ptr local_used_work_; + + void mark_variable_ready_dense(size_t variable_index); + + void mark_variable_ready_sparse(size_t variable_index); + + void mark_variable_ready(size_t variable_index); + + void mark_bucket_ready(size_t bucket_index); + + void finalize_bucket_dense(Bucket& bucket); + + void finalize_backward(); + + // Returns list of model parameters corresponding to the given bucket. + // bucket_index is a key to cache after buckets are rebuilt, after which this + // mapping never changes. + std::vector get_variables_for_bucket( + size_t bucket_index, + const Bucket& bucket) const; + + // Asserts that the reduction for the previous iteration has finished before + // rebuilding buckets or kicking off the next one. + void ensure_prior_reduction_finished(); + + // Broadcast rebuilt buckets from rank 0 to other ranks before initializing + // the buckets + void sync_bucket_indices(std::vector>& bucket_indices); + + // We'd like to use DistAutogradContext::GradCallback here but dist autograd + // doesn't exist under Windows. So we just directly use the concrete type but + // to preserve and enforce our original intent we do a static assert when dist + // autograd is available. + using GradCallback = std::function; +#ifndef _WIN32 + static_assert( + std::is_same< + GradCallback, + torch::distributed::autograd::DistAutogradContext::GradCallback>:: + value, + ""); +#endif + void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb); + + // This function is called inside `initialize_buckets()`. It initializes both + // `bucket_views_in` and `bucket_views_out` with views for each variable's + // gradient into the bucket's flattened `gradients` tensor. Views serve as + // entry points to `copy_()` each grad's data in/out of the flattened + // `gradients` tensor. 
+ void initialize_bucket_views(Bucket& bucket); + + // This function is called inside `finalize_backward`, it happens only if + // DDP communication hook was registered to recreate just bucket_views_out + // with the result of `future_work`. + void populate_bucket_views_out(Bucket& bucket, at::Tensor& tensor); + + // If gradient_as_bucket_view_ is false, after allreduce buckets, + // copy bucket results back to grads. + void copy_bucket_to_grad( + at::Tensor& variable, + Reducer::Bucket& bucket, + size_t intra_bucket_index, + bool global_unused); + // Check layout of grad and bucket_view before copying the grad to bucket. + void check_grad_layout(const at::Tensor& grad, const at::Tensor& bucket_view); + + // A bucket contains [1..N] gradients to be reduced, where the gradients + // have the same dtype and device. + // Coalescing gradients together before reducing can result in lower overhead + // and/or faster time to completion. Coalescing requires the constituent + // gradients to have the same dtype and device, and the resulting flattened + // tensor uses that common dtype and device. The flattened tensor is filled + // as the corresponding gradients are computed (triggered by autograd hooks), + // and the buckets are reduced in a predetermined order consistent across + // processes. + struct Bucket { + // Gradients of the bucket flattened into a 1-dimensional tensor + at::Tensor gradients; + + // Views into the `gradients` tensor for each individual gradient + // Each view is created with layout (size and stride) matching the + // gradient's expected layout (see the "Gradient Layout Contract" in + // torch/csrc/autograd/functions/accumulate_grad.h). + // `bucket_views_in[i].copy_(grad)` and `grad.copy_(bucket_views_out[i])` + // provide convenient ways to copy gradient data in/out of `gradients`, + // respectively. 
+ // We keep both `bucket_views_in` and `bucket_views_out` because + // registering a DDP communication hook may re-initialize + // `bucket_views_out` with the value of the hook's `future_work` but we + // still need separate views into the bucket's original flattened gradient + // to copy in gradient data. + std::vector bucket_views_in; + std::vector bucket_views_out; + + // Variables whose gradients are held in this bucket + // We use refcounted tensors here so that we can easily unflatten the + // bucket's flattened `gradients` tensor into the participating variables + // after reduction has completed. + std::vector variables; + + // Per-variable offset/length into the flattened `gradients` tensor and + // the corresponding `GradBucket` instance for communication hooks + std::vector offsets; + std::vector lengths; + + // Per-variable sizes slicing into the bucket's `gradients` tensor + std::vector sizes_vec; + + // Number of gradients left to be computed before the bucket is ready to + // be reduced + size_t pending; + + // Global indices of participating variables in the bucket + std::vector variable_indices; + + // Future work handle for DDP communication hook + // If no hook is registered, a temporary vanilla allreduce hook is used. + c10::intrusive_ptr future_work; + + // If this bucket should expect a single sparse gradient + // If `true`, then this implies that `bucket.variables.size() == 1`. + bool expect_sparse_gradient = false; + + // Sparse indices tensor + c10::optional sparse_tensor_indices = c10::nullopt; + + // TODO(@pietern) + // Memory copies from gradient tensors into the bucket are potentially + // done on different CUDA streams. We record an event for every copy + // so that we can synchronize with them prior to kicking off the reduction. 
+ // std::vector events; + }; + + std::vector buckets_; + + // A variable locator locates a particular variable in the reducer's buckets + struct VariableLocator { + // Index of the bucket containing the variable in the `buckets_` vector + size_t bucket_index; + // Index of the variable in the bucket, which may be used consistently + // across `bucket_views_in`, `bucket_views_out`, `variables`, `offsets`, + // `lengths`, `sizes_vec`, and `variable_indices` in `Bucket` + size_t intra_bucket_index; + + VariableLocator() = default; + + VariableLocator(size_t bucket_index_, size_t intra_bucket_index_) + : bucket_index(bucket_index_), + intra_bucket_index(intra_bucket_index_) {} + }; + + // Map the index of a variable to its location in the bucket structure. + std::vector variable_locators_; + + // track the number of iterations to synchronize grads in training so far. + long num_iterations_; + // track distinct iteration of backward call. This is distinct from + // num_iterations_, for example in the case of multiple forward before + // backward. + long num_bwd_calls_; + // whether the first autograd hook for a distinct backward pass has been + // called. + bool first_autograd_hook_called_; + // track the number of buckets that have been ready for + // communication calls like allReduce or communication hooks. + int num_buckets_ready_; + + // Timing information. + int64_t backward_compute_start_time_ = -1; + std::unique_ptr timer_; + + // We collect the relative timestamp of every gradient being ready + // when executing autograd. This can be used to derive a timeline of + // the point in time buckets were ready, or ideal bucket assignment/ordering. 
+ std::vector backward_stats_; + + bool should_collect_runtime_stats(); + void record_forward_compute_start_time(); + void record_backward_compute_start_time(); + void record_backward_compute_end_time(); + void record_backward_comm_start_time(); + void record_backward_comm_end_time(); + + int get_ddp_runtime_logging_sample_rate(); + int ddp_runtime_logging_sample_rate_ = kDDPRuntimeLoggingSampleRate; + + bool is_multi_device_module_ = false; + + // Following variables are to help build dynamic bucket order + bool has_rebuilt_bucket_; + std::vector rebuilt_params_; + std::vector rebuilt_param_indices_; + const int64_t bucket_bytes_cap_; + +#ifndef _WIN32 + struct RpcContext { + using ContextPtr = torch::distributed::autograd::ContextPtr; + // The shared_ptr is to hold the context instance. + ContextPtr context_ptr_holder; + std::atomic context_ptr{nullptr}; + + void set(ContextPtr&& new_context_ptr); + }; + RpcContext rpc_context_; +#endif + + // A struct containing work handle and tensor for allreduce scheduled in + // forward pass, if applicable. + struct ForwardPassAllreduceWork { + c10::intrusive_ptr workHandle; + at::Tensor resultTensor; + // whether we should divide by the initial world_size or the no. of + // remaining DDP ranks. + bool useStaticWorldSize; + }; + + // Handle for the currently scheduled allreduce in the forward pass, if + // applicable. + ForwardPassAllreduceWork forwardPassWorkHandle_; + + // Division factor for reduction of gradients. + // Equal to the process group size, with an exception of handling uneven + // input. + int div_factor_; + + bool static_graph_; + + // Key: size_t (index), Value: the number of times that a variable's + // autograd_hook() should be triggered before marking this variable's grad as + // ready for communication. Map will not change after 1st iteration. 
+ std::unordered_map numGradHooksTriggeredMap_; + // Key: size_t (index), Value: the number of times that a variable's + // autograd_hook() are left to be triggered before marking this variable's + // grad as ready for communication. Map will change after 1st iteration to + // track a grad is ready for communication or not. + std::unordered_map numGradHooksTriggeredMapPerIteration_; + + private: + // reset counting for buckets before backward starts + void reset_bucket_counting(); + // search unused parameters beore backward starts + void search_unused_parameters( + const std::vector& outputs); + void set_divide_factor(); + // kick off all reduce for the ready bucket + void all_reduce_bucket(Bucket& bucket); + // kick off all reduce to local used map, it can help find global unused + // parameters + void all_reduce_local_used_map(); + // initialize locally used parameter maps + void initialize_local_used_map(); + // get current cuda stream + const c10::Stream get_current_stream(); + bool dynamic_graph_find_unused(); + bool static_graph_first_iteration(); + bool static_graph_after_first_iteration(); + + // comm_hook_ is used to access the DDP communication hook if registered. + std::unique_ptr comm_hook_; + + // Sparse metadata contains the indices that will be used + // when calling into sparse allreduce. + // This is only used in the sparse allreduce collective calls + std::unique_ptr> sparse_metadata_; + + // Debug level setting. It is parsed once when Reducer is constructed, and + // remains the same across a single invocation of DDP training. + DebugLevel ddp_debug_level_; + // Mapping of variable index to fully qualified name of model to notify users + // about errors when certain parameters do not get gradient. + std::unordered_map param_names_; + // Variable indices stored sequentially in order of when the gradient is ready + // for the current backwards pass. 
+ std::vector grad_ready_order_indices_; + // Bytes capacity of first bucket, can be configured by user + int64_t first_bucket_bytes_cap_; + // Per iteration set of parameter indices that have been marked ready. + std::unordered_set perIterationReadyParams_; + // Retrieves parameter names that have not been marked as ready as part of + // previous iteration. + std::vector getUnmarkedParamsForIteration(); + // Retrieves parameter indices that have not been marked as ready as part of + // previous iteration. + std::vector getUnmarkedParamIndicesForIteration(); + // Raises appropriate error if mark_variable_ready is called on the same + // variable twice, which is unexpected. + void checkAndRaiseMarkedTwiceError(size_t curVariableIndex); + // Retrieves parameter corresponding to the given VariableIndex. + at::Tensor& get_param_from_index(size_t index); + + // Cached bucket index to model parameter mapping. Populated after buckets + // are rebuilt after which this mapping is static. + mutable std::unordered_map> + cached_variables_for_bucket_; + + bool optim_in_backward_{false}; + friend class Logger; +}; + +// This is equivalent to take_tensors but returns indices into the +// tensor list argument for bucket assignment. Also, it is aware +// of device placement and will not allow buckets to span devices. +// The index of tensors[i] assigned to bucket is tensor_indices[i], +// when tensor_indices is empty, the index of tensors[i] assigned to +// bucket is i. +TORCH_API std::tuple>, std::vector> +compute_bucket_assignment_by_size( + const std::vector& tensors, + const std::vector& bucket_size, + const std::vector& expect_sparse_gradient = {}, + const std::vector& tensor_indices = {}, + const c10::optional>& logger = {}); + +// Verify models across all processes are the same as model on rank 0 with +// respect to no. of params and matching dtype/size/layout. 
+TORCH_API void verify_params_across_processes( + const c10::intrusive_ptr& process_group, + const std::vector& params, + const c10::optional>& logger); +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..acd8975c4d2db13cac2e988238a0a8a2a191df68 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp @@ -0,0 +1,81 @@ +#pragma once +#include +#include + +namespace c10d { +constexpr int kUnsetTime = -1; + +inline int64_t current_time_in_nanos() { + return c10::getTime(); +} + +class TORCH_API Timer { + private: + // The timestamp of forward call start time in each iteration. + int64_t forward_start_time = kUnsetTime; + // The timestamp of backward computation start and end time in each + // iteration. + int64_t backward_compute_start_time = kUnsetTime; + int64_t backward_compute_end_time = kUnsetTime; + // The timestamp of first communication call start time in each iteration. + int64_t backward_comm_start_time = kUnsetTime; + // The timestamp of last communication call end time in each iteration. + int64_t backward_comm_end_time = kUnsetTime; + + public: + enum class Event { + kForwardStart, + kBackwardComputeStart, + kBackwardComputeEnd, + kBackwardCommStart, + kBackwardCommEnd, + }; + + // Record the current event, i.e., mark it as having occurred now. Default + // CPU implementation. + virtual void record(Event event) { + getTimeRef(event) = current_time_in_nanos(); + } + + // Return the difference between when two events occurred, in nanoseconds. + // Or nullopt if one of them hasn't been recorded. 
+ virtual c10::optional measureDifference(Event start, Event end) = 0; + + virtual ~Timer() = default; + + // Return host-side timestamp, or nullopt if it has not yet been recorded. + c10::optional getTimestamp(Event event) { + auto time = getTimeRef(event); + if (time == kUnsetTime) { + return c10::nullopt; + } else { + return time; + } + } + + // Return host-side time member variable corresponding to the given event. + int64_t& getTimeRef(Event event) { + switch (event) { + case Event::kForwardStart: + return forward_start_time; + case Event::kBackwardComputeStart: + return backward_compute_start_time; + case Event::kBackwardComputeEnd: + return backward_compute_end_time; + case Event::kBackwardCommStart: + return backward_comm_start_time; + case Event::kBackwardCommEnd: + return backward_comm_end_time; + default: + TORCH_INTERNAL_ASSERT(false); + } + } +}; + +TORCH_DECLARE_TYPED_REGISTRY( + TimerRegistry, + c10::DeviceType, + Timer, + std::unique_ptr, + c10::Device); +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp new file mode 100644 index 0000000000000000000000000000000000000000..50c800e8d7980d20fc942043e0a6894a9d31872c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10d { +const int kUnsetSeqNum = 0; + +namespace { +constexpr int kByteOffset = 8; +} + +// Converts from int to char vec to write in store +template +inline std::vector toVec(uint64_t num, int numBytes) { + std::vector values; + // Read off bytes from right to left, pushing them into + // char array. 
+ for (const auto i : c10::irange(numBytes)) { + uint8_t x = (num >> (kByteOffset * i)) & 0xff; + values.push_back(static_cast(x)); + } + return values; +} + +// Converts from char vec (such as from store read) to int. +template +inline uint64_t fromVec(const std::vector& values) { + uint64_t num = 0; + // Set each byte at the correct location on num + for (const auto i : c10::irange(values.size())) { + uint8_t x = static_cast(values[i]); + num |= (static_cast(x) << (kByteOffset * i)); + } + return num; +} + +class TORCH_API SequenceNum { + public: + SequenceNum(); + explicit SequenceNum(const uint64_t num); + // Retrieve num_. Will throw if not set. + uint64_t get() const; + // Increment num_. Will throw if not set. + void increment(); + // Increment num_ and return the old value. Will throw if not set. + uint64_t getAndIncrement(); + // Sets num_ + void set(const uint64_t num); + // Returns true if this SequenceNum is properly initialized with a value, else + // false. + bool isSet() const; + + SequenceNum& operator=(const SequenceNum& other); + + SequenceNum(const SequenceNum& other); + + private: + c10::optional num_; + mutable std::mutex lock_; +}; + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h new file mode 100644 index 0000000000000000000000000000000000000000..52832722304cf651b6333f849f29fd9d96a0fc42 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h @@ -0,0 +1,93 @@ +// Copyright (c) Meta Platforms, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
+ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace c10d { +namespace detail { + +class SocketOptions { + public: + SocketOptions& prefer_ipv6(bool value) noexcept { + prefer_ipv6_ = value; + + return *this; + } + + bool prefer_ipv6() const noexcept { + return prefer_ipv6_; + } + + SocketOptions& connect_timeout(std::chrono::seconds value) noexcept { + connect_timeout_ = value; + + return *this; + } + + std::chrono::seconds connect_timeout() const noexcept { + return connect_timeout_; + } + + private: + bool prefer_ipv6_ = true; + std::chrono::seconds connect_timeout_{30}; +}; + +class SocketImpl; + +class Socket { + public: + // This function initializes the underlying socket library and must be called + // before any other socket function. + static void initialize(); + + static Socket listen(std::uint16_t port, const SocketOptions& opts = {}); + + static Socket listenFromFd(int fd, std::uint16_t expected_port); + + static Socket connect( + const std::string& host, + std::uint16_t port, + const SocketOptions& opts = {}); + + Socket() noexcept = default; + + Socket(const Socket& other) = delete; + + Socket& operator=(const Socket& other) = delete; + + Socket(Socket&& other) noexcept; + + Socket& operator=(Socket&& other) noexcept; + + ~Socket(); + + Socket accept() const; + + int handle() const noexcept; + + std::uint16_t port() const; + + bool waitForInput(std::chrono::milliseconds timeout); + + private: + explicit Socket(std::unique_ptr&& impl) noexcept; + + std::unique_ptr impl_; +}; + +} // namespace detail + +} // namespace c10d diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h new file mode 100644 index 0000000000000000000000000000000000000000..432141a97cf5c07dc4c7e2b63fbf393dd22ec420 --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +enum RRefProxyType { RPC_SYNC, RPC_ASYNC, REMOTE }; + +// Python wrapper of an RRef shared_ptr that supports Python +// pickle and unpickle. +class PYBIND11_EXPORT PyRRef { + public: + // The first ctor can only be called while holding GIL. See its implementation + // for more explanations. + explicit PyRRef(const py::object& value, const py::object& type_hint); + explicit PyRRef(c10::intrusive_ptr rref); + PyRRef(const PyRRef&) = default; + ~PyRRef(); + + bool isOwner() const; + bool confirmedByOwner() const; + WorkerInfo owner() const; + std::string ownerName() const; + py::object toHere( + const float timeoutSeconds = + torch::distributed::rpc::kUnsetRpcTimeout) const; + py::object localValue() const; + std::string str() const; + py::tuple pickle() const; + static PyRRef unpickle(const py::tuple& t); + c10::IValue toIValue() const; + // Future that is associated with the creation of this RRef on the remote end. + // This is only used to get the future corresponding to the rref for profiling + // use cases. + c10::intrusive_ptr getFuture() const; + // Keeps track of the future responsible for profiling owner creation + // acknowledgement + c10::intrusive_ptr getProfilingFuture() const; + // Sets the future responsible for profiling owner creation acknowledgement. + // This future is set from python to be a future that returns when profiling + // callbacks have been run. + void setProfilingFuture(c10::intrusive_ptr profilingFuture); + + // create a proxy on this RRef, which can be used to launch RPC on the owner + // of this RRef to run functions on the object referenced by this RRef. 
+ py::object createRRefProxy( + const RRefProxyType& mode, + float timeoutSeconds = rpc::kUnsetRpcTimeout) const; + + // get the type of the data object referenced by this RRef. Timeout argument + // is only used in the first invocation of this function as an argument to the + // RPC to the owner node of the RRef. + py::object getRRefType( + float timeout = rpc::kUnsetRpcTimeout, + bool blocking = true); + + // Run the backward pass with the RRef as the root. + void backward(int64_t autogradContextId, bool retainGraph); + + // Helper static function to run backward on a given rref. + static void backward( + int64_t autogradContextId, + bool retainGraph, + const c10::intrusive_ptr& rref); + + // Specialization of backward if the rref is an OwnerRRef. + static void backwardOwnerRRef( + int64_t autogradContextId, + bool retainGraph, + IValue value); + + private: + c10::intrusive_ptr rref_; + c10::optional> profilingFuture_; + c10::optional type_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h new file mode 100644 index 0000000000000000000000000000000000000000..e640f48838f37c4a185e6d2840671d550ccbe6ad --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// RPC call representing calling a Python function over RPC. 
+class TORCH_API PythonCall final : public RpcCommandBase { + public: + PythonCall(SerializedPyObj&& serializedPyObj, bool isAsyncExecution); + + c10::intrusive_ptr toMessageImpl() && override; + + static std::unique_ptr fromMessage(const Message& message); + + const SerializedPyObj& serializedPyObj() const; + + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + private: + SerializedPyObj serializedPyObj_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h new file mode 100644 index 0000000000000000000000000000000000000000..c230037bf8ff2b8267538e8e07bafe7053feb045 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +class TORCH_API PythonRemoteCall : public RpcCommandBase { + public: + PythonRemoteCall( + SerializedPyObj&& serializedPyObj, + at::IValue retRRefId, + at::IValue retForkId, + const bool isAsyncExecution); + + inline const SerializedPyObj& serializedPyObj() const { + return serializedPyObj_; + } + + inline const at::IValue& retRRefId() const { + return retRRefId_; + } + + inline const at::IValue& retForkId() const { + return retForkId_; + } + + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + private: + SerializedPyObj serializedPyObj_; + const at::IValue retRRefId_; + const at::IValue retForkId_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch 
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_rpc_handler.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_rpc_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..fccdbd2d16d43c4fc3228d20840cfe9ab5943ef6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_rpc_handler.h @@ -0,0 +1,133 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Singleton class provides interface to execute python UDF remote call +// and deserialize the returned results by running python function +// in internal_rpc_utilities. +// The singleton object is constructed at first when RPC agent is +// constructed, where the python function in +// torch/distributed/internal_rpc_utils.py are imported only once. +class PYBIND11_EXPORT PythonRpcHandler { + public: + struct RRefProxyFunctions { + py::object rrefProxyCtor_; + py::object rpcSync_; + py::object rpcAsync_; + py::object remote_; + }; + + struct RRefTypeFunctions { + py::object onOwner_; + py::object onUser_; + }; + + static PythonRpcHandler& getInstance(); + + // Run a pickled Python UDF and return the result py::object + py::object runPythonUdf(const py::object& pythonUdf); + + // Serialized a py::object into a string + SerializedPyObj serialize(const py::object& obj); + + // Deserialize a string into a py::object + py::object deserialize(const SerializedPyObj& serializedObj); + + // Check if obj is RemoteException, then throw it + void handleException(const py::object& obj); + // Alternative if the caller is already holding the GIL. + void handleExceptionGILHeld(const py::object& obj); + // Check if obj is an RemoteException instance. 
+ bool isRemoteException(const py::object& obj); + + // Explicitly clean up py::objects to avoid segment faults when + // py::objects with CPython are cleaned up later at program exit + // See similar issues reported https://github.com/pybind/pybind11/issues/1598 + // and https://github.com/pybind/pybind11/issues/1493 + // Our local tests also caught this segment faults if py::objects are cleaned + // up at program exit. The explanation is: CPython cleans up most critical + // utilities before cleaning up PythonRpcHandler singleton, so when + // PythonRpcHandler singleton cleans up py::objects and call dec_ref(), it + // will crash. + // The solution is to clean up py::objects earlier when Rpc agent join(). + // Be note that py::objects can not be cleaned up when Rpc agent is destroyed + // as well, as Rpc agent is global variable and it will have same issue as + // PythonRpcHandler. + void cleanup(); + + std::shared_ptr jitCompilationUnit(); + + // Parse the string to recover the jit_type, this is used for RRef python + // pickling/unpickling type recovery. The type string inference rule is as + // follows: + // 1. first try to parse if this is primitive types. + // i.e. TensorType, IntType, PyObjectType, etc. + // 2. if not primitive type, we query the python_cu to see if it is a + // class type or interface type registered in python + // We use a ScriptTypeParser instance with custom PythonTypeResolver + // to resolve types according to the above rules. + TypePtr parseTypeFromStr(const std::string& typeStr); + + // Return a set of Python functions for RRef helpers. + const RRefProxyFunctions& getRRefProxyFunctions() const; + + // Return a set of Python functions to retrieve the type of the object + // referenced by a given RRef. 
+ const RRefTypeFunctions& getRRefTypeFunctions() const; + + PythonRpcHandler(const PythonRpcHandler&) = delete; + PythonRpcHandler& operator=(const PythonRpcHandler&) = delete; + PythonRpcHandler(PythonRpcHandler&&) = delete; + PythonRpcHandler& operator=(PythonRpcHandler&&) = delete; + + private: + void init(); + PythonRpcHandler(); + ~PythonRpcHandler() = default; + + // Ref to `torch.distributed.rpc.internal._run_function`. + py::object pyRunFunction_; + + // Ref to `torch.distributed.rpc.internal.serialize`. + py::object pySerialize_; + + // Ref to `torch.distributed.rpc.internal.deserialize`. + py::object pyDeserialize_; + + // Ref to 'torch.distributed.rpc.internal._handle_exception' + py::object pyHandleException_; + + // Python functions for RRef proxy + RRefProxyFunctions rrefProxyFunctions_; + + // Ref to 'torch.distributed.rpc.api._rref_typeof_on_' + RRefTypeFunctions rrefTypeFunctions_; + + // Shared ptr to python compilation unit in jit, it is constructed in python + // side (see _python_cu = torch._C.CompilationUnit() in jit/__init__.py) + // and imported in C++ (see get_python_cu() in + // csrc/jit/python/pybind_utils.h). We import the compilation unit here only + // once for less cost and thread safety. + std::shared_ptr jitCompilationUnit_; + + // jit type parser to parse type_str back to TypePtr for RRef type + // recovery when pickling and unpickling RRef + std::shared_ptr typeParser_; + + // Indicates whether or not we have properly initialized the handler. + bool initialized_; + + // Lock to protect initialization. 
+ std::mutex init_lock_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h new file mode 100644 index 0000000000000000000000000000000000000000..2fc0efb8cdc717b27c3adc31103fff3e5e86a783 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using torch::jit::Operator; + +// A ScriptCall instance represents an invocation of a builtin operator for a +// TorchScript function. If it is a builtin operator, it +// contains a shared ptr to the `Operator` and a list of arguments. +// If it is a TorchScript function, it contains a non empty qualifiedName string +// to the TorchScript function schema name and a list of arguments. +class TORCH_API ScriptCall : public RpcCommandBase { + public: + // Constructor for builitin operator call. + ScriptCall(std::shared_ptr op, std::vector&& stack); + // Constructor for TorchScript function call. 
+ ScriptCall( + const c10::QualifiedName& qualifiedName, + std::vector&& stack, + const bool isAsyncExecution = false); + + bool hasOp() const; + std::shared_ptr op() const; + bool hasQualifiedName() const; + const c10::QualifiedName& qualifiedName() const; + // return the argument stack of this builtin operator + const std::vector& stack() const; + std::vector& stackRef(); + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + ~ScriptCall() override = default; + + protected: + virtual void toIValues(std::vector& ivalues) const; + static std::unique_ptr fromIValues( + std::vector& ivalues); + + private: + // Given an operator symbol and a string schema, return the matched operator. + static std::shared_ptr matchOperator(const std::string& str_schema); + + static const std::string BUILTIN_OP_NAMESPACE_; + static const std::string ATEN_PREFIX_; + + // This field has value if this ScriptCall represents invocation of a builtin + // operator. + c10::optional> op_; + // This field has non empty string if this ScriptCall represents invocation of + // an annotated torchscript function defined by users. 
+ c10::optional qualifiedName_; + std::vector stack_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_utils.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..bf5d87cacc4b514a22159265893de6d7fe355347 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_utils.h @@ -0,0 +1,123 @@ +#pragma once + +#ifdef USE_TENSORPIPE + +#include + +namespace tensorpipe { +class Message; +class Allocation; +class Descriptor; +} // namespace tensorpipe + +namespace torch { +namespace distributed { +namespace rpc { + +TORCH_API const c10::Stream& getStreamForDevice( + const std::vector& streams, + const c10::Device& device); + +// Inspired by c10/core/impl/DeviceGuardImplInterface.h. + +class TensorpipeDeviceTypeConverter { + public: + // Ideally we'd want this to also return a tensorpipe::Message::Tensor object + // but we cannot forward-declare that class (because it's nested), and we + // cannot include the TensorPipe headers because it's a private dependency. + // Thus we bend over backwards and entrust this method with appending that + // object to the `tensors` field of the tensorpipe::Message object we pass. + virtual c10::optional> prepareTensorForSending( + const c10::Storage& storage, + const std::vector& streams, + tensorpipe::Message& message) const = 0; + + // Same as above: this method cannot return a tensorpipe::Allocation::Tensor, + // thus it appends it to the `tensors` field of the tensorpipe::Allocation. 
+ virtual at::DataPtr allocateTensorForReceiving( + int deviceIndex, + size_t length, + const std::vector& streams, + tensorpipe::Allocation& allocation) const = 0; + + virtual ~TensorpipeDeviceTypeConverter() = default; +}; + +extern TORCH_API std::array< + std::atomic, + static_cast(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)> + device_type_converter_registry; + +class TORCH_API TensorpipeDeviceTypeConverterRegistrar { + public: + TensorpipeDeviceTypeConverterRegistrar( + DeviceType, + const TensorpipeDeviceTypeConverter*); +}; + +#define C10_REGISTER_TENSORPIPE_DEVICE_TYPE_CONVERTER( \ + DevType, TensorpipeDeviceTypeConverter) \ + static ::torch::distributed::rpc::TensorpipeDeviceTypeConverterRegistrar \ + C10_ANONYMOUS_VARIABLE(g_##DeviceType)( \ + ::c10::DeviceType::DevType, new TensorpipeDeviceTypeConverter()); + +inline const TensorpipeDeviceTypeConverter* getDeviceTypeConverter( + DeviceType type) { + return device_type_converter_registry[static_cast(type)].load(); +} + +// A struct that holds pointers that keep alive all the memory that will be +// accessed by TensorPipe during a write operation. +struct TensorpipeWriteBuffers { + // Allocate on heap so pointers stay valid as we move the holder. + std::unique_ptr type; + std::unique_ptr id; + std::vector payload; + std::vector pickle; + // This contains the original tensors and the clones of the sparse tensors. + std::vector tensors; + // This contains the copies of the data of the tensors that didn't own their + // memory, e.g., the ones created from torch::from_blob() with no deleter. + std::vector> copiedTensors; +}; + +// A struct that holds pointers that keep alive all the memory that will be +// accessed by TensorPipe during a read operation. +struct TensorpipeReadBuffers { + // Allocate on heap so pointers stay valid as we move the holder. 
+ std::unique_ptr type; + std::unique_ptr id; + std::vector payload; + std::vector pickle; + std::vector tensors; +}; + +// Convert an RPC message into a TensorPipe message, plus a holder to all the +// data that must be kept alive while the write is performed asynchronously. +TORCH_API std::tuple +tensorpipeSerialize( + c10::intrusive_ptr rpcMessage, + std::vector devices, + const std::vector& streams); + +// Allocate the buffers that will hold the incoming data. They will be managed +// by the returned holder, which must be kept alive until the asynchronous read +// has finished. Pointers to these buffers will be stored in the returned +// tensorpipe::Allocation struct. +TORCH_API std::pair +tensorpipeAllocate( + const tensorpipe::Descriptor& tpDescriptor, + const std::vector& streams); + +// Convert a TensorPipe message back into an RPC message. This requires the data +// to be available and can thus only be performed once the asynchronous read has +// completed. The holder can be destroyed once this function returns. 
+TORCH_API c10::intrusive_ptr tensorpipeDeserialize( + tensorpipe::Descriptor&& tpDescriptor, + TensorpipeReadBuffers&& holder); + +} // namespace rpc +} // namespace distributed +} // namespace torch + +#endif // USE_TENSORPIPE diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..d7eea6b2c8f954086819d45e2c94d7c661aa9448 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// This function sends an rpc call to run torchscript function, currently the +// torchscript function could only be a user defined python function with +// "@torch.jit.script" annotation. The torchscript function could not be +// a class constructor, class method, instance method or a script module. 
+// dst: destination worker name +// qualifiedName: torchscript function qualified name string like +// "moduleName::torchscriptFunctionName", e.g, +// "dist_autograd_test::my_py_add" +// stack: a bag of IValue args passed to torchscriptFunctionName +// It returns c10::intrusive_ptr +c10::intrusive_ptr TORCH_API rpcTorchscript( + const std::string& dstWorkerName, + const c10::QualifiedName& qualifiedName, + const c10::FunctionSchema& functionSchema, + std::vector& stack, + const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout, + const bool isAsyncExecution = false); + +c10::intrusive_ptr TORCH_API remoteTorchscript( + const std::string& dstWorkerName, + const c10::QualifiedName& qualifiedName, + const c10::FunctionSchema& functionSchema, + std::vector& stack, + const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout, + const bool isAsyncExecution = false); + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h new file mode 100644 index 0000000000000000000000000000000000000000..9c4029bead95cefbdec8c522943f86d52d99fc11 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using worker_id_t = int16_t; +using local_id_t = int64_t; + +bool getAllowJitRRefPickle(); +TORCH_API void enableJitRRefPickle(); +TORCH_API void disableJitRRefPickle(); + +struct TORCH_API JitRRefPickleGuard { + JitRRefPickleGuard(); + ~JitRRefPickleGuard(); +}; + +struct TORCH_API GloballyUniqueId final { + GloballyUniqueId(worker_id_t createdOn, local_id_t localId); + GloballyUniqueId(const GloballyUniqueId& other) = default; + GloballyUniqueId& operator=(const GloballyUniqueId& 
other) = delete; + + bool operator==(const GloballyUniqueId& other) const; + bool operator!=(const GloballyUniqueId& other) const; + + at::IValue toIValue() const; + static GloballyUniqueId fromIValue(const at::IValue&); + + struct Hash { + size_t operator()(const GloballyUniqueId& key) const { + return (uint64_t(key.createdOn_) << kLocalIdBits) | key.localId_; + } + }; + + static constexpr int kLocalIdBits = 48; + + const worker_id_t createdOn_; + const local_id_t localId_; +}; + +TORCH_API std::ostream& operator<<( + std::ostream& os, + const GloballyUniqueId& globalId); + +using RRefId = GloballyUniqueId; +using ForkId = GloballyUniqueId; +using ProfilingId = GloballyUniqueId; + +struct TORCH_API SerializedPyObj final { + SerializedPyObj(std::string&& payload, std::vector&& tensors) + : payload_(std::move(payload)), tensors_(std::move(tensors)) {} + + std::vector toIValues() &&; + static SerializedPyObj fromIValues(std::vector value); + + std::string payload_; + std::vector tensors_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h new file mode 100644 index 0000000000000000000000000000000000000000..c6dda5ba470447ad6ce461566737a4f704107050 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// This class converts the content in a PythonCall into py::object. 
This is a +// helper class to make sure that all arguments deserialization is done before +// entering RequestCallbackImpl::processRpc(...), so that the deserialization +// related logic can be carried out in one spot instead of scattered in multiple +// places for different message types. +// NB: The reason for not consolidating class into PythonCall is because +// PythonCall is a libtorch type which should not depend on Python types. +class TORCH_API UnpickledPythonCall : public RpcCommandBase { + public: + UnpickledPythonCall( + const SerializedPyObj& serializedPyObj, + bool isAsyncExecution); + ~UnpickledPythonCall() override; + + // toMessage() method is not implemented, as objects of this class should + // never be directly converted into a Message object. + c10::intrusive_ptr toMessageImpl() && override; + const py::object& pythonUdf() const; + + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + private: + py::object pythonUdf_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h new file mode 100644 index 0000000000000000000000000000000000000000..30602b0c9b73183c2584111b522a3a162ab68edf --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h @@ -0,0 +1,8 @@ +#pragma once + +#include + +namespace torch { +TORCH_API bool get_cpp_stacktraces_enabled(); +TORCH_API bool get_disable_addr2line(); +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h new file mode 100644 index 0000000000000000000000000000000000000000..e27c168a8ef46a5860f793e79e9be05a80f27e18 --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h @@ -0,0 +1,15 @@ +#pragma once + +namespace torch { +namespace utils { + +static inline bool cuda_enabled() { +#ifdef USE_CUDA + return true; +#else + return false; +#endif +} + +} // namespace utils +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_lazy_init.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_lazy_init.h new file mode 100644 index 0000000000000000000000000000000000000000..90a8581e63ab305da966c35938c342a4b0bba1b7 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_lazy_init.h @@ -0,0 +1,33 @@ +#pragma once + +#include + +// cuda_lazy_init() is always compiled, even for CPU-only builds. +// Thus, it does not live in the cuda/ folder. + +namespace torch { +namespace utils { + +// The INVARIANT is that this function MUST be called before you attempt +// to get a CUDA Type object from ATen, in any way. Here are some common +// ways that a Type object may be retrieved: +// +// - You call getNonVariableType or getNonVariableTypeOpt +// - You call toBackend() on a Type +// +// It's important to do this correctly, because if you forget to add it +// you'll get an oblique error message about "Cannot initialize CUDA without +// ATen_cuda library" if you try to use CUDA functionality from a CPU-only +// build, which is not good UX. 
+// +void cuda_lazy_init(); +void set_requires_cuda_init(bool value); + +static void maybe_initialize_cuda(const at::TensorOptions& options) { + if (options.device().is_cuda()) { + torch::utils::cuda_lazy_init(); + } +} + +} // namespace utils +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h new file mode 100644 index 0000000000000000000000000000000000000000..81ad207306844a4b8e0b57efe72a6079d4a74e26 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include +#include + +template +class TORCH_PYTHON_API THPPointer { + public: + THPPointer() : ptr(nullptr){}; + explicit THPPointer(T* ptr) noexcept : ptr(ptr){}; + THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {} + + ~THPPointer() { + free(); + }; + T* get() { + return ptr; + } + const T* get() const { + return ptr; + } + T* release() { + T* tmp = ptr; + ptr = nullptr; + return tmp; + } + operator T*() { + return ptr; + } + THPPointer& operator=(T* new_ptr) noexcept { + free(); + ptr = new_ptr; + return *this; + } + THPPointer& operator=(THPPointer&& p) noexcept { + free(); + ptr = p.ptr; + p.ptr = nullptr; + return *this; + } + T* operator->() { + return ptr; + } + explicit operator bool() const { + return ptr != nullptr; + } + + private: + void free(); + T* ptr = nullptr; +}; + +/** + * An RAII-style, owning pointer to a PyObject. You must protect + * destruction of this object with the GIL. + * + * WARNING: Think twice before putting this as a field in a C++ + * struct. This class does NOT take out the GIL on destruction, + * so if you will need to ensure that the destructor of your struct + * is either (a) always invoked when the GIL is taken or (b) takes + * out the GIL itself. 
Easiest way to avoid this problem is to + * not use THPPointer in this situation. + */ +using THPObjectPtr = THPPointer; +using THPCodeObjectPtr = THPPointer; +using THPFrameObjectPtr = THPPointer; diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/out_types.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/out_types.h new file mode 100644 index 0000000000000000000000000000000000000000..1cab00bc270f2e3dff532cc715327c030698c190 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/out_types.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace utils { + +TORCH_API void check_out_type_matches( + const at::Tensor& result, + c10::optional scalarType, + bool scalarType_is_none, + c10::optional layout, + c10::optional device, + bool device_is_none); + +} +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h new file mode 100644 index 0000000000000000000000000000000000000000..4f3871d3ea97a5c84311b19dd355ab7b3b1e3c30 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h @@ -0,0 +1,388 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace py = pybind11; + +// This makes intrusive_ptr to be available as a custom pybind11 holder type, +// see +// https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers +PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr, true); + +PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr); +PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr, true); + +namespace pybind11 { +namespace detail { + +// torch.Tensor <-> at::Tensor conversions (without unwrapping) +template <> 
+struct TORCH_PYTHON_API type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::Tensor, _("torch.Tensor")); + + bool load(handle src, bool); + + static handle cast( + const at::Tensor& src, + return_value_policy /* policy */, + handle /* parent */); +}; + +// torch._StorageBase <-> at::Storage +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::Storage, _("torch.StorageBase")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (torch::isStorage(obj)) { + value = torch::createStorage(obj); + return true; + } + return false; + } + + static handle cast( + const at::Storage& src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(torch::createPyObject(src)); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::Generator, _("torch.Generator")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (THPGenerator_Check(obj)) { + value = reinterpret_cast(obj)->cdata; + return true; + } + return false; + } + + static handle cast( + const at::Generator& src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(THPGenerator_Wrap(src)); + } +}; + +template <> +struct TORCH_PYTHON_API type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::IntArrayRef, _("Tuple[int, ...]")); + + bool load(handle src, bool); + static handle cast( + at::IntArrayRef src, + return_value_policy /* policy */, + handle /* parent */); + + private: + std::vector v_value; +}; + +template <> +struct TORCH_PYTHON_API type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::SymIntArrayRef, _("List[int]")); 
+ + bool load(handle src, bool); + static handle cast( + at::SymIntArrayRef src, + return_value_policy /* policy */, + handle /* parent */); + + private: + std::vector v_value; +}; + +template <> +struct TORCH_PYTHON_API type_caster> { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::ArrayRef, _("List[SymNode]")); + + bool load(handle src, bool); + static handle cast( + at::ArrayRef src, + return_value_policy /* policy */, + handle /* parent */); + + private: + std::vector v_value; +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::MemoryFormat, _("torch.memory_format")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (THPMemoryFormat_Check(obj)) { + value = reinterpret_cast(obj)->memory_format; + return true; + } + return false; + } + static handle cast( + at::MemoryFormat src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(torch::utils::getTHPMemoryFormat(src)); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::Device, _("torch.device")); + + // PYBIND11_TYPE_CASTER defines a member field called value. Since at::Device + // cannot be default-initialized, we provide this constructor to explicitly + // initialize that field. The value doesn't matter as it will be overwritten + // after a successful call to load. 
+ type_caster() : value(c10::kCPU) {} + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (THPDevice_Check(obj)) { + value = reinterpret_cast(obj)->device; + return true; + } + return false; + } + + static handle cast( + const at::Device& src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(THPDevice_New(src)); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(c10::Stream, _("torch.Stream")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (THPStream_Check(obj)) { + value = c10::Stream::unpack3( + ((THPStream*)obj)->stream_id, + ((THPStream*)obj)->device_index, + static_cast(((THPStream*)obj)->device_type)); + return true; + } + return false; + } + + static handle cast( + const c10::Stream& src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(THPStream_Wrap(src)); + } +}; + +template <> +struct type_caster + : public type_caster_base { + using base = type_caster_base; + c10::DispatchKey tmp; + + public: + bool load(handle src, bool convert) { + if (base::load(src, convert)) { + return true; + } else if (py::isinstance( + src, py::module_::import("builtins").attr("str"))) { + tmp = c10::parseDispatchKey(py::cast(src)); + value = &tmp; + return true; + } + return false; + } + + static handle cast( + c10::DispatchKey src, + return_value_policy policy, + handle parent) { + return base::cast(src, policy, parent); + } +}; + +template <> +struct TORCH_PYTHON_API type_caster { + public: + PYBIND11_TYPE_CASTER( + c10::Scalar, + _("Union[Number, torch.SymInt, torch.SymFloat, torch.SymBool]")); + bool load(py::handle src, bool); + + static py::handle cast( + const c10::Scalar& si, + return_value_policy /* policy */, + handle /* parent */); +}; + +template <> +struct TORCH_PYTHON_API type_caster { + public: + PYBIND11_TYPE_CASTER(c10::SymInt, _("Union[int, torch.SymInt]")); + 
bool load(py::handle src, bool); + + static py::handle cast( + const c10::SymInt& si, + return_value_policy /* policy */, + handle /* parent */); +}; + +template <> +struct TORCH_PYTHON_API type_caster { + public: + PYBIND11_TYPE_CASTER(c10::SymFloat, _("float")); + bool load(py::handle src, bool); + + static py::handle cast( + const c10::SymFloat& si, + return_value_policy /* policy */, + handle /* parent */); +}; + +template <> +struct TORCH_PYTHON_API type_caster { + public: + PYBIND11_TYPE_CASTER(c10::SymBool, _("Union[bool, torch.SymBool]")); + bool load(py::handle src, bool); + + static py::handle cast( + const c10::SymBool& si, + return_value_policy /* policy */, + handle /* parent */); +}; + +template +struct type_caster> { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(c10::complex, _("complex")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + + // Refered from `THPUtils_unpackComplexDouble` + Py_complex py_complex = PyComplex_AsCComplex(obj); + if (py_complex.real == -1.0 && PyErr_Occurred()) { + return false; + } + + // Python's Complex is always double precision. + value = c10::complex(py_complex.real, py_complex.imag); + return true; + } + + static handle cast( + const c10::complex& complex, + return_value_policy /* policy */, + handle /* parent */) { + // Python only knows double precision complex. + return handle(PyComplex_FromDoubles(complex.real(), complex.imag())); + } +}; + +} // namespace detail +} // namespace pybind11 + +namespace torch { +namespace impl { + +// Use this function if you have a C++ object that is used from both C++ +// and Python contexts, and you need its GIL to be released when you +// destruct it in the Python context. +// +// This function is a valid shared_ptr destructor and can be used to +// conveniently allocate a shared_ptr to an object whose destructor will be run +// without the GIL. 
Pass it as the second argument to shared_ptr, e.g., +// +// shared_ptr(new T(), destroy_without_gil) +// +// Attaching the GIL release logic to the holder pointer rather than the +// actual destructor of T is helpful when T is Python-agnostic and +// shouldn't refer to the PYthon API. +// +// Note there are limitations to the correctness of code that makes use of this. +// In particular, if a shared_ptr is constructed from C++ code without this +// destructor and then passed to pybind11, pybind11 will happily take ownership +// of the shared_ptr (and be willing to destruct it from a context where it is +// holding the GIL). unique_ptr with a type branded deleter is less prone to +// this problem, because a stock deleter unique_ptr is not convertible with it. +// I plan to mitigate this problem by adding DEBUG-only asserts to the true C++ +// destructors that the GIL is not held (using a virtual call to get to the +// Python interpreter); alternately, we could use a virtual call to simply +// ensure we release the GIL in the C++ destructor, however, this is a layering +// violation (why does code that is ostensibly Python agnostic calling into the +// GIL). +// +// Adapted from +// https://github.com/pybind/pybind11/issues/1446#issuecomment-406341510 +template +inline void destroy_without_gil(T* ptr) { + // Because the ownership of a shared_ptr is diffuse, it's not possible to + // necessarily predict whether or not the last reference to an object will + // be destructed from Python or C++. This means that in the destructor here, + // we don't necessarily know if we actually have the GIL or not; in fact, + // we don't even know if the Python interpreter still exists! Thus, we have + // to test for it before releasing the GIL. + // + // PyGILState_Check is hopefully self explanatory. But Py_IsInitialized or + // _PyIsFinalizing? 
Both get set at the same time during the Python + // destruction process: + // https://github.com/python/cpython/blob/d92513390a1a0da781bb08c284136f4d7abea36d/Python/pylifecycle.c#L1716-L1717 + // so the operant question is whether or not you want to release the GIL after + // finalization has completed (and there is just no Python interpreter). + // Clearly there is no need to release GIL in that state, so we want + // Py_IsInitialized. + if (Py_IsInitialized() && PyGILState_Check()) { + pybind11::gil_scoped_release nogil; + delete ptr; + } else { + delete ptr; + } +} + +} // namespace impl +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pycfunction_helpers.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pycfunction_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..745e1842e682c8a2fb3cc9d94e77122505016571 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pycfunction_helpers.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +#include + +inline PyCFunction castPyCFunctionWithKeywords(PyCFunctionWithKeywords func) { + C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wcast-function-type") + C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wcast-function-type-strict") + return reinterpret_cast(func); + C10_DIAGNOSTIC_POP() + C10_DIAGNOSTIC_POP() +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_arg_parser.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_arg_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..abf1371e290a14a0b29aa2e72a41f03596a098b1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_arg_parser.h @@ -0,0 +1,1313 @@ +#pragma once + +// Parse arguments to Python functions implemented in C++ +// This is similar to PyArg_ParseTupleAndKeywords(), but specifically 
handles +// the types relevant to PyTorch and distinguishes between overloaded function +// signatures. +// +// Example: +// +// static PythonArgParser parser({ +// "norm(Scalar p, int64_t dim, bool keepdim=False)", +// "norm(Scalar p=2)", +// }); +// ParsedArgs<3> parsed_args; +// auto r = parser.parse(args, kwargs, parsed_args); +// if (r.idx == 0) { +// norm(r.scalar(0), r.int64(1), r.bool(0)); +// } else { +// norm(r.scalar(0)); +// } +// +// We auto-generate most uses of PythonArgParser; the generated files +// are torch/csrc/autograd/generated/python_*.cpp +// +// Some gotchas that you should watch out for: +// +// - Note [Order of overloads matters] +// Order of overloads matters. A set of input arguments may +// bind to multiple argument specs; we will always pick the +// first one in PythonArgParser. However, when you are writing +// overloads in, e.g., native_functions.yaml, you don't have to +// worry about what order you write them, because the code +// generation logic always gives the overloads a canonical +// order, where Tensor overloads come first, before Scalar overloads. +// This logic is in sort_declarations in +// tools/autograd/gen_python_functions.py +// +// - Zero-dim tensors (e.g., torch.tensor(2)) bind to both +// Scalar and Tensor, UNLESS they require grad (in which case +// they only bind to Tensor). 
+ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +inline bool THPUtils_checkScalar(PyObject* obj) { +#ifdef USE_NUMPY + if (torch::utils::is_numpy_scalar(obj)) { + return true; + } +#endif + return PyFloat_Check(obj) || PyLong_Check(obj) || PyComplex_Check(obj) || + torch::is_symint(py::handle(obj)) || + torch::is_symfloat(py::handle(obj)) || torch::is_symbool(py::handle(obj)); +} + +namespace torch { + +bool should_allow_numbers_as_tensors(const std::string& name); + +enum class ParameterType { + TENSOR, + SCALAR, + INT64, + SYM_INT, + DOUBLE, + COMPLEX, + TENSOR_LIST, + INT_LIST, + GENERATOR, + BOOL, + STORAGE, + PYOBJECT, + SCALARTYPE, + LAYOUT, + MEMORY_FORMAT, + DEVICE, + STREAM, + STRING, + DIMNAME, + DIMNAME_LIST, + QSCHEME, + FLOAT_LIST, + SCALAR_LIST, + SYM_INT_LIST, + DISPATCH_KEY_SET +}; + +struct FunctionParameter; +struct FunctionSignature; +struct PythonArgs; + +// Contains bound Python arguments in declaration order +template +struct ParsedArgs { + ParsedArgs() : args() {} + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + PyObject* args[N]; +}; + +// A PythonArgParser contains a list of valid signatures. Instances are +// typically global variables and should be immutable. +struct PYBIND11_EXPORT PythonArgParser { + explicit PythonArgParser( + const std::vector& fmts, + bool traceable = false); + + // meant only for `torch` functions. 
+ template + inline PythonArgs parse( + PyObject* self, + PyObject* args, + PyObject* kwargs, + ParsedArgs& dst); + + template + inline PythonArgs parse(PyObject* args, PyObject* kwargs, ParsedArgs& dst); + + inline PythonArgs parse(PyObject* self, ParsedArgs<0>& dst); + + // Formatted strings of non-hidden signatures + std::vector get_signatures() const; + + private: + [[noreturn]] void print_error( + PyObject* self, + PyObject* args, + PyObject* kwargs, + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + PyObject* parsed_args[]); + void check_deprecated(const FunctionSignature& signature); + PythonArgs raw_parse( + PyObject* self, + PyObject* args, + PyObject* kwargs, + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + PyObject* parsed_args[]); + + std::vector signatures_; + std::string function_name; + size_t max_args; + bool traceable; +}; + +// FunctionSignature represents a single valid signature for a Python function. +// It is immutable once constructed. The contained data can be concurrently +// accessed by multiple calls. +struct FunctionSignature { + explicit FunctionSignature(const std::string& fmt, int index); + + bool parse( + PyObject* self, + PyObject* args, + PyObject* kwargs, + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + PyObject* dst[], + std::vector& overloaded_args, + bool raise_exception); + + std::string toString() const; + + std::string name; + std::vector params; + size_t min_args; + size_t max_args; + size_t max_pos_args; + int index; + bool hidden; + bool deprecated; +}; + +// PythonArgs contains bound Python arguments for an actual invocation +// along with references to the matched signature. 
+struct PythonArgs { + PythonArgs( + bool traceable, + const FunctionSignature& signature, + PyObject** args, + std::vector overloaded_args) + : idx(signature.index), + traceable(traceable), + signature(signature), + args(args), + overloaded_args(std::move(overloaded_args)) {} + + int idx; + bool traceable; + const FunctionSignature& signature; + PyObject** args; + std::vector overloaded_args; // NOTE: borrowed references + + inline bool has_torch_function(); + inline std::string get_func_name(); + inline at::Tensor tensor(int i); + inline c10::optional optionalTensor(int i); + inline at::Scalar scalar(int i); + inline at::Scalar scalarWithDefault(int i, const at::Scalar& default_scalar); + inline std::vector scalarlist(int i); + inline std::vector tensorlist(int i); + inline torch::List> list_of_optional_tensors(int i); + template + inline std::array tensorlist_n(int i); + inline std::vector intlist(int i); + inline std::vector symintlist(int i); + inline c10::OptionalArray intlistOptional(int i); + inline c10::OptionalArray symintlistOptional(int i); + inline std::vector intlistWithDefault( + int i, + std::vector default_intlist); + inline c10::optional generator(int i); + inline at::Storage storage(int i); + inline at::Storage storage( + int i, + at::ScalarType& storage_scalar_type, + bool& is_typed_storage); + inline c10::Stream stream(int i); + inline at::ScalarType scalartype(int i); + inline at::ScalarType scalartypeWithDefault( + int i, + at::ScalarType default_scalartype); + inline c10::optional scalartypeOptional(int i); + inline c10::optional scalarOptional(int i); + inline c10::optional toInt64Optional(int i); + inline c10::optional toSymIntOptional(int i); + inline c10::optional toBoolOptional(int i); + inline c10::optional toDoubleOptional(int i); + inline c10::OptionalArray doublelistOptional(int i); + inline std::vector doublelist(int i); + inline std::vector getDoublelist(int i); + inline at::Layout layout(int i); + inline at::Layout 
layoutWithDefault(int i, at::Layout default_layout); + inline c10::optional layoutOptional(int i); + inline at::Device device(int i); + inline at::Device deviceWithDefault(int i, const at::Device& default_device); + inline c10::optional deviceOptional(int i); + inline at::Dimname dimname(int i); + inline std::vector dimnamelist(int i); + inline c10::optional> toDimnameListOptional(int i); + inline at::MemoryFormat memoryformat(int i); + inline c10::optional memoryformatOptional(int i); + inline at::QScheme toQScheme(int i); + inline std::string string(int i); + inline std::string stringWithDefault(int i, const std::string& default_str); + inline c10::optional stringOptional(int i); + inline c10::string_view stringView(int i); + inline c10::string_view stringViewWithDefault( + int i, + const c10::string_view default_str); + inline c10::optional stringViewOptional(int i); + inline PyObject* pyobject(int i); + inline int64_t toInt64(int i); + inline c10::SymInt toSymInt(int i); + inline c10::SymBool toSymBool(int i); + inline int64_t toInt64WithDefault(int i, int64_t default_int); + inline double toDouble(int i); + inline double toDoubleWithDefault(int i, double default_double); + inline c10::complex toComplex(int i); + inline c10::complex toComplexWithDefault( + int i, + c10::complex default_complex); + inline bool toBool(int i); + inline bool toBoolWithDefault(int i, bool default_bool); + inline bool isNone(int i); + inline c10::optional toDispatchKeySetOptional(int i); + + private: + at::Tensor tensor_slow(int i); + at::Scalar scalar_slow(int i); + at::Scalar scalar_slow(PyObject* arg); +}; + +// FunctionParameter is a single formal parameter of a Python function. +// It is immutable once constructed. 
+struct FunctionParameter { + FunctionParameter(const std::string& fmt, bool keyword_only); + + bool check( + PyObject* obj, + std::vector& overloaded_args, + int argnum, + int64_t* failed_idx = nullptr); + + void set_default_str(const std::string& str); + std::string type_name() const; + + ParameterType type_; + bool optional; + bool allow_none; + bool keyword_only; + bool allow_numbers_as_tensors = false; + int size; + std::string name; + // having this as a raw PyObject * will presumably leak it, but these are only + // held by static objects anyway, and Py_Finalize can already be called when + // this is destructed. + PyObject* python_name; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) + at::SmallVector numpy_python_names; + at::Scalar default_scalar; + std::vector default_intlist; + std::string default_string; + union { + bool default_bool; + int64_t default_int; + double default_double; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + double default_complex[2]; // see Scalar + at::ScalarType default_scalartype; + at::Layout default_layout; + }; +}; + +template +inline PythonArgs PythonArgParser::parse( + PyObject* self, + PyObject* args, + PyObject* kwargs, + ParsedArgs& dst) { + if (N < max_args) { + throw ValueError( + "PythonArgParser: dst ParsedArgs buffer does not have enough capacity, expected %d (got %d)", + (int)max_args, + N); + } + return raw_parse(self, args, kwargs, dst.args); +} + +template +inline PythonArgs PythonArgParser::parse( + PyObject* args, + PyObject* kwargs, + ParsedArgs& dst) { + return parse(nullptr, args, kwargs, dst); +} + +inline PythonArgs PythonArgParser::parse(PyObject* self, ParsedArgs<0>& dst) { + return parse(self, nullptr, nullptr, dst); +} + +inline bool PythonArgs::has_torch_function() { + return !overloaded_args.empty() || at::impl::torch_function_mode_enabled(); +} + +inline std::string PythonArgs::get_func_name() { + return signature.name; +} + +// TODO: this can return 
MaybeOwned +inline at::Tensor PythonArgs::tensor(int i) { + if (args[i] && THPVariable_CheckExact(args[i])) { + return THPVariable_Unpack(args[i]); + } + return tensor_slow(i); +} + +inline c10::optional PythonArgs::optionalTensor(int i) { + at::Tensor t = tensor(i); + // NOLINTNEXTLINE(bugprone-branch-clone) + if (t.defined()) { + return t; + } else { + return c10::nullopt; + } +} + +inline at::Scalar PythonArgs::scalar(int i) { + if (!args[i]) + return signature.params[i].default_scalar; + return scalar_slow(i); +} + +inline std::vector PythonArgs::scalarlist(int i) { + if (!args[i]) + return std::vector(); + auto tuple = six::isTuple(args[i]); + THPObjectPtr arg = six::maybeAsTuple(args[i]); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); + std::vector res(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) + : PyList_GET_ITEM(arg.get(), idx); + res[idx] = scalar_slow(obj); + } + return res; +} + +inline at::Scalar PythonArgs::scalarWithDefault( + int i, + const at::Scalar& default_scalar) { + if (!args[i]) + return default_scalar; + return scalar_slow(i); +} + +inline c10::optional PythonArgs::scalarOptional(int i) { + if (!args[i]) + return c10::nullopt; + return scalar_slow(i); +} + +inline std::vector PythonArgs::tensorlist(int i) { + if (!args[i]) + return std::vector(); + auto tuple = six::isTuple(args[i]); + THPObjectPtr arg = six::maybeAsTuple(args[i]); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); + std::vector res(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = tuple ? 
PyTuple_GET_ITEM(arg.get(), idx) + : PyList_GET_ITEM(arg.get(), idx); + // This is checked by the argument parser so it's safe to cast without + // checking if this is a tensor first + res[idx] = THPVariable_Unpack(obj); + } + return res; +} + +inline torch::List> PythonArgs:: + list_of_optional_tensors(int i) { + if (!args[i]) + return torch::List>(); + auto tuple = six::isTuple(args[i]); + THPObjectPtr arg = six::maybeAsTuple(args[i]); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); + torch::List> res; + res.reserve(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) + : PyList_GET_ITEM(arg.get(), idx); + // This is checked by the argument parser so it's safe to cast without + // checking if this is a tensor first + res.push_back(THPVariable_Unpack(obj)); + } + return res; +} + +template +inline std::array PythonArgs::tensorlist_n(int i) { + auto res = std::array(); + if (!args[i]) + return res; + auto tuple = six::isTuple(args[i]); + THPObjectPtr arg = six::maybeAsTuple(args[i]); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); + if (size != N) { + throw TypeError("expected tuple of %d elements but got %d", N, (int)size); + } + for (const auto idx : c10::irange(size)) { + PyObject* obj = tuple ? 
PyTuple_GET_ITEM(arg.get(), idx) + : PyList_GET_ITEM(arg.get(), idx); + // This is checked by the argument parser so it's safe to cast without + // checking if this is a tensor first + res[idx] = THPVariable_Unpack(obj); + } + return res; +} + +inline std::vector PythonArgs::intlist(int i) { + return intlistWithDefault(i, signature.params[i].default_intlist); +} + +inline PyObject* toPyObject(c10::SymInt symint) { + if (symint.is_symbolic()) { + auto r = py::cast(symint).release().ptr(); + TORCH_INTERNAL_ASSERT(r); + return r; + } else { + auto m = symint.maybe_as_int(); + return THPUtils_packInt64(*m); + } +} + +inline void throw_intlist_exception( + const torch::PythonArgs* args, + size_t i, + PyObject* obj, + size_t idx, + const std::exception& e = python_error()) { + std::string error = strlen(e.what()) + ? e.what() + : std::string("type must be ") + args->signature.params[i].type_name() + + ",but got " + Py_TYPE(obj)->tp_name; + throw TypeError( + "%s(): argument '%s' failed to unpack the object at pos %zu with error \"%s\"", + args->signature.name.c_str(), + args->signature.params[i].name.c_str(), + idx + 1, + error.c_str()); +} + +inline std::vector PythonArgs::symintlist(int i) { + if (!args[i]) { + return c10::fmap(signature.params[i].default_intlist, [](int64_t di) { + return c10::SymInt(di); + }); + } + + const auto size1 = signature.params[i].size; + if (size1 > 0 && THPUtils_checkLong(args[i])) { + return std::vector( + size1, c10::SymInt(THPUtils_unpackLong(args[i]))); + } + + if (size1 > 0 && torch::is_symint(py::handle(args[i]))) { + auto si = py::handle(args[i]).cast(); + return std::vector(size1, si); + } + + if (is_dynamo_compiling && size1 > 0 && THPVariable_Check(args[i])) { + auto& var = THPVariable_Unpack(args[i]); + if (size1 == 1 && var.numel() == 1 && var.sizes().empty() && + at::isIntegralType(var.dtype().toScalarType(), /*include_bool*/ true)) { + auto scalar = var.item(); + TORCH_CHECK(scalar.isIntegral(/*include bool*/ false)); + 
return std::vector(size1, scalar.toSymInt()); + } + } + + PyObject* arg = args[i]; + auto tuple = PyTuple_Check(arg); + // NOLINTNEXTLINE(bugprone-branch-clone) + const auto size2 = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg); + std::vector res; + res.reserve(size2); + for (const auto idx : c10::irange(size2)) { + PyObject* obj = + tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx); + + // Elements of torch.Size are tensors during tracing, and we need to + // record extra information before they are turned into an IntArrayRef + if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + jit::tracer::ArgumentStash::stashIntArrayRefElem( + signature.params[i].name, size2, idx, var); + try { + res.emplace_back(var.item()); + continue; + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + continue; + } else { + // convert tensor to scalar outside of try / catch, + // so that Tensor subclass exceptions will not be caught. 
+ if (THPUtils_checkLongExact(obj)) { + // Fast path for plain numbers + try { + res.emplace_back(THPUtils_unpackLong(obj)); + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } else if (THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + if (var.numel() != 1 || + !at::isIntegralType( + var.dtype().toScalarType(), /*include_bool*/ true)) { + throw_intlist_exception(this, i, obj, idx); + } + auto scalar = var.item(); + TORCH_CHECK(scalar.isIntegral(/*include bool*/ false)); + res.push_back(scalar.toSymInt()); + } else { + try { + if (is_symint(py::handle(obj))) { + res.push_back(py::handle(obj).cast()); + } else { + res.emplace_back(THPUtils_unpackIndex(obj)); + } + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } + } + } + + return res; +} + +inline std::vector PythonArgs::intlistWithDefault( + int i, + std::vector default_intlist) { + if (!args[i]) + return default_intlist; + PyObject* arg = args[i]; + const auto size1 = signature.params[i].size; + if (size1 > 0 && THPUtils_checkLong(arg)) { + return std::vector(size1, THPUtils_unpackLong(arg)); + } + if (size1 > 0 && torch::is_symint(py::handle(arg))) { + return std::vector( + size1, + py::handle(arg).cast().guard_int(__FILE__, __LINE__)); + } + auto tuple = PyTuple_Check(arg); + // NOLINTNEXTLINE(bugprone-branch-clone) + const auto size2 = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg); + std::vector res(size2); + for (const auto idx : c10::irange(size2)) { + PyObject* obj = + tuple ? 
PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx); + // Elements of torch.Size are tensors during tracing, and we need to + // record extra information before they are turned into an IntArrayRef + if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + jit::tracer::ArgumentStash::stashIntArrayRefElem( + signature.params[i].name, size2, idx, var); + try { + res[idx] = var.item(); + continue; + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } else { + // convert tensor to scalar outside of try / catch, + // so that Tensor subclass exceptions will not be caught. + if (THPUtils_checkLongExact(obj)) { + // Fast path for plain numbers + try { + res[idx] = THPUtils_unpackLong(obj); + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } else if (torch::is_symint(py::handle(obj))) { + res[idx] = py::cast(py::handle(obj)) + .guard_int(__FILE__, __LINE__); + } else if (THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + if (var.numel() != 1 || + !at::isIntegralType( + var.dtype().toScalarType(), /*include_bool*/ true)) { + throw_intlist_exception(this, i, obj, idx); + } + res[idx] = var.item(); + } else { + try { + res[idx] = THPUtils_unpackIndex(obj); + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } + } + } + return res; +} + +inline c10::OptionalArray PythonArgs::intlistOptional(int i) { + if (!args[i]) { + return {}; + } + return intlist(i); +} + +inline c10::OptionalArray PythonArgs::symintlistOptional(int i) { + if (!args[i]) { + return {}; + } + return symintlist(i); +} + +inline std::vector PythonArgs::getDoublelist(int i) { + PyObject* arg = args[i]; + auto tuple = PyTuple_Check(arg); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? 
PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg); + std::vector res(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = + tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx); + try { + res[idx] = THPUtils_unpackDouble(obj); + } catch (const std::exception& e) { + throw TypeError( + "%s(): argument '%s' must be %s, but found element of type %s at pos %zu", + signature.name.c_str(), + signature.params[i].name.c_str(), + signature.params[i].type_name().c_str(), + Py_TYPE(obj)->tp_name, + idx + 1); + } + } + return res; +} + +inline c10::OptionalArray PythonArgs::doublelistOptional(int i) { + if (!args[i]) { + return {}; + } + return this->getDoublelist(i); +} + +inline std::vector PythonArgs::doublelist(int i) { + if (!args[i]) { + return {}; + } + return this->getDoublelist(i); +} + +inline c10::optional PythonArgs::toDispatchKeySetOptional( + int i) { + if (!args[i]) { + return {}; + } + return py::cast(py::handle(args[i])); +} + +inline at::ScalarType PythonArgs::scalartypeWithDefault( + int i, + at::ScalarType default_scalartype) { + if (!args[i]) + return default_scalartype; + return scalartype(i); +} + +inline at::ScalarType toScalarType(PyObject* obj) { + if (obj == (PyObject*)&PyFloat_Type) { + return at::ScalarType::Double; + } + if (obj == (PyObject*)&PyBool_Type) { + return at::ScalarType::Bool; + } + if (obj == (PyObject*)&PyLong_Type) { + return at::ScalarType::Long; + } + return reinterpret_cast(obj)->scalar_type; +} + +inline at::ScalarType PythonArgs::scalartype(int i) { + if (!args[i]) { + auto scalartype = signature.params[i].default_scalartype; + return (scalartype == at::ScalarType::Undefined) + ? 
torch::tensors::get_default_scalar_type() + : scalartype; + } + PyObject* obj = args[i]; + return toScalarType(obj); +} + +inline c10::optional PythonArgs::scalartypeOptional(int i) { + if (!args[i]) + return c10::nullopt; + return scalartype(i); +} + +inline at::Layout toLayout(PyObject* obj) { + const auto layout = reinterpret_cast(obj); + return layout->layout; +} + +inline at::Layout PythonArgs::layout(int i) { + if (!args[i]) + return signature.params[i].default_layout; + return toLayout(args[i]); +} + +inline at::Layout PythonArgs::layoutWithDefault( + int i, + at::Layout default_layout) { + if (!args[i]) + return default_layout; + return layout(i); +} + +inline c10::optional PythonArgs::layoutOptional(int i) { + if (!args[i]) + return c10::nullopt; + return layout(i); +} + +inline at::Device toDevice(PyObject* obj) { + if (THPDevice_Check(obj)) { + const auto device = reinterpret_cast(obj); + return device->device; + } + if (THPUtils_checkLong(obj)) { + const auto device_index = THPUtils_unpackLong(obj); + TORCH_CHECK(device_index >= 0, "Device index must not be negative"); + return at::Device( + c10::DeviceType::CUDA, static_cast(device_index)); + } + const std::string& device_str = THPUtils_unpackString(obj); + return at::Device(device_str); +} + +inline at::Device PythonArgs::device(int i) { + if (!args[i]) { + return torch::tensors::get_default_device(); + } + return toDevice(args[i]); +} + +inline at::Device PythonArgs::deviceWithDefault( + int i, + const at::Device& default_device) { + if (!args[i]) + return default_device; + return device(i); +} + +inline c10::optional PythonArgs::deviceOptional(int i) { + if (!args[i]) + return c10::nullopt; + return device(i); +} + +inline at::Dimname PythonArgs::dimname(int i) { + TORCH_INTERNAL_ASSERT(args[i] != nullptr); + return THPDimname_parse(args[i]); +} + +inline std::vector parseDimnameList(PyObject* arg) { + auto tuple = PyTuple_Check(arg); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? 
PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg); + std::vector res; + res.reserve(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = + tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx); + res.push_back(THPDimname_parse(obj)); + } + return res; +} + +inline c10::optional> PythonArgs:: + toDimnameListOptional(int i) { + if (!args[i]) + return c10::nullopt; + return parseDimnameList(args[i]); +} + +inline std::vector PythonArgs::dimnamelist(int i) { + TORCH_INTERNAL_ASSERT(args[i]); + PyObject* arg = args[i]; + auto size = signature.params[i].size; + TORCH_INTERNAL_ASSERT(size == 0 || size == 1); + if (size == 1 && THPUtils_checkDimname(arg)) { + return {THPDimname_parse(arg)}; + } + return parseDimnameList(arg); +} + +inline at::MemoryFormat PythonArgs::memoryformat(int i) { + if (!args[i]) + return at::MemoryFormat::Contiguous; + TORCH_CHECK( + THPMemoryFormat_Check(args[i]), + "memory_format arg must be an instance of the torch.memory_format"); + const auto memory_format = reinterpret_cast(args[i]); + return memory_format->memory_format; +} + +inline c10::optional PythonArgs::memoryformatOptional(int i) { + if (!args[i]) + return c10::nullopt; + return memoryformat(i); +} + +inline at::QScheme PythonArgs::toQScheme(int i) { + if (!args[i]) + return at::kPerTensorAffine; + TORCH_CHECK( + THPQScheme_Check(args[i]), + "qscheme arg must be an instance of the torch.qscheme"); + const auto qscheme = reinterpret_cast(args[i]); + return qscheme->qscheme; +} + +inline std::string PythonArgs::string(int i) { + return stringWithDefault(i, signature.params[i].default_string); +} + +inline std::string PythonArgs::stringWithDefault( + int i, + const std::string& default_str) { + if (!args[i]) + return default_str; + return THPUtils_unpackString(args[i]); +} + +inline c10::optional PythonArgs::stringOptional(int i) { + if (!args[i]) + return c10::nullopt; + return THPUtils_unpackString(args[i]); +} + +inline c10::string_view PythonArgs::stringView(int 
i) { + return stringViewWithDefault(i, signature.params[i].default_string); +} + +inline c10::string_view PythonArgs::stringViewWithDefault( + int i, + const c10::string_view default_str) { + if (!args[i]) + return default_str; + return THPUtils_unpackStringView(args[i]); +} + +inline c10::optional PythonArgs::stringViewOptional(int i) { + if (!args[i]) + return c10::nullopt; + return THPUtils_unpackStringView(args[i]); +} + +inline int64_t PythonArgs::toInt64(int i) { + if (!args[i]) + return signature.params[i].default_int; + if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) { + auto& var = THPVariable_Unpack(args[i]); + jit::tracer::ArgumentStash::stashValue( + signature.params[i].name, idx, var, c10::IntType::get()); + } + if (torch::is_symint(py::handle(args[i]))) { + return py::cast(py::handle(args[i])) + .guard_int(__FILE__, __LINE__); + } + return THPUtils_unpackLong(args[i]); +} + +inline c10::SymInt PythonArgs::toSymInt(int i) { + PyObject* obj = args[i]; + if (!args[i]) { + return c10::SymInt(signature.params[i].default_int); + } + + if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) { + auto& var = THPVariable_Unpack(args[i]); + jit::tracer::ArgumentStash::stashValue( + signature.params[i].name, idx, var, c10::IntType::get()); + } + + // convert FakeTensor to SymInt + // expect empty sizes, numel = 1 + // and ScalarType::Int + if (is_dynamo_compiling && THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + + if (var.numel() != 1 || !var.sizes().empty() || + !at::isIntegralType( + var.dtype().toScalarType(), /*include_bool*/ true)) { + throw TypeError( + "%s(): argument '%s' must be %s, failed to convert %s with sizes.empty()=%d", + signature.name.c_str(), + signature.params[i].name.c_str(), + signature.params[i].type_name().c_str(), + Py_TYPE(obj)->tp_name, + var.sizes().empty()); + } + auto scalar = var.item(); + TORCH_CHECK(scalar.isIntegral(/*include bool*/ false)); + return scalar.toSymInt(); 
+ } + + return py::cast(py::handle(args[i])); +} + +inline c10::SymBool PythonArgs::toSymBool(int i) { + if (!args[i]) { + return c10::SymBool(signature.params[i].default_bool); + } + if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) { + auto& var = THPVariable_Unpack(args[i]); + jit::tracer::ArgumentStash::stashValue( + signature.params[i].name, idx, var, c10::BoolType::get()); + } + + return py::cast(py::handle(args[i])); +} + +inline int64_t PythonArgs::toInt64WithDefault(int i, int64_t default_int) { + if (!args[i]) + return default_int; + return toInt64(i); +} + +inline c10::optional PythonArgs::toInt64Optional(int i) { + if (!args[i]) + return c10::nullopt; + return toInt64(i); +} + +inline c10::optional PythonArgs::toSymIntOptional(int i) { + if (!args[i]) + return c10::nullopt; + return toSymInt(i); +} + +inline c10::optional PythonArgs::toBoolOptional(int i) { + if (!args[i]) { + return c10::nullopt; + } + return toBool(i); +} + +inline c10::optional PythonArgs::toDoubleOptional(int i) { + if (!args[i]) { + return c10::nullopt; + } + return toDouble(i); +} + +inline double PythonArgs::toDouble(int i) { + if (!args[i]) + return signature.params[i].default_double; + if (torch::is_symfloat(py::handle(args[i]))) { + return py::cast(py::handle(args[i])) + .guard_float(__FILE__, __LINE__); + } + if (torch::is_symint(py::handle(args[i]))) { + return static_cast(py::cast(py::handle(args[i])) + .guard_int(__FILE__, __LINE__)); + } + return THPUtils_unpackDouble(args[i]); +} + +inline bool PythonArgs::toBool(int i) { + if (!args[i]) + return signature.params[i].default_bool; + if (torch::is_symbool(py::handle(args[i]))) { + return py::cast(py::handle(args[i])) + .guard_bool(__FILE__, __LINE__); + } + return args[i] == Py_True; +} + +inline double PythonArgs::toDoubleWithDefault(int i, double default_double) { + if (!args[i]) + return default_double; + return toDouble(i); +} + +inline c10::complex PythonArgs::toComplex(int i) { + if (!args[i]) 
+ return *(reinterpret_cast*>( + signature.params[i].default_complex)); + return THPUtils_unpackComplexDouble(args[i]); +} + +inline c10::complex PythonArgs::toComplexWithDefault( + int i, + c10::complex default_value) { + if (!args[i]) + return default_value; + return toComplex(i); +} + +inline bool PythonArgs::toBoolWithDefault(int i, bool default_bool) { + if (!args[i]) + return default_bool; + return toBool(i); +} + +inline bool PythonArgs::isNone(int i) { + return args[i] == nullptr; +} + +inline c10::optional PythonArgs::generator(int i) { + if (!args[i]) + return c10::nullopt; + return reinterpret_cast(args[i])->cdata; +} + +inline at::Storage PythonArgs::storage(int i) { + if (!args[i]) + return at::Storage(); + return createStorage(args[i]); +} + +inline at::Storage PythonArgs::storage( + int i, + at::ScalarType& storage_scalar_type, + bool& is_typed_storage) { + at::Storage storage; + if (!args[i]) { + storage = at::Storage(); + is_typed_storage = false; + storage_scalar_type = at::ScalarType::Undefined; + } else { + std::tie(storage, storage_scalar_type, is_typed_storage) = + createStorageGetType(args[i]); + } + return storage; +} + +inline c10::Stream PythonArgs::stream(int i) { + if (!args[i]) + return c10::Stream( + c10::Stream::Default::DEFAULT, c10::Device(c10::DeviceType::CPU, -1)); + if (!THPStream_Check(args[i])) { + throw TypeError( + "expected Stream object. Got '%s'", Py_TYPE(args[i])->tp_name); + } + return c10::Stream::unpack3( + ((THPStream*)args[i])->stream_id, + static_cast(((THPStream*)args[i])->device_index), + static_cast(((THPStream*)args[i])->device_type)); +} + +inline PyObject* PythonArgs::pyobject(int i) { + if (!args[i]) + return Py_None; + return args[i]; +} + +/* + * + * Handle __torch_function__ overrides if we know that there are overloaded + * arguments. All objects stored in r.overloaded_args must have a + * __torch_function__ implementation and the arguments must be ordered in order + * of precedence. 
Precedence goes from left to right in the order of the + * signature of the function the overloaded arguments were passed to, except + * subclasses are always considered before superclasses. + * + * If the result of calling __torch_function__ is NotImplemented, the + * next implementation in the precedence order is called. If all + * arguments return NotImplemented from their __torch_function__ + * implementation, a TypeError is raised in Python. + * + * Assumes overloaded_args has at least one entry. All entries must have + * a __torch_function__ attribute that resolves to a callable that + * accepts a torch API function, a tuple of arguments, and a dict of + * keyword arguments for the torch API function. + * + * It is sufficient to call PythonArgs::has_torch_function before + * calling this function to verify that there are valid arguments + * present. If that is not done then special care must be taken to + * ensure there are arguments that are overloaded with + * __torch_function__. + * + * See torch._overrides.handle_torch_function for the equivalent + * code in the pure-python implementation. + * + * 'r' is a parsed PythonArgs instance, returned from + * PythonArgParser::parse. + * + * 'args' is a reference to the python tuple of arguments to the torch + * API function. + * + * 'kwargs' is a reference to the python dict of keyword arguments to + * the torch API function. + * + * 'torch_api' is a reference to a python torch API namespace. + * + * 'torch_api_function' is the reference to the original torch method, usually, + * we can use torch_api and func_name to get torch_api_function. In some cases, + * e.g., torch custom op, we create the function in C++, if we still use + * torch_api and func_name to fetch original api, a cyclic call will happen. + * + * 'overloaded_args' is the args which have overloaded __torch_function__. + * + * 'func_name' is the name of the original torch method.
+ * + * TODO: we could use different names for the following 'handle_torch_function' + * instead of overloading. + * + */ +// Used for Tensor methods with arguments. +auto handle_torch_function( + PythonArgs& r, + PyObject* self, + PyObject* args, + PyObject* kwargs, + PyObject* torch_api, + const char* module_name, + const char* func_name_override = nullptr) -> PyObject*; + +// Used for functions which needs to parse python args. +auto handle_torch_function( + PythonArgs& r, + PyObject* args, + PyObject* kwargs, + PyObject* torch_api, + const char* module_name, + const char* func_name_override = nullptr) -> PyObject*; + +// Used for functions that have no argument parsing. +auto handle_torch_function( + PyObject* self, + const std::string& func_name, + PyObject* args = nullptr, + PyObject* kwargs = nullptr, + PyObject* torch_api = THPVariableClass, + const std::string& module_name = "torch.Tensor") -> PyObject*; + +// Used for functions created in C++, e.g., C++ custom op, which doesn't use +// PythonArgParser to get overloaded_args. +enum class TorchFunctionName { TorchFunction, TorchDispatch }; + +auto TORCH_PYTHON_API handle_torch_function_no_python_arg_parser( + at::ArrayRef overloaded_args, + PyObject* args, + PyObject* kwargs, + const char* func_name, + PyObject* torch_api_function, + const char* module_name, + TorchFunctionName torch_function_name = TorchFunctionName::TorchFunction) + -> PyObject*; + +// Used for getters of Tensor properties +auto handle_torch_function_getter( + THPVariable* self, + const std::string& property_name) -> PyObject*; + +// Used for setters of Tensor properties. 
+auto handle_torch_function_setter( + THPVariable* self, + const std::string& property_name, + PyObject* value) -> int; + +// Used for __getitem__ and __setitem__ +auto handle_torch_function_indexing( + PyObject* self, + PyObject* index, + PyObject* val = nullptr) -> PyObject*; + +/* + * Check if the input obj is Tensor type, including its subclass, or overloaded + * type. If the type defines __torch_function__, it also returns true. + * Otherwise returns false. If the class is not torch.Tensor, and it defines + * __torch_function__, we append obj to overloaded_args. + * + * 'obj': the input argument to be checked + * 'overloaded_args': the vector to append the overloaded args. + */ +bool is_tensor_and_append_overloaded( + PyObject* obj, + std::vector* overloaded_args); + +/* + * Check if the input obj is Tensor List or Tensor Tuple type. First check + * whether obj is Tuple or List type, if true, iterate over each element and + * check whether it is Tensor type, including its subclass or overloaded type. + * At the same time, the overloaded arg is appended to the overloaded_args. + * + * 'obj': the input argument to be checked + * 'overloaded_args': the vector to append the overloaded args. + * 'argnum': the number of total arguments of the function being checked. + * 'throw_error': whether throw error if any element in the list or tuple is + * not tensor type or overloaded. + */ +bool is_tensor_list_and_append_overloaded( + PyObject* obj, + std::vector* overloaded_args, + int argnum, + bool throw_error); + +/* Given an argument that is definitely a tensor and is definitely overloaded, + * append it to the overloaded arguments list. Use this instead of + * is_tensor_and_append_overloaded in situations where you have a PyObject + * and you know it definitely is a Tensor and it is definitely overloaded.
+ * + * 'overloaded_args': the vector to append the overloaded args + * 'obj': the input tensor that is overloaded + */ +void append_overloaded_tensor( + std::vector* overloaded_args, + PyObject* obj); + +/* Given an argument that is definitely a type and is definitely overloaded, + * append it to the overloaded arguments list. Use this only with + * __torch_dispatch__, where we operate on classes that have a + * __torch_dispatch__ classmethod. + * + * 'overloaded_args': the vector to append the overloaded type + * 'obj': the input class that has a __torch_dispatch__ classmethod. + */ +void append_overloaded_type( + std::vector* overloaded_args, + PyObject* obj); + +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h new file mode 100644 index 0000000000000000000000000000000000000000..f0e4b54b131482c86801aed8183f7e01ae93b085 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h @@ -0,0 +1,147 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace utils { + +template +inline T unpackIntegral(PyObject* obj, const char* type) { +#if PY_VERSION_HEX >= 0x030a00f0 + // In Python-3.10 floats can no longer be silently converted to integers + // Keep backward compatible behavior for now + if (PyFloat_Check(obj)) { + return c10::checked_convert(THPUtils_unpackDouble(obj), type); + } + return c10::checked_convert(THPUtils_unpackLong(obj), type); +#else + return static_cast(THPUtils_unpackLong(obj)); +#endif +} + +inline void store_scalar(void* data, at::ScalarType scalarType, PyObject* obj) { + switch (scalarType) { + case at::kByte: + *(uint8_t*)data = unpackIntegral(obj, "uint8"); + break; + case at::kChar: + *(int8_t*)data = unpackIntegral(obj, "int8"); + break; + case at::kShort: + *(int16_t*)data = 
unpackIntegral(obj, "int16"); + break; + case at::kInt: + *(int32_t*)data = unpackIntegral(obj, "int32"); + break; + case at::kLong: + *(int64_t*)data = unpackIntegral(obj, "int64"); + break; + case at::kHalf: + *(at::Half*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat: + *(float*)data = (float)THPUtils_unpackDouble(obj); + break; + case at::kDouble: + *(double*)data = THPUtils_unpackDouble(obj); + break; + case at::kComplexHalf: + *(c10::complex*)data = + (c10::complex)static_cast>( + THPUtils_unpackComplexDouble(obj)); + break; + case at::kComplexFloat: + *(c10::complex*)data = + (c10::complex)THPUtils_unpackComplexDouble(obj); + break; + case at::kComplexDouble: + *(c10::complex*)data = THPUtils_unpackComplexDouble(obj); + break; + case at::kBool: + *(bool*)data = THPUtils_unpackNumberAsBool(obj); + break; + case at::kBFloat16: + *(at::BFloat16*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e5m2: + *(at::Float8_e5m2*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e5m2fnuz: + *(at::Float8_e5m2fnuz*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e4m3fn: + *(at::Float8_e4m3fn*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e4m3fnuz: + *(at::Float8_e4m3fnuz*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + default: + throw std::runtime_error("invalid type"); + } +} + +inline PyObject* load_scalar(void* data, at::ScalarType scalarType) { + switch (scalarType) { + case at::kByte: + return THPUtils_packInt64(*(uint8_t*)data); + case at::kChar: + return THPUtils_packInt64(*(int8_t*)data); + case at::kShort: + return THPUtils_packInt64(*(int16_t*)data); + case at::kInt: + return THPUtils_packInt64(*(int32_t*)data); + case at::kLong: + return THPUtils_packInt64(*(int64_t*)data); + case at::kHalf: + return PyFloat_FromDouble( + at::convert(*(at::Half*)data)); + case at::kFloat: + return 
PyFloat_FromDouble(*(float*)data); + case at::kDouble: + return PyFloat_FromDouble(*(double*)data); + case at::kComplexHalf: { + auto data_ = reinterpret_cast*>(data); + return PyComplex_FromDoubles(data_->real(), data_->imag()); + } + case at::kComplexFloat: { + auto data_ = reinterpret_cast*>(data); + return PyComplex_FromDoubles(data_->real(), data_->imag()); + } + case at::kComplexDouble: + return PyComplex_FromCComplex( + *reinterpret_cast((c10::complex*)data)); + case at::kBool: + return PyBool_FromLong(*(bool*)data); + case at::kBFloat16: + return PyFloat_FromDouble( + at::convert(*(at::BFloat16*)data)); + case at::kFloat8_e5m2: + return PyFloat_FromDouble( + at::convert(*(at::Float8_e5m2*)data)); + case at::kFloat8_e4m3fn: + return PyFloat_FromDouble( + at::convert(*(at::Float8_e4m3fn*)data)); + case at::kFloat8_e5m2fnuz: + return PyFloat_FromDouble(at::convert( + *(at::Float8_e5m2fnuz*)data)); + case at::kFloat8_e4m3fnuz: + return PyFloat_FromDouble(at::convert( + *(at::Float8_e4m3fnuz*)data)); /* bugfix: cast previously read the buffer as at::Float8_e5m2fnuz (copy-paste from the case above), decoding e4m3fnuz scalars with the wrong float8 bit layout */ + default: + throw std::runtime_error("invalid type"); + } +} + +} // namespace utils +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_symnode.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_symnode.h new file mode 100644 index 0000000000000000000000000000000000000000..112de773ea382fc735e7007b7b9911b7a6047996 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_symnode.h @@ -0,0 +1,278 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { + +TORCH_PYTHON_API py::handle get_symint_class(); +TORCH_PYTHON_API py::handle get_symfloat_class(); +TORCH_PYTHON_API py::handle get_symbool_class(); + +// NB: These functions must not be called too early, otherwise torch not setup.
+// Alternate design is to have torch "register" the object to us +inline bool is_symint(py::handle obj) { + return py::isinstance(obj, get_symint_class()); +} +inline bool is_symfloat(py::handle obj) { + return py::isinstance(obj, get_symfloat_class()); +} +inline bool is_symbool(py::handle obj) { + return py::isinstance(obj, get_symbool_class()); +} + +namespace impl { + +// This c10::SymNodeImpl simply backends to a Python object that +// implements the API. The Python object is the source of truth, +// this is just an adapter so C++ calls can get to the object. +class PythonSymNodeImpl : public c10::SymNodeImpl { + public: + PythonSymNodeImpl(py::object pyobj) : c10::SymNodeImpl() { + pyobj_ = std::make_shared( + pyobj.release().ptr(), getPyInterpreter()); + }; + + c10::SymNode wrap_int(int64_t num) override { + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr("wrap_int")(num); + return c10::make_intrusive(std::move(r)); + } + + c10::SymNode wrap_float(double num) override { + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr("wrap_float")(num); + return c10::make_intrusive(std::move(r)); + } + + c10::SymNode wrap_bool(bool num) override { + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr("wrap_bool")(num); + return c10::make_intrusive(std::move(r)); + } + +#define TORCH_SYMNODE_SIZES_STRIDES(n) \ + c10::SymNode n( \ + c10::ArrayRef sizes, c10::ArrayRef strides) \ + override { \ + py::gil_scoped_acquire acquire; \ + auto r = getPyObj().attr(#n)(sizes, strides); \ + return c10::make_intrusive(std::move(r)); \ + } + + // clang-format off + TORCH_SYMNODE_SIZES_STRIDES(is_contiguous) + TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_contiguous_2d) + TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_contiguous_3d) + TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_strides_2d) + TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_strides_3d) + TORCH_SYMNODE_SIZES_STRIDES(is_non_overlapping_and_dense) + // clang-format on + +#undef 
TORCH_SYMNODE_SIZES_STRIDES + + bool bool_() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("bool_")().is(py::handle(Py_True)); + } + + bool is_int() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("is_int")().is(py::handle(Py_True)); + } + + bool is_float() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("is_float")().is(py::handle(Py_True)); + } + + bool is_bool() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("is_bool")().is(py::handle(Py_True)); + } + + bool has_hint() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("has_hint")().is(py::handle(Py_True)); + } + + int64_t guard_int(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("guard_int")(file, line).cast(); + } + + double guard_float(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("guard_float")(file, line).cast(); + } + + bool guard_bool(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("guard_bool")(file, line).cast(); + } + + bool expect_true(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("expect_true")(file, line).cast(); + } + + bool expect_size(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("expect_size")(file, line).cast(); + } + + int64_t int_() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("int_")().cast(); + } + + c10::optional maybe_as_int() override { + py::gil_scoped_acquire acquire; + const auto& r = getPyObj().attr("maybe_as_int")(); + if (r.is_none()) { + return c10::nullopt; + } else { + return r.cast(); + } + } + + std::string str() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("str")().cast(); + } + + c10::SymNode dispatch_sym_ite_( + const char* fname, + const 
c10::SymNode& other, + const c10::SymNode& third) { + auto pother = dynamic_cast(other.get()); + auto pthird = dynamic_cast(third.get()); + TORCH_CHECK(pother); + TORCH_CHECK(pthird); + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr(fname)(pother->getPyObj(), pthird->getPyObj()); + return c10::make_intrusive(r); + } + + c10::SymNode dispatch_common_(const char* fname, const c10::SymNode& other) { + auto pother = dynamic_cast(other.get()); + TORCH_CHECK(pother); + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr(fname)(pother->getPyObj()); + return c10::make_intrusive(r); + } + + c10::SymNode dispatch_common_(const char* fname) { + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr(fname)(); + return c10::make_intrusive(r); + } + + c10::SymNode add(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sub(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode mul(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode truediv(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode pow(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode floordiv(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode mod(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode eq(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode ne(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode gt(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode lt(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode le(const c10::SymNode& other) 
override { + return dispatch_common_(__func__, other); + } + + c10::SymNode ge(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sym_min(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + c10::SymNode sym_max(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sym_and(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sym_or(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sym_ite(const c10::SymNode& other, const c10::SymNode& third) + override { + return dispatch_sym_ite_(__func__, other, third); + } + + c10::SymNode sym_not() override { + return dispatch_common_(__func__); + } + + c10::SymNode ceil() override { + return dispatch_common_(__func__); + } + + c10::SymNode floor() override { + return dispatch_common_(__func__); + } + + c10::SymNode neg() override { + return dispatch_common_(__func__); + } + + c10::SymNode clone() override { + return dispatch_common_(__func__); + } + + c10::SymNode sym_float() override { + return dispatch_common_(__func__); + } + + py::handle getPyObj() { + return py::handle(pyobj_.get()->ptr(getPyInterpreter())); + } + std::shared_ptr pyobj_ = nullptr; +}; + +} // namespace impl +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_tuples.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_tuples.h new file mode 100644 index 0000000000000000000000000000000000000000..ab71ccbd4441180ad58e2df8f57179098b4f83eb --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_tuples.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include +#include +#include + +inline void THPUtils_packInt64Array( + PyObject* tuple, + size_t size, + const int64_t* sizes) { + for (size_t i = 0; 
i != size; ++i) { + PyObject* i64 = THPUtils_packInt64(sizes[i]); + if (!i64) { + throw python_error(); + } + PyTuple_SET_ITEM(tuple, i, i64); + } +} + +inline PyObject* THPUtils_packInt64Array(size_t size, const int64_t* sizes) { + THPObjectPtr tuple(PyTuple_New(size)); + if (!tuple) + throw python_error(); + THPUtils_packInt64Array(tuple.get(), size, sizes); + return tuple.release(); +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/six.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/six.h new file mode 100644 index 0000000000000000000000000000000000000000..cfca55bb86ec7158e7d3de90dbaae0b0e6cde4b2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/six.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include +#include + +namespace six { + +// Usually instances of PyStructSequence is also an instance of tuple +// but in some py2 environment it is not, so we have to manually check +// the name of the type to determine if it is a namedtupled returned +// by a pytorch operator. + +inline bool isStructSeq(pybind11::handle input) { + return pybind11::cast(input.get_type().attr("__module__")) == + "torch.return_types"; +} + +inline bool isStructSeq(PyObject* obj) { + return isStructSeq(pybind11::handle(obj)); +} + +inline bool isTuple(pybind11::handle input) { + if (PyTuple_Check(input.ptr())) { + return true; + } + return false; +} + +inline bool isTuple(PyObject* obj) { + return isTuple(pybind11::handle(obj)); +} + +// maybeAsTuple: if the input is a structseq, then convert it to a tuple +// +// On Python 3, structseq is a subtype of tuple, so these APIs could be used +// directly. But on Python 2, structseq is not a subtype of tuple, so we need to +// manually create a new tuple object from structseq. 
+inline THPObjectPtr maybeAsTuple(PyStructSequence* obj) { + Py_INCREF(obj); + return THPObjectPtr((PyObject*)obj); +} + +inline THPObjectPtr maybeAsTuple(PyObject* obj) { + if (isStructSeq(obj)) + return maybeAsTuple((PyStructSequence*)obj); + Py_INCREF(obj); + return THPObjectPtr(obj); +} + +} // namespace six diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_dtypes.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_dtypes.h new file mode 100644 index 0000000000000000000000000000000000000000..32b769971d03fa7e5cb87031b3ae972f5cf3ffeb --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_dtypes.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace utils { + +std::pair getDtypeNames(at::ScalarType scalarType); + +void initializeDtypes(); + +} // namespace utils +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_flatten.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_flatten.h new file mode 100644 index 0000000000000000000000000000000000000000..04a55ec7960e671d825524d1000d69e70ee6bf0f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_flatten.h @@ -0,0 +1,86 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace utils { + +/// Generate an ID for a combination of tensor backend + scalar type to be used +/// when ordering tensors ('like' tensors are grouped by pulling out their +/// backend + scalar type, so this function combines that into a single number) +inline size_t type_id(const at::Tensor& tensor) { + return static_cast(tensor.options().backend()) * + static_cast(at::ScalarType::NumOptions) + + static_cast(tensor.scalar_type()); +} + +inline at::Tensor flatten_dense_tensors(at::TensorList tensors) { + return 
at::flatten_dense_tensors(tensors); +} + +inline std::vector unflatten_dense_tensors( + const at::Tensor& flat, + at::TensorList tensors) { + return at::unflatten_dense_tensors(flat, tensors); +} + +struct TensorGroup { + std::vector tensors; + size_t size = 0; + + size_t type_id() { + AT_ASSERT(!tensors.empty()); + return ::torch::utils::type_id(tensors[0]); + } + + const at::TensorOptions options() { + AT_ASSERT(!tensors.empty()); + return tensors[0].options(); + } +}; + +// Helper function that takes a list of tensors and splits them into tensor +// groups by the size limit and outputs these tensor groups. If the input +// tensors are of different tensor types, they will be split into different +// groups as well. +// +// Two options of splitting provided to the user, +// +// Imagine the size_limit is 256 and the list of input tensors are: +// tensor_a(fp16 - 128 bytes), +// tensor_b(fp32 - 256 bytes), +// tensor_c(fp16 - 128 bytes), +// +// when fine_grained == false: +// The function will read the list of tensors sequentially and accumulate +// enough tensors for each data type until the size_limit, therefore: +// it will output: {{tensor_a, tensor_c}, {tensor_b}} +// +// when fine_grained == true: +// The function will read the list of tensors sequentially and accumulate +// enough tensors for all data types until the size_limit, and then split +// the accumulated tensors into different groups by data types, therefore: +// it will output: {{tensor_a}, {tensor_b}, {tensor_c}} +TORCH_API std::vector take_tensors( + at::TensorList tensors, + size_t size_limit, + bool fine_grained = false); + +TORCH_API void reorder_tensors_like( + std::vector& tensors, + at::TensorList order); + +TORCH_API std::pair flatten_sparse_tensors( + at::TensorList tensors); + +TORCH_API std::vector unflatten_sparse_tensors( + const at::Tensor& flat_indices, + const at::Tensor& flat_values, + at::TensorList tensors); + +} // namespace utils +} // namespace torch diff --git 
a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_layouts.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_layouts.h new file mode 100644 index 0000000000000000000000000000000000000000..33e32b516b1215a8e55b4261101f3ac67ce4171e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_layouts.h @@ -0,0 +1,9 @@ +#pragma once + +namespace torch { +namespace utils { + +void initializeLayouts(); + +} +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_memoryformats.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_memoryformats.h new file mode 100644 index 0000000000000000000000000000000000000000..3f86f0c445287fc4bfcd7f037b50e98352684c0d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_memoryformats.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace utils { + +void initializeMemoryFormats(); +TORCH_PYTHON_API PyObject* getTHPMemoryFormat(c10::MemoryFormat); + +} // namespace utils +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark.h new file mode 100644 index 0000000000000000000000000000000000000000..2fca95ca16bf79a8d7306ed260dce216fd94cac2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark.h @@ -0,0 +1,199 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +namespace py = pybind11; + +namespace torch { +namespace throughput_benchmark { + +/** + * The struct is used to provide results of a benchmark to the caller + * In the future all additional statics should be added here. 
+ */ +struct BenchmarkExecutionStats { + float latency_avg_ms{-1}; + int64_t num_iters{-1}; +}; + +std::ostream& operator<<( + std::ostream& os, + const BenchmarkExecutionStats& value); + +/** + * Use this struct in order to configure a throughput benchmark run. + * This struct should include parameters related to threading, batching, number + * of iterations, warm-up, etc. More configs can be added as needed. + * General rule here is that only things that c++ must(!) to be aware of should + * be here. If we can keep other parts in python, we should keep them there. + * This is typical for things that are not perf critical and don't affect + * execution statistics benchmark returns. + */ +struct BenchmarkConfig { + public: + // Calling threads are those threads that are calling into a module in + // parallel. + int num_calling_threads{1}; + // Worker threads are not supported yet. This is just an example that we plan + // to support some sort of multi-threaded forward calls. We may change this + // setting in the future to support different intra and inter op parallelism + // which is not available in PyTorch yet + int num_worker_threads{1}; + // Warmup iters are used to make sure we run a module a few times before + // actually measuring things. This way we avoid cold caches and any other + // similar problems + int num_warmup_iters{1}; + // Number of iterations the benchmark should run with. This number is separate + // from the warmup iterations + int64_t num_iters{100}; + // If set autograd profiler will be enabled. I.e. 
this variable would be + // created before the main benchmark loop (but after the warmup): + // RecordProfile guard(profiler_output_path); + std::string profiler_output_path{""}; +}; + +namespace detail { + +/** + * A helper class to abstract out different models we test throughput of + */ +template +class BenchmarkHelper { + public: + BenchmarkHelper(); + explicit BenchmarkHelper(Model model) + : model_(std::move(model)), initialized_(true) {} + + // This method to be used in benchmark() method + // Note that there is no result. This way we don't have to call this under GIL + // even when running in the nn.Module mode. Otherwise destructor of the result + // would race with Python + void runOnce(Input&&) const; + // This method is to be used when calling from Python directly + Output runOnce(py::args&&, const py::kwargs&) const; + // Aggregate input in the format Model expects in order to avoid further + // conversions at the benchmark time + void addInput(py::args&&, py::kwargs&&); + void addInput(Input&&); + BenchmarkExecutionStats benchmark(const BenchmarkConfig& config) const; + + bool initialized() const { + return initialized_; + } + + // Destructor doesn't require the GIL because it is going to be executed on + // the PyThon thread + std::vector inputs_; + Model model_; + bool initialized_{false}; +}; + +struct C10_HIDDEN ModuleInput { + ModuleInput(ModuleInput&& other) = default; + + ModuleInput(const ModuleInput&) = delete; + ModuleInput& operator=(ModuleInput& other) = delete; + ModuleInput& operator=(ModuleInput&& other) = delete; + + ModuleInput(py::args&& args, py::kwargs&& kwargs) + : args(std::move(args)), kwargs(std::move(kwargs)) {} + + py::args args; + py::kwargs kwargs; +}; +typedef py::object ModuleOutput; +typedef std::vector ScriptModuleInput; +typedef at::IValue ScriptModuleOutput; + +template +Input cloneInput(const Input& input); + +typedef BenchmarkHelper + ScriptModuleBenchmark; +template <> +inline BenchmarkHelper:: + BenchmarkHelper() 
+ : model_("Module", std::make_shared()), + initialized_(false) {} +typedef BenchmarkHelper ModuleBenchmark; +template <> +inline BenchmarkHelper::BenchmarkHelper() + : initialized_(false) {} + +template <> +void ScriptModuleBenchmark::runOnce(ScriptModuleInput&& input) const; + +template <> +ScriptModuleOutput ScriptModuleBenchmark::runOnce( + py::args&& args, + const py::kwargs& kwargs) const; + +template <> +void ModuleBenchmark::runOnce(ModuleInput&& input) const; + +template <> +ModuleOutput ModuleBenchmark::runOnce(py::args&& args, const py::kwargs& kwargs) + const; + +template <> +void ScriptModuleBenchmark::addInput(py::args&& args, py::kwargs&& kwargs); +template <> +void ScriptModuleBenchmark::addInput(ScriptModuleInput&& input); + +template <> +void ModuleBenchmark::addInput(py::args&& args, py::kwargs&& kwargs); + +} // namespace detail + +/** + * This class is a small c++ component responsible for executing a PyTorch + * module under an inference server like load. It can emulate multiple calling + * threads to a single module provided. In the future we plan to enhance this + * component to support inter and intra-op parallelism as well as multiple + * models running in a single process. + * + * For current available configurations refer to the BenchmarkConfig + * documentation + * + * The class supports working with either nn.Module or ScriptModule. + * Under the hood it just dispatches to corresponding specialization of + * class BenchmarkHelper + */ +class C10_HIDDEN ThroughputBenchmark { + public: + explicit ThroughputBenchmark(const jit::Module& module); + explicit ThroughputBenchmark(py::object module); + + // Add one more input example. This input example should be in the exact + // format the module under test expects. 
It is responsibility of the module to + // perform any such format checks, the benchmark doesn't perform any + // validation of its own + void addInput(py::args args, py::kwargs kwargs); + + // Equivalent to just running the model directly on the given input + py::object runOnce(py::args&& args, const py::kwargs& kwargs); + + // The main method of the class allows to perform a multi-threaded benchmark + // It returns BenchmarkExecutionStats object with a lot of useful statistics + // about runtime execution. We can enhance this class in the future to provide + // more information to the user + BenchmarkExecutionStats benchmark(const BenchmarkConfig& config) const; + + private: + detail::ScriptModuleBenchmark script_module_; + detail::ModuleBenchmark module_; +}; +} // namespace throughput_benchmark +} // namespace torch + +#include diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h new file mode 100644 index 0000000000000000000000000000000000000000..74ace7be77fd837d35699815681b0abcad63da1a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h @@ -0,0 +1,152 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace torch { + +using at::IterArgs; + +struct CountTensors : IterArgs { + size_t out = 0; + void operator()(const at::Tensor& x) { + out += 1; + } + void operator()(const c10::optional& x) { + out += x.has_value(); + } + void operator()(at::ArrayRef xs) { + out += xs.size(); + } +}; + +template +size_t count_tensors(Args&&... args) { + return CountTensors().apply(std::forward(args)...).out; +} + +struct CountVariables : IterArgs { + size_t out = 0; + void operator()(const autograd::Variable& x) { + out += 1; + } + void operator()(at::ArrayRef xs) { + out += xs.size(); + } +}; + +template +inline size_t count_variables(Args&&... 
args) { + return CountVariables().apply(std::forward(args)...).out; +} + +//===----------------------------------------------------------------------===// +// std::index_sequence shim for C++11 +//===----------------------------------------------------------------------===// + +// A container of type-template parameter indices. +template +struct Indices {}; + +// Decrements the index N, adds N-1 to the list of indices and forwards +// whatever we already have. +template +struct MakeIndices : MakeIndices {}; + +// Partial specialization that forms our base case. When N is zero, we stop +// and define a typedef that will be visible to earlier classes due to +// inheritance. The typedef we define is an index list containing the numbers +// 0 through N-1. +template +struct MakeIndices<0, Is...> { + using indices = Indices; +}; + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +template +using enable_if_t = typename std::enable_if::type; + +template +using disable_if_t = enable_if_t; + +template +using decay_t = typename std::decay::type; + +namespace detail { +template +struct pack; +} // namespace detail + +template +struct all_of : std::is_same< + detail::pack, + detail::pack> {}; + +template +struct any_of; + +template <> +struct any_of<> : std::false_type {}; + +template +struct any_of { + static constexpr bool value = head || any_of::value; +}; + +template +struct none_of { + static constexpr bool value = !any_of::value; +}; + +template +using enable_if_all_of_t = enable_if_t::value>; + +template +using disable_if_contains_t = + enable_if_all_of_t<(!std::is_same>::value)...>; + +template +void apply(Function function, Ts&&... ts) { + // https://stackoverflow.com/questions/13978916/inserting-a-variadic-argument-list-into-a-vector + // Creates a dummy array, so that each function call is evaluated in order. 
+ // `(function(), 0)` is because `function` should (!) return `void`, so + // according to the comma operator, it is evaluated and its result (`void`) + // is discarded. Then the zero is evaluated and used as an element in the + // array. The first zero ensures the array is not empty. + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + int _[]{0, (function(std::forward(ts)), 0)...}; + (void)_; +} + +template < + typename ReturnType, + typename... Ts, + typename Function, + typename Accessor> +ReturnType unpack(Function function, Accessor accessor) { + return ReturnType(unpack( + std::move(function), + std::move(accessor), + typename MakeIndices::indices())); +} + +template < + typename ReturnType, + typename... Ts, + typename Function, + typename Accessor, + size_t... Is> +ReturnType unpack(Function function, Accessor accessor, Indices) { + return ReturnType(function(accessor.template operator()(Is)...)); +} + +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/verbose.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/verbose.h new file mode 100644 index 0000000000000000000000000000000000000000..f6c5eae461bcd6d8fe52a9b9700e85dd29c7765f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/verbose.h @@ -0,0 +1,8 @@ +#pragma once +#include + +namespace torch { + +void initVerboseBindings(PyObject* module); + +} // namespace torch diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/grid_helper_curvelinear.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/grid_helper_curvelinear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f950df0285e93b125386d06f4fda6c852fd509ad Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/grid_helper_curvelinear.cpython-310.pyc differ diff 
--git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/parasite_axes.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/parasite_axes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59749ce026ba26efedf734b2741eacf96987f11c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/parasite_axes.cpython-310.pyc differ